<?xml version="1.0"?>
<?xml-stylesheet type="text/css" href="http://index.cslt.org/mediawiki/skins/common/feed.css?303"?>
<feed xmlns="http://www.w3.org/2005/Atom" xml:lang="zh-cn">
		<id>http://index.cslt.org/mediawiki/api.php?action=feedcontributions&amp;feedformat=atom&amp;user=Jianghaoyu</id>
		<title>cslt Wiki - User contributions [zh-cn]</title>
		<link rel="self" type="application/atom+xml" href="http://index.cslt.org/mediawiki/api.php?action=feedcontributions&amp;feedformat=atom&amp;user=Jianghaoyu"/>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/%E7%89%B9%E6%AE%8A:%E7%94%A8%E6%88%B7%E8%B4%A1%E7%8C%AE/Jianghaoyu"/>
		<updated>2026-04-07T04:54:42Z</updated>
		<subtitle>User contributions</subtitle>
		<generator>MediaWiki 1.23.3</generator>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/2022-10-24</id>
		<title>2022-10-24</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/2022-10-24"/>
				<updated>2022-10-24T10:58:55Z</updated>
		
		<summary type="html">&lt;p&gt;Jianghaoyu：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!People !! This Week !! Next Week !! Task Tracking (&amp;lt;font color=&amp;quot;red&amp;quot;&amp;gt;Deadline&amp;lt;/font&amp;gt;)&lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Dong Wang&lt;br /&gt;
|| &lt;br /&gt;
* Homonet experiments with MNIST: for generation model&lt;br /&gt;
* ICASSP papers&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li&lt;br /&gt;
|| &lt;br /&gt;
* Homo loss on SID&lt;br /&gt;
* Qiang's Paper&lt;br /&gt;
||&lt;br /&gt;
* ICASSP papers&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi&lt;br /&gt;
|| &lt;br /&gt;
* Homonet on aishell&lt;br /&gt;
* Homonet on Google speech command&lt;br /&gt;
||&lt;br /&gt;
* Continue on Homonet&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Chen Chen&lt;br /&gt;
|| &lt;br /&gt;
* paper&lt;br /&gt;
* code, data for dataset release [http://cchen.cslt.org/mvs/ web]&lt;br /&gt;
||&lt;br /&gt;
* paper&lt;br /&gt;
* check code, data&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Pengqi Li&lt;br /&gt;
||  &lt;br /&gt;
* Update related work from other domains [https://z1et6d3xtb.feishu.cn/docx/doxcn5QLpYfgqhCxvuwteU854mb Outline]&lt;br /&gt;
||&lt;br /&gt;
* review and experiment&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zixi Yan&lt;br /&gt;
||  &lt;br /&gt;
* Graduation thesis&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoyu Jiang&lt;br /&gt;
|| &lt;br /&gt;
* Pick enrollment audio and face&lt;br /&gt;
* Graduation thesis&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Renmiao Chen&lt;br /&gt;
|| &lt;br /&gt;
* Pick enrollment audio&lt;br /&gt;
* writing paper&lt;br /&gt;
||&lt;br /&gt;
* Finish paper&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Wenqiang Du&lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Wan Lin&lt;br /&gt;
|| &lt;br /&gt;
* Experiments&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Tianhao Wang&lt;br /&gt;
|| &lt;br /&gt;
* Sunine following up: Large-margin fine-tuning strategy&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
|Xipin Wei&lt;br /&gt;
|| &lt;br /&gt;
* The experiment of multi-track music generation&lt;br /&gt;
* The paper for ICASSP &lt;br /&gt;
||&lt;br /&gt;
* Project Summary and Report&lt;br /&gt;
* A fresh look at the music generation paper&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;/div&gt;</summary>
		<author><name>Jianghaoyu</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/2022-10-24</id>
		<title>2022-10-24</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/2022-10-24"/>
				<updated>2022-10-24T10:58:14Z</updated>
		
		<summary type="html">&lt;p&gt;Jianghaoyu：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!People !! This Week !! Next Week !! Task Tracking (&amp;lt;font color=&amp;quot;red&amp;quot;&amp;gt;Deadline&amp;lt;/font&amp;gt;)&lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Dong Wang&lt;br /&gt;
|| &lt;br /&gt;
* Homonet experiments with MNIST: for generation model&lt;br /&gt;
* ICASSP papers&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li&lt;br /&gt;
|| &lt;br /&gt;
* Homo loss on SID&lt;br /&gt;
* Qiang's Paper&lt;br /&gt;
||&lt;br /&gt;
* ICASSP papers&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi&lt;br /&gt;
|| &lt;br /&gt;
* Homonet on aishell&lt;br /&gt;
* Homonet on Google speech command&lt;br /&gt;
||&lt;br /&gt;
* Continue on Homonet&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Chen Chen&lt;br /&gt;
|| &lt;br /&gt;
* paper&lt;br /&gt;
* code, data for dataset release [http://cchen.cslt.org/mvs/ web]&lt;br /&gt;
||&lt;br /&gt;
* paper&lt;br /&gt;
* check code, data&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Pengqi Li&lt;br /&gt;
||  &lt;br /&gt;
* Update related work from other domains [https://z1et6d3xtb.feishu.cn/docx/doxcn5QLpYfgqhCxvuwteU854mb Outline]&lt;br /&gt;
||&lt;br /&gt;
* review and experiment&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zixi Yan&lt;br /&gt;
||  &lt;br /&gt;
* Graduation thesis&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoyu Jiang&lt;br /&gt;
|| &lt;br /&gt;
* Extract embedding&lt;br /&gt;
* Graduation thesis&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Renmiao Chen&lt;br /&gt;
|| &lt;br /&gt;
* Pick enrollment audio&lt;br /&gt;
* writing paper&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Wenqiang Du&lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Wan Lin&lt;br /&gt;
|| &lt;br /&gt;
* Experiments&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Tianhao Wang&lt;br /&gt;
|| &lt;br /&gt;
* Sunine following up: Large-margin fine-tuning strategy&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
|Xipin Wei&lt;br /&gt;
|| &lt;br /&gt;
* The experiment of multi-track music generation&lt;br /&gt;
* The paper for ICASSP &lt;br /&gt;
||&lt;br /&gt;
* Project Summary and Report&lt;br /&gt;
* A fresh look at the music generation paper&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;/div&gt;</summary>
		<author><name>Jianghaoyu</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/2022-10-17</id>
		<title>2022-10-17</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/2022-10-17"/>
				<updated>2022-10-17T10:50:19Z</updated>
		
		<summary type="html">&lt;p&gt;Jianghaoyu：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!People !! This Week !! Next Week !! Task Tracking (&amp;lt;font color=&amp;quot;red&amp;quot;&amp;gt;Deadline&amp;lt;/font&amp;gt;)&lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Dong Wang&lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li&lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi&lt;br /&gt;
|| &lt;br /&gt;
* prepare for ICASSP 2023&lt;br /&gt;
||&lt;br /&gt;
* homomorphic loss for each layer of current model&lt;br /&gt;
* mixup for transformer based model&lt;br /&gt;
* finish the paper &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Chen Chen&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Pengqi Li&lt;br /&gt;
||  &lt;br /&gt;
* Finished gradient-based methods [https://z1et6d3xtb.feishu.cn/docx/doxcn5QLpYfgqhCxvuwteU854mb Outline]&lt;br /&gt;
||&lt;br /&gt;
* Experiments and review&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zixi Yan&lt;br /&gt;
||  &lt;br /&gt;
* Graduation thesis&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Sirui Li&lt;br /&gt;
||  &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoyu Jiang&lt;br /&gt;
|| &lt;br /&gt;
* Extract embedding&lt;br /&gt;
* Graduation thesis&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ruihai Hou&lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Renmiao Chen&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Wenqiang Du&lt;br /&gt;
|| &lt;br /&gt;
* Prepare papers&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Wan Lin&lt;br /&gt;
|| &lt;br /&gt;
* Experiments&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Tianhao Wang&lt;br /&gt;
|| &lt;br /&gt;
* How do deep speaker models treat silence and noise&lt;br /&gt;
||&lt;br /&gt;
* Sunine following up&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
|Xipin Wei&lt;br /&gt;
|| &lt;br /&gt;
* The experiment of multi-track music generation&lt;br /&gt;
* The paper for ICASSP&lt;br /&gt;
||&lt;br /&gt;
* The experiment of multi-track music generation&lt;br /&gt;
* The paper for ICASSP&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;/div&gt;</summary>
		<author><name>Jianghaoyu</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/2022-10-10</id>
		<title>2022-10-10</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/2022-10-10"/>
				<updated>2022-10-10T10:40:44Z</updated>
		
		<summary type="html">&lt;p&gt;Jianghaoyu：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!People !! This Week !! Next Week !! Task Tracking (&amp;lt;font color=&amp;quot;red&amp;quot;&amp;gt;Deadline&amp;lt;/font&amp;gt;)&lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Dong Wang&lt;br /&gt;
|| &lt;br /&gt;
* Literature review for production model&lt;br /&gt;
* Review for TASLP&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li&lt;br /&gt;
|| &lt;br /&gt;
* APT-Lab progress&lt;br /&gt;
* Hard trials (derivation and listening)&lt;br /&gt;
||&lt;br /&gt;
* Overview paper&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi&lt;br /&gt;
|| &lt;br /&gt;
* Experiments and paper for ICASSP 2023&lt;br /&gt;
||&lt;br /&gt;
* Continue on ICASSP 2023&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Chen Chen&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Pengqi Li&lt;br /&gt;
||  &lt;br /&gt;
* Experiments and summary of gradient-based methods [https://z1et6d3xtb.feishu.cn/docx/doxcn5QLpYfgqhCxvuwteU854mb Outline]&lt;br /&gt;
||&lt;br /&gt;
* finish it&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zixi Yan&lt;br /&gt;
||  &lt;br /&gt;
* Graduation thesis&lt;br /&gt;
* Work report&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Sirui Li&lt;br /&gt;
||  &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoyu Jiang&lt;br /&gt;
|| &lt;br /&gt;
* Extract embedding&lt;br /&gt;
* Process the data&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ruihai Hou&lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Renmiao Chen&lt;br /&gt;
|| &lt;br /&gt;
* Generate LiRAVD&lt;br /&gt;
* Extract embedding and compute score&lt;br /&gt;
||&lt;br /&gt;
* Finish paper writing&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Wenqiang Du&lt;br /&gt;
|| &lt;br /&gt;
* Translation overview&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Wan Lin&lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Tianhao Wang&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
|Xipin Wei&lt;br /&gt;
|| &lt;br /&gt;
* The experiment of multi-track music generation &lt;br /&gt;
||&lt;br /&gt;
* The experiment of multi-track music generation &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;/div&gt;</summary>
		<author><name>Jianghaoyu</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/2022-10-03</id>
		<title>2022-10-03</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/2022-10-03"/>
				<updated>2022-10-03T10:48:42Z</updated>
		
		<summary type="html">&lt;p&gt;Jianghaoyu：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!People !! This Week !! Next Week !! Task Tracking (&amp;lt;font color=&amp;quot;red&amp;quot;&amp;gt;Deadline&amp;lt;/font&amp;gt;)&lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Dong Wang&lt;br /&gt;
|| &lt;br /&gt;
* Review of Uyghur/Kazakh/Kyrgyz ASR, Chinese version done&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li&lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi&lt;br /&gt;
|| &lt;br /&gt;
* Finish my PhD proposal&lt;br /&gt;
* Start experiments on speech engrave&lt;br /&gt;
||&lt;br /&gt;
* verify speech engrave on clean data&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Chen Chen&lt;br /&gt;
|| &lt;br /&gt;
* Train ASR model on GRID and test WER&lt;br /&gt;
* Finish VCA-GAN experiments&lt;br /&gt;
* Prepare weekly-reading report&lt;br /&gt;
||&lt;br /&gt;
* Human hearing test&lt;br /&gt;
* WER&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Pengqi Li&lt;br /&gt;
||  &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
* Finish gradient-based methods&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zixi Yan&lt;br /&gt;
||  &lt;br /&gt;
* Modify the paper&lt;br /&gt;
* Graduation thesis&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoyu Jiang&lt;br /&gt;
|| &lt;br /&gt;
* data VAD&lt;br /&gt;
* Graduation thesis&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Renmiao Chen&lt;br /&gt;
|| &lt;br /&gt;
* Get results for MOBIO&lt;br /&gt;
* Pre-process the data&lt;br /&gt;
* Completed part of the paper writing. [http://index.cslt.org/mediawiki/images/e/e7/LiRAVD_Low_information_Real_world_Audio_Visual_Dataset_V0.1.pdf LiRAVD_draft_V0.1]&lt;br /&gt;
||&lt;br /&gt;
* Get results from all data&lt;br /&gt;
* Complete the first draft.&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Wenqiang Du&lt;br /&gt;
|| &lt;br /&gt;
* Some project cooperation&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Wan Lin&lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Tianhao Wang&lt;br /&gt;
|| &lt;br /&gt;
* collate the experiment results and summarize the time mask method&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
|Xipin Wei&lt;br /&gt;
|| &lt;br /&gt;
* The music generation experiment with multi-track instruments&lt;br /&gt;
||&lt;br /&gt;
* The music generation experiment with multi-track instruments&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;/div&gt;</summary>
		<author><name>Jianghaoyu</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/2022-09-26</id>
		<title>2022-09-26</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/2022-09-26"/>
				<updated>2022-09-26T10:54:03Z</updated>
		
		<summary type="html">&lt;p&gt;Jianghaoyu：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!People !! This Week !! Next Week !! Task Tracking (&amp;lt;font color=&amp;quot;red&amp;quot;&amp;gt;Deadline&amp;lt;/font&amp;gt;)&lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Dong Wang&lt;br /&gt;
|| &lt;br /&gt;
* Review low-resource ASR&lt;br /&gt;
||&lt;br /&gt;
* Complete the review&lt;br /&gt;
||&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li&lt;br /&gt;
|| &lt;br /&gt;
* Proposals on &lt;br /&gt;
** music generation&lt;br /&gt;
** binary speaker embedding&lt;br /&gt;
** acoustic-aware learning&lt;br /&gt;
||&lt;br /&gt;
* Hard trials paper...&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Chen Chen&lt;br /&gt;
|| &lt;br /&gt;
* Finish draft&lt;br /&gt;
* pre-process CMLR data&lt;br /&gt;
||&lt;br /&gt;
* Train ASR model on GRID and test WER&lt;br /&gt;
* Train VTS model on CMLR single spk&lt;br /&gt;
* Train VCA-GAN&lt;br /&gt;
* Prepare weekly-reading report&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Pengqi Li&lt;br /&gt;
||  &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zixi Yan&lt;br /&gt;
||  &lt;br /&gt;
* Revise paper&lt;br /&gt;
* make a resume&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoyu Jiang&lt;br /&gt;
|| &lt;br /&gt;
* weekly report&lt;br /&gt;
* Prepare the environment of Wenet&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ruihai Hou&lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Renmiao Chen&lt;br /&gt;
|| &lt;br /&gt;
* Finish data collection&lt;br /&gt;
* Finish CLLR score fusion&lt;br /&gt;
* Try the maskFace [http://index.cslt.org/mediawiki/images/5/5c/220926-CRM.pdf pdf]&lt;br /&gt;
||&lt;br /&gt;
* Finish tools for getting LiRAVD&lt;br /&gt;
* Extract embedding from LiRAVD and get results&lt;br /&gt;
* Complete the remaining benchmarks&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Wenqiang Du&lt;br /&gt;
|| &lt;br /&gt;
* Some project cooperation&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Wan Lin&lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Tianhao Wang&lt;br /&gt;
|| &lt;br /&gt;
* postgraduate recommendation&lt;br /&gt;
* time-aligned mask&lt;br /&gt;
||&lt;br /&gt;
* finish and summarize time-aligned mask&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
|Xipin Wei&lt;br /&gt;
|| &lt;br /&gt;
* Read the music generation documents&lt;br /&gt;
* The scheme of multi-track instruments&lt;br /&gt;
||&lt;br /&gt;
* The scheme of multi-track instruments&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;/div&gt;</summary>
		<author><name>Jianghaoyu</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/Weekly_reading</id>
		<title>Weekly reading</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/Weekly_reading"/>
				<updated>2022-09-23T12:21:48Z</updated>
		
		<summary type="html">&lt;p&gt;Jianghaoyu：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;br /&gt;
'''Internal Reading Group, Center for Speech and Language Technologies, Tsinghua University'''&lt;br /&gt;
&lt;br /&gt;
'''Time: Every Friday evening, 19:30'''&lt;br /&gt;
&lt;br /&gt;
'''Location: Room 303, Area 1'''&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
! Date !! Speaker!! Title !! Materials &lt;br /&gt;
|-&lt;br /&gt;
|   ||  || PPT template ||[[媒体文件:Weeklyreading_template.rar]]&lt;br /&gt;
|-&lt;br /&gt;
| 2021/04/01  ||Haoran Sun    || Zeus coding conventions ||[[媒体文件:代码规范.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2021/05/20  ||Chen Chen     || Overview of speech enhancement|| [[媒体文件:Speech_enhancement.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2021/05/27  ||Di Wang       || Secret of 'hard trials' || [[媒体文件:Secret_of_hard_trials.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2021/06/10  ||Jingxin Shen  ||Experiments on thermal-to-RGB face synthesis with CycleGAN and pix2pix || [[媒体文件:Expriments about thermal to RGB face synthesis with cycleGan and pix2pix.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2021/06/17  ||Yang Zhang    || NIPS2020: Long-Tailed Classification by Keeping the Good and Removing the Bad Momentum Causal Effect || [[媒体文件:long-tail.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2021/07/08  ||Tiankai Zhi   || Some experiments on stargan ||[[媒体文件:Some experiments on stargan.pdf]] &lt;br /&gt;
|-&lt;br /&gt;
| 2021/07/15  ||Jiao Han      || MG experiments based on ASV system || [[媒体文件:MG experiments based on ASV system..pptx]] &lt;br /&gt;
|-&lt;br /&gt;
| 2021/07/22  ||Zixi Yan &amp;amp; Sirui Li || Unsupervised Speech Recognition || [[媒体文件:Unsupervised_Speech_Recognition.pdf]] &lt;br /&gt;
|-&lt;br /&gt;
| 2021/07/29  ||Pengqi Li    || A Simulation Study on Robust MAML || [[媒体文件:A Simulation Study on 􏰛􏰜 Ro􏰛bust MAML.pdf]] &lt;br /&gt;
|-&lt;br /&gt;
| 2021/08/12  ||Qingyang Zhu || Noise-aware method for Speech Enhancement || [[媒体文件:Noise-aware method for Speech Enhancement.pdf]] &lt;br /&gt;
|-&lt;br /&gt;
| 2021/08/12  ||Weida Liang  ||  Unsupervised Audio-Visual Synthesis via Exemplar Autoencoders  ||  [[媒体文件:Bi-weekly_report_Liangwd.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2021/08/19  ||Di Wang      || Inter Dataset Variability Compensation ||   [[媒体文件:Inter_dataset_variability_compensation.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2021/09/02  ||Tiankai Zhi  || One Shot VC || [[媒体文件:One_shot_VC.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2021/09/09  ||Jingxin Shen || Thermal Speaking || [[媒体文件:Thermal_Speaking_2021.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2021/09/23  ||Sirui Li &amp;amp; Zixi Yan || Wav2vec-U Experimental Report || [[媒体文件:Wav2vec-U_experimental_report.pdf ‎]]&lt;br /&gt;
|-&lt;br /&gt;
| 2021/10/20  ||Renmiao Chen || Is Someone Speaking? || [[媒体文件:Is_Someone_Speaking_Exploring_Long-term_Temporal_Features.pdf ‎]]&lt;br /&gt;
|-&lt;br /&gt;
| 2021/10/28  ||Chen Chen    || WenetSpeech Introduction || [[媒体文件:WenetSpeech_Dataset_Introduction.pdf ‎]]&lt;br /&gt;
|-&lt;br /&gt;
| 2021/11/10  ||Weida Liang  || Cycle-loss Exemplar Autoencoder || [[媒体文件:Cycle-loss_Exemplar_Autoencoder.pdf ‎]]&lt;br /&gt;
|-&lt;br /&gt;
| 2021/11/17  ||吾买尔江      || Modulation Spectrum || [[媒体文件:Modulation_Spectrum.pdf ‎]]&lt;br /&gt;
|-&lt;br /&gt;
| 2021/11/24  ||Chen Chen    || S-DCCRN || [[媒体文件:S-DCCRN_pdf.pdf ‎]]&lt;br /&gt;
|-&lt;br /&gt;
| 2021/12/01  ||Pengqi Li    || GuidedMix: An on-the-fly data augmentation approach for robust speaker recognition system || [[媒体文件:201201-GuidedMix-LPQ.pdf ‎]]&lt;br /&gt;
|-&lt;br /&gt;
| 2021/12/08  ||Renmiao Chen || Multimodal person verification ||  [[媒体文件:Multimodal_preson_verification.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2021/12/15  ||Ruihai Hou   || Crossmodal clustered contrastive learning: Grounding of spoken language to gesture || [[媒体文件:Crossmodal_clustered_contrasti.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2021/12/29  ||Zixi Yan     || Capsules Network || [[媒体文件:Capsules_Network.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/01/05  ||Sirui Li     || Self-Supervised Learning for speech recognition with Intermediate layer supervision || [[媒体文件:SSL with Intermediate layer supervision.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/01/12  ||Weida Liang  || FragmentVC || [[媒体文件:FragmentVC.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/01/19  ||Haoyu Jiang  || Multi-modality Associative Bridging through Memory: Speech Sound Recollected from Face Video || [[媒体文件:Multi-modality_Associative_Bridging_through_Memory.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/02/14  ||             || Interspeech 2021 Review || [[媒体文件:Interspeech_paper_review_min.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/02/16  ||Chen Chen    || Audio Visual HuBERT || [[媒体文件:AVHuBERT.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/03/04  ||Pengqi Li    || Study of Visualization || [[媒体文件:Visualization.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/03/11  ||Renmiao Chen || Can audio-visual integration strengthen robustness under multimodal attacks? || [[媒体文件:Audio-Visual_Robustness_Under_Multimodal_Attacks.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/03/11  ||吾买尔江      || Signal Separation || [[媒体文件:Signal_Separation.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/03/18  ||Chen Chen    || Overview on Lip Reading and Audio-visual Speech Recognition || [[媒体文件:LipReadingAndAVSR.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/04/01  ||Ruihai Hou   || Scalable Identity-Oriented Speech Retrieval || [[媒体文件:Scalable_Identity-Oriented_Speech_Retrieval.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/04/08  ||Zixi Yan     || Wav2vec related papers share || [[媒体文件:Wav2vec_related_papers.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/04/22  ||Sirui Li     || Speech-Based Language Modelling || [[媒体文件:Speech-Based Language Modelling.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/04/29  ||Haoyu Jiang  || Models of Speaker Recognition || [[媒体文件:Models_of_Speaker_Recognition.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/05/13  ||Chen Chen    || Audio-visual Representation Learning  || [[媒体文件:Audio_visual_representation_learning.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/05/20  ||Haoran Sun   ||  || &lt;br /&gt;
|-&lt;br /&gt;
| 2022/05/27  ||Pengqi Li    || The important “feature” for speaker recognition || [[媒体文件:The important ”feature” for speaker recognition.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/06/10  ||Zixi Yan     || Paper Share || [[媒体文件:Paper_share_yzx0610.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/06/24  ||Renmiao Chen || Transformer in multimodal || [[媒体文件:Transformer_in_multimodal.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
|             ||             || ICASSP 2022 review || [[媒体文件:ICASSP2022_review.pdf]]  [[媒体文件:ICASSP-2022-readinglist.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/07/04  ||Chen Chen    || Video to Speech papers || [[媒体文件:VTS_cc.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/07/08  ||Ruihai Hou   || ICASSP 2022 review (part) || [[媒体文件:Weeklyreading_hrh.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/07/15  ||Sirui Li     || Towards End-to-end Unsupervised Speech Recognition || [[媒体文件:Towards_End_to_end_Unsupervised_Speech_Recognition.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/07/22  ||Wan Lin      || AutoED: Text-independent unsupervised speaker recognition Model|| [[媒体文件:AutoED_spk_reg.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/07/29  ||Haoyu Jiang  || ArcFace_iQIYI-VID || [[媒体文件:ArcFace_iQIYI-VID.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/08/05  ||Chen Chen    || Recent advance in VTS task || [[媒体文件:RecentVTS.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/08/12  ||Tianhao Wang || Extremal Perturbations || [[媒体文件:Extremal_perturbations.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/08/19  ||Renmiao Chen || The correlation of face and voice || [[媒体文件:The_correlation_of_face_and_vioce_CRM.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/09/02  ||Zixi Yan     || Non-Contrastive Self-supervised Learning || [[媒体文件:Non_contrastive_Self_supervised_Learning.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/09/09  ||Sirui Li     || Low Resource Speech Recognition || [[媒体文件:Low_Resource_Speech_Recognition_lsr_0909.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/09/16  ||Xipin Wei    || Controllable Multi-style Music Generation Model based on simple Contrastive Learning || [[媒体文件:Controllable_Multi_style_Music_Generation_Model_based_on_simple_Contrastive_learning.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/09/23  ||Haoyu Jiang  || Audio Visual Learning || [[媒体文件:Audio_Visual_Learning.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/09/30  ||Wan Lin      ||  || &lt;br /&gt;
|-&lt;br /&gt;
| 2022/10/07  ||Tianhao Wang ||  || &lt;br /&gt;
|-&lt;br /&gt;
| 2022/10/21  ||Chen Chen    ||  || &lt;br /&gt;
|-&lt;br /&gt;
| 2022/10/28  ||Pengqi Li    ||  || &lt;br /&gt;
|-&lt;br /&gt;
| 2022/11/04  ||Renmiao Chen ||  || &lt;br /&gt;
|-&lt;br /&gt;
|}&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
[[Old readings|Past Events]]&lt;/div&gt;</summary>
		<author><name>Jianghaoyu</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/Weekly_reading</id>
		<title>Weekly reading</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/Weekly_reading"/>
				<updated>2022-09-23T12:21:29Z</updated>
		
		<summary type="html">&lt;p&gt;Jianghaoyu：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;br /&gt;
'''Internal Reading Group, Center for Speech and Language Technologies, Tsinghua University'''&lt;br /&gt;
&lt;br /&gt;
'''Time: Every Friday evening, 19:30'''&lt;br /&gt;
&lt;br /&gt;
'''Location: Room 303, Area 1'''&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
! Date !! Speaker!! Title !! Materials &lt;br /&gt;
|-&lt;br /&gt;
|   ||  || PPT template ||[[媒体文件:Weeklyreading_template.rar]]&lt;br /&gt;
|-&lt;br /&gt;
| 2021/04/01  ||Haoran Sun    || Zeus coding conventions ||[[媒体文件:代码规范.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2021/05/20  ||Chen Chen     || Overview of speech enhancement|| [[媒体文件:Speech_enhancement.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2021/05/27  ||Di Wang       || Secret of 'hard trials' || [[媒体文件:Secret_of_hard_trials.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2021/06/10  ||Jingxin Shen  ||Experiments on thermal-to-RGB face synthesis with CycleGAN and pix2pix || [[媒体文件:Expriments about thermal to RGB face synthesis with cycleGan and pix2pix.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2021/06/17  ||Yang Zhang    || NIPS2020: Long-Tailed Classification by Keeping the Good and Removing the Bad Momentum Causal Effect || [[媒体文件:long-tail.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2021/07/08  ||Tiankai Zhi   || Some experiments on stargan ||[[媒体文件:Some experiments on stargan.pdf]] &lt;br /&gt;
|-&lt;br /&gt;
| 2021/07/15  ||Jiao Han      || MG experiments based on ASV system || [[媒体文件:MG experiments based on ASV system..pptx]] &lt;br /&gt;
|-&lt;br /&gt;
| 2021/07/22  ||Zixi Yan &amp;amp; Sirui Li || Unsupervised Speech Recognition || [[媒体文件:Unsupervised_Speech_Recognition.pdf]] &lt;br /&gt;
|-&lt;br /&gt;
| 2021/07/29  ||Pengqi Li    || A Simulation Study on Robust MAML || [[媒体文件:A Simulation Study on 􏰛􏰜 Ro􏰛bust MAML.pdf]] &lt;br /&gt;
|-&lt;br /&gt;
| 2021/08/12  ||Qingyang Zhu || Noise-aware method for Speech Enhancement || [[媒体文件:Noise-aware method for Speech Enhancement.pdf]] &lt;br /&gt;
|-&lt;br /&gt;
| 2021/08/12  ||Weida Liang  ||  Unsupervised Audio-Visual Synthesis via Exemplar Autoencoders  ||  [[媒体文件:Bi-weekly_report_Liangwd.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2021/08/19  ||Di Wang      || Inter Dataset Variability Compensation ||   [[媒体文件:Inter_dataset_variability_compensation.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2021/09/02  ||Tiankai Zhi  || One Shot VC || [[媒体文件:One_shot_VC.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2021/09/09  ||Jingxin Shen || Thermal Speaking || [[媒体文件:Thermal_Speaking_2021.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2021/09/23  ||Sirui Li &amp;amp; Zixi Yan || Wav2vec-U Experimental Report || [[媒体文件:Wav2vec-U_experimental_report.pdf ‎]]&lt;br /&gt;
|-&lt;br /&gt;
| 2021/10/20  ||Renmiao Chen || Is Someone Speaking? || [[媒体文件:Is_Someone_Speaking_Exploring_Long-term_Temporal_Features.pdf ‎]]&lt;br /&gt;
|-&lt;br /&gt;
| 2021/10/28  ||Chen Chen    || WenetSpeech Introduction || [[媒体文件:WenetSpeech_Dataset_Introduction.pdf ‎]]&lt;br /&gt;
|-&lt;br /&gt;
| 2021/11/10  ||Weida Liang  || Cycle-loss Exemplar Autoencoder || [[媒体文件:Cycle-loss_Exemplar_Autoencoder.pdf ‎]]&lt;br /&gt;
|-&lt;br /&gt;
| 2021/11/17  ||吾买尔江      || Modulation Spectrum || [[媒体文件:Modulation_Spectrum.pdf ‎]]&lt;br /&gt;
|-&lt;br /&gt;
| 2021/11/24  ||Chen Chen    || S-DCCRN || [[媒体文件:S-DCCRN_pdf.pdf ‎]]&lt;br /&gt;
|-&lt;br /&gt;
| 2021/12/01  ||Pengqi Li    || GuidedMix: An on-the-fly data augmentation approach for robust speaker recognition system || [[媒体文件:201201-GuidedMix-LPQ.pdf ‎]]&lt;br /&gt;
|-&lt;br /&gt;
| 2021/12/08  ||Renmiao Chen || Multimodal person verification ||  [[媒体文件:Multimodal_preson_verification.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2021/12/15  ||Ruihai Hou   || Crossmodal clustered contrastive learning: Grounding of spoken language to gesture || [[媒体文件:Crossmodal_clustered_contrasti.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2021/12/29  ||Zixi Yan     || Capsules Network || [[媒体文件:Capsules_Network.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/01/05  ||Sirui Li     || Self-Supervised Learning for speech recognition with Intermediate layer supervision || [[媒体文件:SSL with Intermediate layer supervision.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/01/12  ||Weida Liang  || FragmentVC || [[媒体文件:FragmentVC.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/01/19  ||Haoyu Jiang  || Multi-modality Associative Bridging through Memory: Speech Sound Recollected from Face Video || [[媒体文件:Multi-modality_Associative_Bridging_through_Memory.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/02/14  ||             || Interspeech 2021 Review || [[媒体文件:Interspeech_paper_review_min.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/02/16  ||Chen Chen    || Audio Visual HuBERT || [[媒体文件:AVHuBERT.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/03/04  ||Pengqi Li    || Study of Visualization || [[媒体文件:Visualization.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/03/11  ||Renmiao Chen || Can audio-visual integration strengthen robustness under multimodal attacks? || [[媒体文件:Audio-Visual_Robustness_Under_Multimodal_Attacks.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/03/11  ||吾买尔江      || Signal Separation || [[媒体文件:Signal_Separation.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/03/18  ||Chen Chen    || Overview on Lip Reading and Audio-visual Speech Recognition || [[媒体文件:LipReadingAndAVSR.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/04/01  ||Ruihai Hou   || Scalable Identity-Oriented Speech Retrieval || [[媒体文件:Scalable_Identity-Oriented_Speech_Retrieval.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/04/08  ||Zixi Yan     || Wav2vec related papers share || [[媒体文件:Wav2vec_related_papers.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/04/22  ||Sirui Li     || Speech-Based Language Modelling || [[媒体文件:Speech-Based Language Modelling.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/04/29  ||Haoyu Jiang  || Models of Speaker Recognition || [[媒体文件:Models_of_Speaker_Recognition.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/05/13  ||Chen Chen    || Audio-visual Representation Learning  || [[媒体文件:Audio_visual_representation_learning.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/05/20  ||Haoran Sun   ||  || &lt;br /&gt;
|-&lt;br /&gt;
| 2022/05/27  ||Pengqi Li    || The important “feature” for speaker recognition || [[媒体文件:The important ”feature” for speaker recognition.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/06/10  ||Zixi Yan     || Paper Share || [[媒体文件:Paper_share_yzx0610.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/06/24  ||Renmiao Chen || Transformer in multimodal || [[媒体文件:Transformer_in_multimodal.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
|             ||             || ICASSP 2022 review || [[媒体文件:ICASSP2022_review.pdf]]  [[媒体文件:ICASSP-2022-readinglist.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/07/04  ||Chen Chen    || Video to Speech papers || [[媒体文件:VTS_cc.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/07/08  ||Ruihai Hou   || ICASSP 2022 review (part) || [[媒体文件:Weeklyreading_hrh.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/07/15  ||Sirui Li     || Towards End-to-end Unsupervised Speech Recognition || [[媒体文件:Towards_End_to_end_Unsupervised_Speech_Recognition.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/07/22  ||Wan Lin      || AutoED: Text-independent unsupervised speaker recognition Model|| [[媒体文件:AutoED_spk_reg.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/07/29  ||Haoyu Jiang  || ArcFace_iQIYI-VID || [[媒体文件:ArcFace_iQIYI-VID.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/08/05  ||Chen Chen    || Recent advance in VTS task || [[媒体文件:RecentVTS.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/08/12  ||Tianhao Wang || Extremal Perturbations || [[媒体文件:Extremal_perturbations.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/08/19  ||Renmiao Chen || The correlation of face and voice || [[媒体文件:The_correlation_of_face_and_vioce_CRM.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/09/02  ||Zixi Yan     || Non-Contrastive Self-supervised Learning || [[媒体文件:Non_contrastive_Self_supervised_Learning.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/09/09  ||Sirui Li     || Low Resource Speech Recognition || [[媒体文件:Low_Resource_Speech_Recognition_lsr_0909.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/09/16  ||Xipin Wei    || Controllable Multi-style Music Generation Model based on simple Contrastive Learning || [[媒体文件:Controllable_Multi_style_Music_Generation_Model_based_on_simple_Contrastive_learning.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/09/23  ||Haoyu Jiang  || Audio Visual Learning || [[媒体文件:Audio_Visual_Learning.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/09/30  ||Wan Lin      ||  || &lt;br /&gt;
|-&lt;br /&gt;
| 2022/10/07  ||Tianhao Wang ||  || &lt;br /&gt;
|-&lt;br /&gt;
| 2022/10/21  ||Chen Chen    ||  || &lt;br /&gt;
|-&lt;br /&gt;
| 2022/10/28  ||Pengqi Li    ||  || &lt;br /&gt;
|-&lt;br /&gt;
| 2022/11/04  ||Renmiao Chen ||  || &lt;br /&gt;
|-&lt;br /&gt;
|}&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
[[Old readings|Past Events]]&lt;/div&gt;</summary>
		<author><name>Jianghaoyu</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Audio_Visual_Learning.pdf</id>
		<title>文件:Audio Visual Learning.pdf</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Audio_Visual_Learning.pdf"/>
				<updated>2022-09-23T12:20:33Z</updated>
		
		<summary type="html">&lt;p&gt;Jianghaoyu：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Jianghaoyu</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/2022-09-19</id>
		<title>2022-09-19</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/2022-09-19"/>
				<updated>2022-09-19T10:52:20Z</updated>
		
		<summary type="html">&lt;p&gt;Jianghaoyu：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!People !! This Week !! Next Week !! Task Tracking (&amp;lt;font color=&amp;quot;red&amp;quot;&amp;gt;Deadline&amp;lt;/font&amp;gt;)&lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Dong Wang&lt;br /&gt;
|| &lt;br /&gt;
* Purify AIGraph 3rd part&lt;br /&gt;
* Feedback to AIBook review &lt;br /&gt;
* Review work for Interspeech doctoral forum and several journal papers&lt;br /&gt;
* Slides for speech course&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li&lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi&lt;br /&gt;
|| &lt;br /&gt;
* PhD research proposal&lt;br /&gt;
* aishell course video&lt;br /&gt;
* Few-shot kws training&lt;br /&gt;
||&lt;br /&gt;
* research proposal ...&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Chen Chen&lt;br /&gt;
|| &lt;br /&gt;
* Finish processing &amp;amp; annotating data&lt;br /&gt;
* Try VCAGAN, Lip2Wav, xTS training&lt;br /&gt;
* Finish preparing training data for VCAGAN, Lip2Wav, xTS&lt;br /&gt;
* Start draft&lt;br /&gt;
||&lt;br /&gt;
* train models&lt;br /&gt;
* finish Chinese [https://demo.hedgedoc.org/LDgt1VGhRAKjYz7Dzid-WA?view draft] &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Pengqi Li&lt;br /&gt;
||  &lt;br /&gt;
* Gradient-based methods&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zixi Yan&lt;br /&gt;
||  &lt;br /&gt;
* Experimental data environment arrangement&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Sirui Li&lt;br /&gt;
||  &lt;br /&gt;
* End-to-end unsupervised ASR&lt;br /&gt;
* make a resume&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoyu Jiang&lt;br /&gt;
|| &lt;br /&gt;
* Check LiRAVD data&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ruihai Hou&lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Renmiao Chen&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Wenqiang Du&lt;br /&gt;
|| &lt;br /&gt;
* Minority-language speech recognition paper&lt;br /&gt;
* release asr model &lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Wan Lin&lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Tianhao Wang&lt;br /&gt;
|| &lt;br /&gt;
* leave-one-out method (based on time-domain)&lt;br /&gt;
* postgraduate recommendation&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
|Xipin Wei&lt;br /&gt;
|| &lt;br /&gt;
* Research the paper: Controllable Multi-style Music Generation Model Based on Simple Contrastive Learning&lt;br /&gt;
||&lt;br /&gt;
* Model training&lt;br /&gt;
* Experiments&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;/div&gt;</summary>
		<author><name>Jianghaoyu</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/2022-09-12</id>
		<title>2022-09-12</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/2022-09-12"/>
				<updated>2022-09-12T10:55:31Z</updated>
		
		<summary type="html">&lt;p&gt;Jianghaoyu：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!People !! This Week !! Next Week !! Task Tracking (&amp;lt;font color=&amp;quot;red&amp;quot;&amp;gt;Deadline&amp;lt;/font&amp;gt;)&lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Dong Wang&lt;br /&gt;
|| &lt;br /&gt;
* Polish AI-Graph&lt;br /&gt;
* ISCSLP shuffle paper&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li&lt;br /&gt;
|| &lt;br /&gt;
* hard trials and shuffle papers.&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi&lt;br /&gt;
|| &lt;br /&gt;
* outline of my research proposal&lt;br /&gt;
* training data for multi-lingual kws&lt;br /&gt;
* the final aishell course&lt;br /&gt;
||&lt;br /&gt;
* write research proposal&lt;br /&gt;
* Finish multi-lingual kws training and test on real test data (jiuming)&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Chen Chen&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Pengqi Li&lt;br /&gt;
||  &lt;br /&gt;
* Gradient-based methods (Integrated Gradients (IG), LRP (1/2))&lt;br /&gt;
||&lt;br /&gt;
* Experiments and summary&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zixi Yan&lt;br /&gt;
||  &lt;br /&gt;
* LiRAVD data collection&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Sirui Li&lt;br /&gt;
||  &lt;br /&gt;
* weekly report&lt;br /&gt;
* submit paper&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoyu Jiang&lt;br /&gt;
|| &lt;br /&gt;
* Check LiRAVD data&lt;br /&gt;
* Find more people to collect&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Renmiao Chen&lt;br /&gt;
|| &lt;br /&gt;
* Add new functions to the data-collection website&lt;br /&gt;
* audit data&lt;br /&gt;
||&lt;br /&gt;
* audit data&lt;br /&gt;
* prepare for pipeline&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Wenqiang Du&lt;br /&gt;
|| &lt;br /&gt;
* Minority-language speech recognition paper&lt;br /&gt;
* Debug big model training for Aibabel&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Wan Lin&lt;br /&gt;
|| &lt;br /&gt;
* finish paper&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Tianhao Wang&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
|Xipin Wei&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;/div&gt;</summary>
		<author><name>Jianghaoyu</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/2022-09-05</id>
		<title>2022-09-05</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/2022-09-05"/>
				<updated>2022-09-05T10:55:14Z</updated>
		
		<summary type="html">&lt;p&gt;Jianghaoyu：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!People !! This Week !! Next Week !! Task Tracking (&amp;lt;font color=&amp;quot;red&amp;quot;&amp;gt;Deadline&amp;lt;/font&amp;gt;)&lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Dong Wang&lt;br /&gt;
|| &lt;br /&gt;
* Simplify AI-graph&lt;br /&gt;
* CNSRC paper (part work)&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li&lt;br /&gt;
||&lt;br /&gt;
* integrate paper on hard trials (fast SVM).&lt;br /&gt;
* review CNC papers.&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Chen Chen&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Pengqi Li&lt;br /&gt;
||  &lt;br /&gt;
* Investigating interpretability libraries&lt;br /&gt;
* Study interpretability in text classification&lt;br /&gt;
||&lt;br /&gt;
* Gradient-based methods (Integrated Gradients (IG), LRP, DeepLIFT)&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zixi Yan&lt;br /&gt;
||  &lt;br /&gt;
* Prepare papers to share&lt;br /&gt;
* LiRAVD data collection&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Sirui Li&lt;br /&gt;
||  &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoyu Jiang&lt;br /&gt;
|| &lt;br /&gt;
* Check LiRAVD data&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ruihai Hou&lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Renmiao Chen&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Wenqiang Du&lt;br /&gt;
|| &lt;br /&gt;
* aishell Kaldi course&lt;br /&gt;
* Minority-language speech recognition paper&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Wan Lin&lt;br /&gt;
|| &lt;br /&gt;
* Editor paper&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;/div&gt;</summary>
		<author><name>Jianghaoyu</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/2022-08-29</id>
		<title>2022-08-29</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/2022-08-29"/>
				<updated>2022-08-29T10:57:48Z</updated>
		
		<summary type="html">&lt;p&gt;Jianghaoyu：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!People !! This Week !! Next Week !! Task Tracking (&amp;lt;font color=&amp;quot;red&amp;quot;&amp;gt;Deadline&amp;lt;/font&amp;gt;)&lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Dong Wang&lt;br /&gt;
|| &lt;br /&gt;
* AI graph online resource done. &lt;br /&gt;
* Resume the work on the hard trials paper&lt;br /&gt;
&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li&lt;br /&gt;
|| &lt;br /&gt;
* hard trials of CNC.E (SVM training...)&lt;br /&gt;
* review CNC series.&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Chen Chen&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Pengqi Li&lt;br /&gt;
||  &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zixi Yan&lt;br /&gt;
||  &lt;br /&gt;
* Prepare papers to share&lt;br /&gt;
* kaldi-cslt&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Sirui Li&lt;br /&gt;
||  &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoyu Jiang&lt;br /&gt;
|| &lt;br /&gt;
* Test on AveRobot and VoxCeleb1&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ruihai Hou&lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Renmiao Chen&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Wenqiang Du&lt;br /&gt;
|| &lt;br /&gt;
* aishell Kaldi course&lt;br /&gt;
* Minority-language speech recognition paper &lt;br /&gt;
* Some project cooperation&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Wan Lin&lt;br /&gt;
|| &lt;br /&gt;
* Some experiments&lt;br /&gt;
* Paper writing&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;/div&gt;</summary>
		<author><name>Jianghaoyu</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/2022-08-22</id>
		<title>2022-08-22</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/2022-08-22"/>
				<updated>2022-08-22T10:49:56Z</updated>
		
		<summary type="html">&lt;p&gt;Jianghaoyu：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!People !! This Week !! Next Week !! Task Tracking (&amp;lt;font color=&amp;quot;red&amp;quot;&amp;gt;Deadline&amp;lt;/font&amp;gt;)&lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Dong Wang&lt;br /&gt;
|| &lt;br /&gt;
* AI graph, online resource, part 4, nearly done (8 chapters missing)&lt;br /&gt;
* Design efficient computation for the micromagnetic equation&lt;br /&gt;
&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li&lt;br /&gt;
|| &lt;br /&gt;
* hard trials of CNC.E (SVM training...)&lt;br /&gt;
* trifles of BUPT projects&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi&lt;br /&gt;
|| &lt;br /&gt;
* Fix and re-configure server ...&lt;br /&gt;
* Fix a bug in transformer kws&lt;br /&gt;
||&lt;br /&gt;
* continue work on transformer kws&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Chen Chen&lt;br /&gt;
|| &lt;br /&gt;
* Help with data-collection tools&lt;br /&gt;
* Train xTS on multi-speaker dependent and independent tasks&lt;br /&gt;
||&lt;br /&gt;
* Retrain the xTS model or HiFi-GAN to match hyperparameters&lt;br /&gt;
* Support the data-collection progress&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Pengqi Li&lt;br /&gt;
||  &lt;br /&gt;
* Conference presentation ppt and poster&lt;br /&gt;
||&lt;br /&gt;
* presentation record &lt;br /&gt;
* max activation analysis&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zixi Yan&lt;br /&gt;
||  &lt;br /&gt;
* Mongolian Kaldi-cslt project&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Sirui Li&lt;br /&gt;
||  &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoyu Jiang&lt;br /&gt;
|| &lt;br /&gt;
* Data collection test&lt;br /&gt;
* Dataset download&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ruihai Hou&lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Renmiao Chen&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Wenqiang Du&lt;br /&gt;
|| &lt;br /&gt;
* Some project cooperation&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Wan Lin&lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;/div&gt;</summary>
		<author><name>Jianghaoyu</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/2022-08-15</id>
		<title>2022-08-15</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/2022-08-15"/>
				<updated>2022-08-15T11:02:21Z</updated>
		
		<summary type="html">&lt;p&gt;Jianghaoyu：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!People !! This Week !! Next Week !! Task Tracking (&amp;lt;font color=&amp;quot;red&amp;quot;&amp;gt;DeadLine&amp;lt;/font&amp;gt;)&lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Dong Wang&lt;br /&gt;
|| &lt;br /&gt;
* AI-graph online resource, 3rd part done.&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li&lt;br /&gt;
|| &lt;br /&gt;
* Simulation on hard trials.&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi&lt;br /&gt;
|| &lt;br /&gt;
* transformer kws done, waiting for run.&lt;br /&gt;
* fix server&lt;br /&gt;
* aishell course&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Chen Chen&lt;br /&gt;
|| &lt;br /&gt;
* Reproduce VTS method [https://demo.hedgedoc.org/TjDwlDO2QJ2PQ55WRBShuw?view doc]&lt;br /&gt;
* Help with audit tools&lt;br /&gt;
||&lt;br /&gt;
* Add post-process to our dataset&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Pengqi Li&lt;br /&gt;
||  &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zixi Yan&lt;br /&gt;
||  &lt;br /&gt;
* Pre-training and fine-tuning process for SSAST project&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Sirui Li&lt;br /&gt;
||  &lt;br /&gt;
* Revise paper&lt;br /&gt;
||&lt;br /&gt;
* more experiments&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoyu Jiang&lt;br /&gt;
|| &lt;br /&gt;
* Benchmark&lt;br /&gt;
* Data collection test&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ruihai Hou&lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Renmiao Chen&lt;br /&gt;
|| &lt;br /&gt;
* Finish the collection pipeline&lt;br /&gt;
||&lt;br /&gt;
* Refine the collection pipeline&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Wenqiang Du&lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Wan Lin&lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;/div&gt;</summary>
		<author><name>Jianghaoyu</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/2022-08-08</id>
		<title>2022-08-08</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/2022-08-08"/>
				<updated>2022-08-08T10:46:38Z</updated>
		
		<summary type="html">&lt;p&gt;Jianghaoyu：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!People !! This Week !! Next Week !! Task Tracking (&amp;lt;font color=&amp;quot;red&amp;quot;&amp;gt;DeadLine&amp;lt;/font&amp;gt;)&lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Dong Wang&lt;br /&gt;
|| &lt;br /&gt;
*AI-graph online resource (part 2 done).&lt;br /&gt;
*Some review work for HLT and OCOCOSDA.&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li&lt;br /&gt;
|| &lt;br /&gt;
*hard trials of CNC.E&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi&lt;br /&gt;
|| &lt;br /&gt;
*zeus kws tools coding&lt;br /&gt;
** THU energy kws model done&lt;br /&gt;
** Google few-shot keywords for any language (instance learning) done&lt;br /&gt;
** Samsung transformer-based location-guided attention kws done (single-head transformer)&lt;br /&gt;
* slides for aishell 7th course&lt;br /&gt;
||&lt;br /&gt;
* Samsung transformer-based location-guided attention (multi-head)&lt;br /&gt;
* large scale kws training&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Chen Chen&lt;br /&gt;
|| &lt;br /&gt;
* Weekly Reading report&lt;br /&gt;
* Collect News30minutes data [https://codimd.s3.shivering-isles.com/demo/uploads/3979c39f-7ef8-4752-8426-77ffadcae5bb.png img]&lt;br /&gt;
* Audit &amp;amp; collect web page demo&lt;br /&gt;
||&lt;br /&gt;
* Process News30minutes data&lt;br /&gt;
* collect open source vts code&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Pengqi Li&lt;br /&gt;
||  &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zixi Yan&lt;br /&gt;
||  &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Sirui Li&lt;br /&gt;
||  &lt;br /&gt;
* Finish first draft of the paper&lt;br /&gt;
||&lt;br /&gt;
* Revise the paper&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoyu Jiang&lt;br /&gt;
|| &lt;br /&gt;
* Compare ArcFace and FaceNet&lt;br /&gt;
* Test ArcFace on Vox1&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ruihai Hou&lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Renmiao Chen&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Wenqiang Du&lt;br /&gt;
|| &lt;br /&gt;
* Some project cooperation&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Wan Lin&lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;/div&gt;</summary>
		<author><name>Jianghaoyu</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/2022-08-01</id>
		<title>2022-08-01</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/2022-08-01"/>
				<updated>2022-08-01T10:51:07Z</updated>
		
		<summary type="html">&lt;p&gt;Jianghaoyu：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!People !! This Week !! Next Week !! Task Tracking (&amp;lt;font color=&amp;quot;red&amp;quot;&amp;gt;DeadLine&amp;lt;/font&amp;gt;)&lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Dong Wang&lt;br /&gt;
|| &lt;br /&gt;
* APSIPA hard trials paper &lt;br /&gt;
* AI graph electronic resource (chapter 1 done)&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li&lt;br /&gt;
|| &lt;br /&gt;
* Submit hard-trials paper&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi&lt;br /&gt;
|| &lt;br /&gt;
* aishell course video&lt;br /&gt;
* kws coding&lt;br /&gt;
||&lt;br /&gt;
* test code and prepare baseline&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Chen Chen&lt;br /&gt;
|| &lt;br /&gt;
* [https://demo.hedgedoc.org/uUA45m4bTL-wFiESUy1VIA?view doc]&lt;br /&gt;
* Add SyncNet to pipeline&lt;br /&gt;
* Analyze drawbacks of current pipeline&lt;br /&gt;
||&lt;br /&gt;
* Reading recent two years paper on VTS&lt;br /&gt;
* Prepare and run pipeline&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Pengqi Li&lt;br /&gt;
||  &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zixi Yan&lt;br /&gt;
||  &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Sirui Li&lt;br /&gt;
||  &lt;br /&gt;
* paper outline&lt;br /&gt;
* kaldi script comments: gmm-hmm&lt;br /&gt;
||&lt;br /&gt;
* paper&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoyu Jiang&lt;br /&gt;
|| &lt;br /&gt;
* weekly reading&lt;br /&gt;
* tests of DeepFace&lt;br /&gt;
||&lt;br /&gt;
* update pipeline code&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ruihai Hou&lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Renmiao Chen&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Wenqiang Du&lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Wan Lin&lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;/div&gt;</summary>
		<author><name>Jianghaoyu</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/Weekly_reading</id>
		<title>Weekly reading</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/Weekly_reading"/>
				<updated>2022-07-29T12:42:32Z</updated>
		
		<summary type="html">&lt;p&gt;Jianghaoyu：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;br /&gt;
'''Internal reading group, Center for Speech and Language Technologies, Tsinghua University'''&lt;br /&gt;
&lt;br /&gt;
'''Time: every Friday, 19:30'''&lt;br /&gt;
&lt;br /&gt;
'''Location: Room 303, Zone 1'''&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
! Date !! Speaker!! Title !! Materials &lt;br /&gt;
|-&lt;br /&gt;
|   ||  || PPT template ||[[媒体文件:Weeklyreading_template.rar]]&lt;br /&gt;
|-&lt;br /&gt;
| 2021/04/01  ||Haoran Sun    || Zeus code regularization ||[[媒体文件:代码规范.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2021/05/20  ||Chen Chen     || Overview of speech enhancement|| [[媒体文件:Speech_enhancement.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2021/05/27  ||Di Wang       || Secret of 'hard trials' || [[媒体文件:Secret_of_hard_trials.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2021/06/10  ||Jingxin Shen  ||Experiments about thermal to RGB face synthesis with cycleGAN and pix2pix || [[媒体文件:Expriments about thermal to RGB face synthesis with cycleGan and pix2pix.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2021/06/17  ||Yang Zhang    || NIPS2020: Long-Tailed Classification by Keeping the Good and Removing the Bad Momentum Causal Effect || [[媒体文件:long-tail.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2021/07/08  ||Tiankai Zhi   || Some experiments on stargan ||[[媒体文件:Some experiments on stargan.pdf]] &lt;br /&gt;
|-&lt;br /&gt;
| 2021/07/15  ||Jiao Han      || MG experiments based on ASV system || [[媒体文件:MG experiments based on ASV system..pptx]] &lt;br /&gt;
|-&lt;br /&gt;
| 2021/07/22  ||Zixi Yan &amp;amp; Sirui Li || Unsupervised Speech Recognition || [[媒体文件:Unsupervised_Speech_Recognition.pdf]] &lt;br /&gt;
|-&lt;br /&gt;
| 2021/07/29  ||Pengqi Li    || A Simulation Study on Robust MAML || [[媒体文件:A Simulation Study on 􏰛􏰜 Ro􏰛bust MAML.pdf]] &lt;br /&gt;
|-&lt;br /&gt;
| 2021/08/12  ||Qingyang Zhu || Noise-aware method for Speech Enhancement || [[媒体文件:Noise-aware method for Speech Enhancement.pdf]] &lt;br /&gt;
|-&lt;br /&gt;
| 2021/08/12  ||Weida Liang  ||  Unsupervised Audio-Visual Synthesis via Exemplar Autoencoders  ||  [[媒体文件:Bi-weekly_report_Liangwd.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2021/08/19  ||Di Wang      || Inter Dataset Variability Compensation ||   [[媒体文件:Inter_dataset_variability_compensation.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2021/09/02  ||Tiankai Zhi  || One Shot VC || [[媒体文件:One_shot_VC.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2021/09/09  ||Jingxin Shen || Thermal Speaking || [[媒体文件:Thermal_Speaking_2021.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2021/09/23  ||Sirui Li &amp;amp; Zixi Yan || Wav2vec-U Experimental Report || [[媒体文件:Wav2vec-U_experimental_report.pdf ‎]]&lt;br /&gt;
|-&lt;br /&gt;
| 2021/10/20  ||Renmiao Chen || Is Someone Speaking? || [[媒体文件:Is_Someone_Speaking_Exploring_Long-term_Temporal_Features.pdf ‎]]&lt;br /&gt;
|-&lt;br /&gt;
| 2021/10/28  ||Chen Chen    || WenetSpeech Introduction || [[媒体文件:WenetSpeech_Dataset_Introduction.pdf ‎]]&lt;br /&gt;
|-&lt;br /&gt;
| 2021/11/10  ||Weida Liang  || Cycle-loss Exemplar Autoencoder || [[媒体文件:Cycle-loss_Exemplar_Autoencoder.pdf ‎]]&lt;br /&gt;
|-&lt;br /&gt;
| 2021/11/17  ||吾买尔江      || Modulation Spectrum || [[媒体文件:Modulation_Spectrum.pdf ‎]]&lt;br /&gt;
|-&lt;br /&gt;
| 2021/11/24  ||Chen Chen    || S-DCCRN || [[媒体文件:S-DCCRN_pdf.pdf ‎]]&lt;br /&gt;
|-&lt;br /&gt;
| 2021/12/01  ||Pengqi Li    || GuidedMix: An on-the-fly data augmentation approach for robust speaker recognition system || [[媒体文件:201201-GuidedMix-LPQ.pdf ‎]]&lt;br /&gt;
|-&lt;br /&gt;
| 2021/12/08  ||Renmiao Chen || Multimodal person verification ||  [[媒体文件:Multimodal_preson_verification.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2021/12/15  ||Ruihai Hou   || Crossmodal clustered contrastive learning: Grounding of spoken language to gesture || [[媒体文件:Crossmodal_clustered_contrasti.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2021/12/29  ||Zixi Yan     || Capsules Network || [[媒体文件:Capsules_Network.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/01/05  ||Sirui Li     || Self-Supervised Learning for speech recognition with Intermediate layer supervision || [[媒体文件:SSL with Intermediate layer supervision.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/01/12  ||Weida Liang  || FragmentVC || [[媒体文件:FragmentVC.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/01/19  ||Haoyu Jiang  || Multi-modality Associative Bridging through Memory: Speech Sound Recollected from Face Video || [[媒体文件:Multi-modality_Associative_Bridging_through_Memory.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/02/14  ||             || Interspeech 2021 Review || [[媒体文件:Interspeech_paper_review_min.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/02/16  ||Chen Chen    || Audio Visual HuBERT || [[媒体文件:AVHuBERT.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/03/04  ||Pengqi Li    || Study of Visualization || [[媒体文件:Visualization.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/03/11  ||Renmiao Chen || Can audio-visual integration strengthen robustness under multimodal attacks? || [[媒体文件:Audio-Visual_Robustness_Under_Multimodal_Attacks.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/03/11  ||吾买尔江      || Signal Separation || [[媒体文件:Signal_Separation.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/03/18  ||Chen Chen    || Overview on Lip Reading and Audio-visual Speech Recognition || [[媒体文件:LipReadingAndAVSR.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/04/01  ||Ruihai Hou   || Scalable Identity-Oriented Speech Retrieval || [[媒体文件:Scalable_Identity-Oriented_Speech_Retrieval.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/04/08  ||Zixi Yan     || Wav2vec related papers share || [[媒体文件:Wav2vec_related_papers.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/04/22  ||Sirui Li     || Speech-Based Language Modelling || [[媒体文件:Speech-Based Language Modelling.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/04/29  ||Haoyu Jiang  || Models of Speaker Recognition || [[媒体文件:Models_of_Speaker_Recognition.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/05/13  ||Chen Chen    || Audio-visual Representation Learning  || [[媒体文件:Audio_visual_representation_learning.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/05/20  ||Haoran Sun   ||  || &lt;br /&gt;
|-&lt;br /&gt;
| 2022/05/27  ||Pengqi Li    || The important ”feature” for speaker recognition || [[媒体文件:The important ”feature” for speaker recognition.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/06/10  ||Zixi Yan     || Paper Share || [[媒体文件:Paper_share_yzx0610.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/06/24  ||Renmiao Chen || Transformer in multimodal || [[媒体文件:Transformer_in_multimodal.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
|             ||             || ICASSP 2022 review || [[媒体文件:ICASSP2022_review.pdf]]  [[媒体文件:ICASSP-2022-readinglist.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/07/04  ||Chen Chen    || Video to Speech papers || [[媒体文件:VTS_cc.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/07/08  ||Ruihai Hou   || ICASSP 2022 review (part) || [[媒体文件:Weeklyreading_hrh.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/07/15  ||Sirui Li     || Towards End-to-end Unsupervised Speech Recognition || [[媒体文件:Towards_End_to_end_Unsupervised_Speech_Recognition.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/07/22  ||Wan Lin      || AutoED: Text-independent unsupervised speaker recognition Model|| [[媒体文件:AutoED_spk_reg.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/07/29  ||Haoyu Jiang  || ArcFace_iQIYI-VID || [[媒体文件:ArcFace_iQIYI-VID.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
|             ||Chen Chen    ||  || &lt;br /&gt;
|-&lt;br /&gt;
|             ||Renmiao Chen ||  || &lt;br /&gt;
|-&lt;br /&gt;
|             ||Ruihai Hou   ||  ||&lt;br /&gt;
|-&lt;br /&gt;
|             ||Zixi Yan     ||  || &lt;br /&gt;
|-&lt;br /&gt;
|             ||Sirui Li     ||  || &lt;br /&gt;
|-&lt;br /&gt;
|             ||Haoyu Jiang  ||  || &lt;br /&gt;
|-&lt;br /&gt;
|}&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
[[Old readings|Past Events]]&lt;/div&gt;</summary>
		<author><name>Jianghaoyu</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:ArcFace_iQIYI-VID.pdf</id>
		<title>文件:ArcFace iQIYI-VID.pdf</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:ArcFace_iQIYI-VID.pdf"/>
				<updated>2022-07-29T12:41:52Z</updated>
		
		<summary type="html">&lt;p&gt;Jianghaoyu：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Jianghaoyu</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/2022-07-25</id>
		<title>2022-07-25</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/2022-07-25"/>
				<updated>2022-07-25T10:56:26Z</updated>
		
		<summary type="html">&lt;p&gt;Jianghaoyu：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!People !! This Week !! Next Week !! Task Tracking (&amp;lt;font color=&amp;quot;red&amp;quot;&amp;gt;DeadLine&amp;lt;/font&amp;gt;)&lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Dong Wang&lt;br /&gt;
|| &lt;br /&gt;
* AI-graph online resource &lt;br /&gt;
* Submit PAMI paper&lt;br /&gt;
* Reformat hard-trials paper&lt;br /&gt;
* Format CNSRC paper&lt;br /&gt;
&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li&lt;br /&gt;
|| &lt;br /&gt;
*Paper on hard trials&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi&lt;br /&gt;
|| &lt;br /&gt;
* coding&lt;br /&gt;
* aishell course video&lt;br /&gt;
||&lt;br /&gt;
* finish coding&lt;br /&gt;
* transformer keyword spotting baseline&lt;br /&gt;
* instance keyword baseline &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Chen Chen&lt;br /&gt;
|| &lt;br /&gt;
* [https://demo.hedgedoc.org/uUA45m4bTL-wFiESUy1VIA?view doc]&lt;br /&gt;
* Finish data_collector version 0.1&lt;br /&gt;
* Processed two mini-batches&lt;br /&gt;
||&lt;br /&gt;
* Manually evaluate generated videos and finetune data_collector&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Pengqi Li&lt;br /&gt;
||  &lt;br /&gt;
* study some post-hoc methods&lt;br /&gt;
||&lt;br /&gt;
* review and summarize&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zixi Yan&lt;br /&gt;
||  &lt;br /&gt;
* Read SSAST: Self-Supervised Audio Spectrogram Transformer&lt;br /&gt;
* Wav2vec multi-layer feature combination research and system design&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Sirui Li&lt;br /&gt;
||  &lt;br /&gt;
* SV task: use wav2vec, HuBERT, and MFCC features&lt;br /&gt;
||&lt;br /&gt;
* paper&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoyu Jiang&lt;br /&gt;
|| &lt;br /&gt;
* Tests of InsightFace&lt;br /&gt;
||&lt;br /&gt;
* Fix the face detection model&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ruihai Hou&lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Renmiao Chen&lt;br /&gt;
|| &lt;br /&gt;
* Determine our plan&lt;br /&gt;
* Finish audio_drived_data_collector version 0.1&lt;br /&gt;
* Analyze some examples&lt;br /&gt;
* Design strategies for collection&lt;br /&gt;
* [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/d/d8/220725-Audio_driven%E5%88%9D%E6%AD%A5%E6%83%B3%E6%B3%95.pdf conclusion &amp;amp; preliminary ideas]&lt;br /&gt;
||&lt;br /&gt;
* Finish video-driven data collector&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Wenqiang Du&lt;br /&gt;
|| &lt;br /&gt;
* Some project cooperation&lt;br /&gt;
||&lt;br /&gt;
* Some project cooperation&lt;br /&gt;
* English acoustic model upgrade&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Wan Lin&lt;br /&gt;
|| &lt;br /&gt;
* Test the method mentioned in the last report&lt;br /&gt;
* Papers on noise filtering&lt;br /&gt;
||&lt;br /&gt;
* Find ways to improve model adaptability&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;/div&gt;</summary>
		<author><name>Jianghaoyu</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/2022-06-27</id>
		<title>2022-06-27</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/2022-06-27"/>
				<updated>2022-06-27T10:18:43Z</updated>
		
		<summary type="html">&lt;p&gt;Jianghaoyu：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!People !! This Week !! Next Week !! Task Tracking (&amp;lt;font color=&amp;quot;red&amp;quot;&amp;gt;DeadLine&amp;lt;/font&amp;gt;)&lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Dong Wang&lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yunqi Cai&lt;br /&gt;
||  &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li&lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoran Sun&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Chen Chen&lt;br /&gt;
|| &lt;br /&gt;
* ICASSP 2022 review&lt;br /&gt;
||&lt;br /&gt;
* Reproduce some VSR models&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Pengqi Li&lt;br /&gt;
||  &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zixi Yan&lt;br /&gt;
||  &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Sirui Li&lt;br /&gt;
||  &lt;br /&gt;
* Review ICASSP 2022 paper&lt;br /&gt;
* Unsupervised ASR on TIMIT and Librispeech&lt;br /&gt;
||&lt;br /&gt;
* Go on unsupervised ASR&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoyu Jiang&lt;br /&gt;
|| &lt;br /&gt;
* Review ICASSP 2022 paper&lt;br /&gt;
* AV-CNC face detection&lt;br /&gt;
||&lt;br /&gt;
* Face verification test&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ruihai Hou&lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Renmiao Chen&lt;br /&gt;
|| &lt;br /&gt;
* Prepare for paper sharing&lt;br /&gt;
||&lt;br /&gt;
* Read some papers about speaker recognition&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Wenqiang Du&lt;br /&gt;
|| &lt;br /&gt;
* ICASSP 2022 review&lt;br /&gt;
* Event detection experiment on coughing sounds&lt;br /&gt;
* Some project cooperation&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;/div&gt;</summary>
		<author><name>Jianghaoyu</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/2022-06-13</id>
		<title>2022-06-13</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/2022-06-13"/>
				<updated>2022-06-13T10:43:19Z</updated>
		
		<summary type="html">&lt;p&gt;Jianghaoyu：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!People !! This Week !! Next Week !! Task Tracking (&amp;lt;font color=&amp;quot;red&amp;quot;&amp;gt;DeadLine&amp;lt;/font&amp;gt;)&lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Dong Wang&lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yunqi Cai&lt;br /&gt;
||  &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li&lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi&lt;br /&gt;
|| &lt;br /&gt;
* school project proposal&lt;br /&gt;
* baseline: Few shot kws&lt;br /&gt;
||&lt;br /&gt;
* continue on kws baseline &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoran Sun&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Chen Chen&lt;br /&gt;
|| &lt;br /&gt;
* Review cnc code, write documents&lt;br /&gt;
||&lt;br /&gt;
* ICASSP review&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Pengqi Li&lt;br /&gt;
||  &lt;br /&gt;
* PUFA report, code, model&lt;br /&gt;
||&lt;br /&gt;
* Augment and mask&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zixi Yan&lt;br /&gt;
||  &lt;br /&gt;
* datasets-CMU_Wilderness&lt;br /&gt;
* Preparation for Paper Sharing&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Sirui Li&lt;br /&gt;
||  &lt;br /&gt;
* Unsupervised ASR experiments&lt;br /&gt;
||&lt;br /&gt;
* Unsupervised ASR experiments&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoyu Jiang&lt;br /&gt;
|| &lt;br /&gt;
* Test on face verification&lt;br /&gt;
||&lt;br /&gt;
* Go on test&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ruihai Hou&lt;br /&gt;
|| &lt;br /&gt;
* prepare for the final exam&lt;br /&gt;
||&lt;br /&gt;
* prepare for the final exam&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Renmiao Chen&lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Wenqiang Du&lt;br /&gt;
|| &lt;br /&gt;
* Some project cooperation&lt;br /&gt;
* Research on new projects&lt;br /&gt;
||&lt;br /&gt;
* Some project cooperation&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;/div&gt;</summary>
		<author><name>Jianghaoyu</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/2022-06-06</id>
		<title>2022-06-06</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/2022-06-06"/>
				<updated>2022-06-06T11:00:57Z</updated>
		
		<summary type="html">&lt;p&gt;Jianghaoyu：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!People !! This Week !! Next Week !! Task Tracking (&amp;lt;font color=&amp;quot;red&amp;quot;&amp;gt;DeadLine&amp;lt;/font&amp;gt;)&lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Dong Wang&lt;br /&gt;
|| &lt;br /&gt;
* AIGraph-4 (1/2)&lt;br /&gt;
* AIShell course recording&lt;br /&gt;
* TASLP paper ready for submission&lt;br /&gt;
||&lt;br /&gt;
* TASLP submission&lt;br /&gt;
* AIGraph-4 done&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yunqi Cai&lt;br /&gt;
||  &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li&lt;br /&gt;
|| &lt;br /&gt;
* Push CNCSRC (CN-Celeb3)&lt;br /&gt;
* PUFA report (1/2)&lt;br /&gt;
||&lt;br /&gt;
* PUFA delivery&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi&lt;br /&gt;
|| &lt;br /&gt;
* Location-guided attention kws (Samsung's work at Interspeech 2021)&lt;br /&gt;
* CN3&lt;br /&gt;
||&lt;br /&gt;
* kws baseline&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoran Sun&lt;br /&gt;
|| &lt;br /&gt;
* quantitative evaluation for speaker embedding&lt;br /&gt;
* cycleflow code&lt;br /&gt;
||&lt;br /&gt;
* more explorations for speaker embedding&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Chen Chen&lt;br /&gt;
|| &lt;br /&gt;
* CNCeleb data&lt;br /&gt;
||&lt;br /&gt;
* Update CNCeleb dataset toolkits &amp;amp; docs&lt;br /&gt;
* Course pres&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Pengqi Li&lt;br /&gt;
||  &lt;br /&gt;
* PUFA report&lt;br /&gt;
* CN3&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zixi Yan&lt;br /&gt;
||  &lt;br /&gt;
* Preparation for Paper Sharing&lt;br /&gt;
* CNCeleb&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Sirui Li&lt;br /&gt;
||  &lt;br /&gt;
* Generate phone-level alignment labels&lt;br /&gt;
* CNC3&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoyu Jiang&lt;br /&gt;
|| &lt;br /&gt;
* CNCeleb data&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ruihai Hou&lt;br /&gt;
|| &lt;br /&gt;
* CNCeleb3 data&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Renmiao Chen&lt;br /&gt;
|| &lt;br /&gt;
* Finish some course reports&lt;br /&gt;
||&lt;br /&gt;
* Finish course project and presentation&lt;br /&gt;
* prepare for test&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Wenqiang Du&lt;br /&gt;
|| &lt;br /&gt;
* Some project cooperation&lt;br /&gt;
* CN3&lt;br /&gt;
|| &lt;br /&gt;
* aishell KALDI course&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;/div&gt;</summary>
		<author><name>Jianghaoyu</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/2022-05-30</id>
		<title>2022-05-30</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/2022-05-30"/>
				<updated>2022-05-30T10:48:43Z</updated>
		
		<summary type="html">&lt;p&gt;Jianghaoyu：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!People !! This Week !! Next Week !! Task Tracking (&amp;lt;font color=&amp;quot;red&amp;quot;&amp;gt;DeadLine&amp;lt;/font&amp;gt;)&lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Dong Wang&lt;br /&gt;
|| &lt;br /&gt;
* AI-Graph Part3 done. &lt;br /&gt;
* Some revision on TASLP cycleflow paper&lt;br /&gt;
||&lt;br /&gt;
* AI-Graph part4&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yunqi Cai&lt;br /&gt;
||  &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li&lt;br /&gt;
|| &lt;br /&gt;
* Push CNCSRC (CN-Celeb3, INFO)&lt;br /&gt;
* Postdoc defense/trifles&lt;br /&gt;
||&lt;br /&gt;
* PUFA report&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi&lt;br /&gt;
|| &lt;br /&gt;
* finish kws RNNT coding&lt;br /&gt;
* Few shot keyword spotting in any language&lt;br /&gt;
||&lt;br /&gt;
* Location-guided attention kws (Samsung's work at Interspeech 2021)&lt;br /&gt;
* QbE&lt;br /&gt;
* tuning all the baseline&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoran Sun&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Chen Chen&lt;br /&gt;
|| &lt;br /&gt;
* Collecting celeb data&lt;br /&gt;
||&lt;br /&gt;
* Processing celeb data&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Pengqi Li&lt;br /&gt;
||  &lt;br /&gt;
*weekly reading&lt;br /&gt;
*PUFA report&lt;br /&gt;
||&lt;br /&gt;
*augmentation and detection&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zixi Yan&lt;br /&gt;
||  &lt;br /&gt;
* Tool script Refactoring&lt;br /&gt;
* Read ANALYZING THE ROBUSTNESS OF UNSUPERVISED SPEECH RECOGNITION&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Sirui Li&lt;br /&gt;
||  &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoyu Jiang&lt;br /&gt;
|| &lt;br /&gt;
* Collecting celeb data&lt;br /&gt;
||&lt;br /&gt;
* Processing celeb data&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ruihai Hou&lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Renmiao Chen&lt;br /&gt;
|| &lt;br /&gt;
* do course project&lt;br /&gt;
* prepare for examination&lt;br /&gt;
||&lt;br /&gt;
* do course project&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Wenqiang Du&lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;/div&gt;</summary>
		<author><name>Jianghaoyu</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/2022-05-23</id>
		<title>2022-05-23</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/2022-05-23"/>
				<updated>2022-05-23T10:54:19Z</updated>
		
		<summary type="html">&lt;p&gt;Jianghaoyu：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!People !! This Week !! Next Week !! Task Tracking (&amp;lt;font color=&amp;quot;red&amp;quot;&amp;gt;DeadLine&amp;lt;/font&amp;gt;)&lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Dong Wang&lt;br /&gt;
|| &lt;br /&gt;
* AI-Graph Part3 (1/2) completed&lt;br /&gt;
* Keep on review for interpretation in ASR/SID&lt;br /&gt;
||&lt;br /&gt;
* AI-Graph Part3 (2/2) &lt;br /&gt;
* Keep on review for interpretation in ASR/SID&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yunqi Cai&lt;br /&gt;
||  &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li&lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoran Sun&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Chen Chen&lt;br /&gt;
|| &lt;br /&gt;
* Course projects&lt;br /&gt;
* 1of3 website&lt;br /&gt;
* CNCeleb pipeline&lt;br /&gt;
||&lt;br /&gt;
* CNCeleb&lt;br /&gt;
* Course projects&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Pengqi Li&lt;br /&gt;
||  &lt;br /&gt;
*Some project tasks&lt;br /&gt;
*PUFA report&lt;br /&gt;
||&lt;br /&gt;
*Finish PUFA report&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Weida Liang&lt;br /&gt;
||  &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zixi Yan&lt;br /&gt;
||  &lt;br /&gt;
* Tibetan speech recognition experiment based on WAV2VEC feature (debug)&lt;br /&gt;
* Read paper Improving Speech Recognition for Indic Languages using Language Model&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Sirui Li&lt;br /&gt;
||  &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoyu Jiang&lt;br /&gt;
|| &lt;br /&gt;
* Face verification test&lt;br /&gt;
* CNCeleb&lt;br /&gt;
||&lt;br /&gt;
* Go on test&lt;br /&gt;
* CNCeleb&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ruihai Hou&lt;br /&gt;
|| &lt;br /&gt;
* concurrent testing of speaker-diarization&lt;br /&gt;
* calculate RT of speaker-diarization&lt;br /&gt;
* CNCeleb&lt;br /&gt;
||&lt;br /&gt;
* CNCeleb&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Renmiao Chen&lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
|Wenqiang Du&lt;br /&gt;
|| &lt;br /&gt;
* Some project cooperation&lt;br /&gt;
||&lt;br /&gt;
* Some project cooperation&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;/div&gt;</summary>
		<author><name>Jianghaoyu</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/2022-05-16</id>
		<title>2022-05-16</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/2022-05-16"/>
				<updated>2022-05-16T10:41:13Z</updated>
		
		<summary type="html">&lt;p&gt;Jianghaoyu：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!People !! This Week !! Next Week !! Task Tracking (&amp;lt;font color=&amp;quot;red&amp;quot;&amp;gt;DeadLine&amp;lt;/font&amp;gt;)&lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Dong Wang&lt;br /&gt;
|| &lt;br /&gt;
* Graph-AI book&lt;br /&gt;
* Review paper on interpretability for speech processing&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yunqi Cai&lt;br /&gt;
||  &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li&lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi&lt;br /&gt;
|| &lt;br /&gt;
* reproduce Google attention RNN-T system&lt;br /&gt;
||&lt;br /&gt;
* continue work on attention RNN-T&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoran Sun&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Chen Chen&lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Pengqi Li&lt;br /&gt;
||  &lt;br /&gt;
*Some project tasks&lt;br /&gt;
||&lt;br /&gt;
*Finish PUFA report&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Weida Liang&lt;br /&gt;
||  &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zixi Yan&lt;br /&gt;
||  &lt;br /&gt;
* Tibetan speech recognition experiment based on MFCC&lt;br /&gt;
* Tibetan speech recognition experiment based on WAV2VEC feature&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Sirui Li&lt;br /&gt;
||  &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoyu Jiang&lt;br /&gt;
|| &lt;br /&gt;
* Finish speaker verification test&lt;br /&gt;
* Data preparation for face verification&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ruihai Hou&lt;br /&gt;
|| &lt;br /&gt;
* deliver the first version of segTool&lt;br /&gt;
* set up the experiment environment for LSH&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Renmiao Chen&lt;br /&gt;
|| &lt;br /&gt;
* do course projects&lt;br /&gt;
||&lt;br /&gt;
* do course projects&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
|Wenqiang Du&lt;br /&gt;
|| &lt;br /&gt;
* aishell KALDI course&lt;br /&gt;
* Some project cooperation&lt;br /&gt;
||&lt;br /&gt;
* Continue aishell KALDI course&lt;br /&gt;
* Some project cooperation&lt;br /&gt;
* Research on abusive sound detection&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;/div&gt;</summary>
		<author><name>Jianghaoyu</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/Weekly_reading</id>
		<title>Weekly reading</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/Weekly_reading"/>
				<updated>2022-04-29T14:00:17Z</updated>
		
		<summary type="html">&lt;p&gt;Jianghaoyu：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;br /&gt;
'''Internal reading group, Center for Speech and Language Technologies, Tsinghua University'''&lt;br /&gt;
&lt;br /&gt;
'''Time: every Friday, 19:30'''&lt;br /&gt;
&lt;br /&gt;
'''Location: Room 303, Zone 1'''&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
! Date !! Speaker!! Title !! Materials &lt;br /&gt;
|-&lt;br /&gt;
| 2021/04/01  ||Haoran Sun  || Zeus code regularization ||[[媒体文件:代码规范.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2021/05/20  ||Chen Chen   || Overview of speech enhancement|| [[媒体文件:Speech_enhancement.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2021/05/27  ||Di Wang  || Secret of 'hard trials' || [[媒体文件:Secret_of_hard_trials.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2021/06/10  ||Jingxin Shen  ||Experiments about thermal to RGB face synthesis with cycleGAN and pix2pix || [[媒体文件:Expriments about thermal to RGB face synthesis with cycleGan and pix2pix.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2021/06/17  ||Yang Zhang || NIPS2020: Long-Tailed Classification by Keeping the Good and Removing the Bad Momentum Causal Effect || [[媒体文件:long-tail.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2021/07/08  ||Tiankai Zhi || Some experiments on stargan ||[[媒体文件:Some experiments on stargan.pdf]] &lt;br /&gt;
|-&lt;br /&gt;
| 2021/07/15  ||Jiao Han || MG experiments based on ASV system || [[媒体文件:MG experiments based on ASV system..pptx]] &lt;br /&gt;
|-&lt;br /&gt;
| 2021/07/22  ||Zixi Yan &amp;amp; Sirui Li || Unsupervised Speech Recognition || [[媒体文件:Unsupervised_Speech_Recognition.pdf]] &lt;br /&gt;
|-&lt;br /&gt;
| 2021/07/29  ||Pengqi Li || A Simulation Study on Robust MAML || [[媒体文件:A Simulation Study on 􏰛􏰜 Ro􏰛bust MAML.pdf]] &lt;br /&gt;
|-&lt;br /&gt;
| 2021/08/12  ||Qingyang Zhu || Noise-aware method for Speech Enhancement || [[媒体文件:Noise-aware method for Speech Enhancement.pdf]] &lt;br /&gt;
|-&lt;br /&gt;
| 2021/08/12  ||Weida Liang ||  Unsupervised Audio-Visual Synthesis via Exemplar Autoencoders  ||  [[媒体文件:Bi-weekly_report_Liangwd.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2021/08/19  ||Di Wang || Inter Dataset Variability Compensation ||   [[媒体文件:Inter_dataset_variability_compensation.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2021/09/02  ||Tiankai Zhi || One Shot VC || [[媒体文件:One_shot_VC.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2021/09/09  ||Jingxin Shen || Thermal Speaking || [[媒体文件:Thermal_Speaking_2021.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2021/09/23  ||Sirui Li &amp;amp; Zixi Yan || Wav2vec-U Experimental Report || [[媒体文件:Wav2vec-U_experimental_report.pdf ‎]]&lt;br /&gt;
|-&lt;br /&gt;
| 2021/10/20  ||Renmiao Chen|| Is Someone Speaking? || [[媒体文件:Is_Someone_Speaking_Exploring_Long-term_Temporal_Features.pdf ‎]]&lt;br /&gt;
|-&lt;br /&gt;
| 2021/10/28  ||Chen Chen || WenetSpeech Introduction || [[媒体文件:WenetSpeech_Dataset_Introduction.pdf ‎]]&lt;br /&gt;
|-&lt;br /&gt;
| 2021/11/10  ||Weida Liang || Cycle-loss Exemplar Autoencoder || [[媒体文件:Cycle-loss_Exemplar_Autoencoder.pdf ‎]]&lt;br /&gt;
|-&lt;br /&gt;
| 2021/11/17  ||吾买尔江 || Modulation Spectrum || [[媒体文件:Modulation_Spectrum.pdf ‎]]&lt;br /&gt;
|-&lt;br /&gt;
| 2021/11/24  ||Chen Chen || S-DCCRN || [[媒体文件:S-DCCRN_pdf.pdf ‎]]&lt;br /&gt;
|-&lt;br /&gt;
| 2021/12/01  ||Pengqi Li || GuidedMix: An on-the-fly data augmentation approach for robust speaker recognition system || [[媒体文件:201201-GuidedMix-LPQ.pdf ‎]]&lt;br /&gt;
|-&lt;br /&gt;
| 2021/12/08  ||Renmiao Chen || Multimodal person verification ||  [[媒体文件:Multimodal_preson_verification.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2021/12/15  ||Ruihai Hou || Crossmodal clustered contrastive learning: Grounding of spoken language to gesture || [[媒体文件:Crossmodal_clustered_contrasti.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2021/12/29  ||Zixi Yan || Capsules Network || [[媒体文件:Capsules_Network.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/01/05  ||Sirui Li || Self-Supervised Learning for speech recognition with Intermediate layer supervision || [[媒体文件:SSL with Intermediate layer supervision.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/01/12  ||Weida Liang || FragmentVC || [[媒体文件:FragmentVC.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/01/19  ||Haoyu Jiang || Multi-modality Associative Bridging through Memory: Speech Sound Recollected from Face Video || [[媒体文件:Multi-modality_Associative_Bridging_through_Memory.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/02/14  || || Interspeech 2021 Review || [[媒体文件:Interspeech_paper_review_min.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/02/16  ||Chen Chen || Audio Visual HuBERT || [[媒体文件:AVHuBERT.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/03/04  ||Pengqi Li || Study of Visualization || [[媒体文件:Visualization.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/03/11  ||Renmiao Chen || Can audio-visual integration strengthen robustness under multimodal attacks? || [[媒体文件:Audio-Visual_Robustness_Under_Multimodal_Attacks.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/03/11  ||吾买尔江 || Signal Separation || [[媒体文件:Signal_Separation.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/03/18  ||Chen Chen || Overview on Lip Reading and Audio-visual Speech Recognition || [[媒体文件:LipReadingAndAVSR.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/04/01  ||Ruihai Hou || Scalable Identity-Oriented Speech Retrieval || [[媒体文件:Scalable_Identity-Oriented_Speech_Retrieval.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/04/08  ||Zixi Yan ||  || &lt;br /&gt;
|-&lt;br /&gt;
| 2022/04/15  ||Sirui Li || Speech-Based Language Modelling || [[媒体文件:Speech-Based Language Modelling.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/04/22  ||Weida Liang ||  || &lt;br /&gt;
|-&lt;br /&gt;
| 2022/04/29  ||Haoyu Jiang || Models of Speaker Recognition || [[媒体文件:Models_of_Speaker_Recognition.pdf]]&lt;br /&gt;
|-&lt;br /&gt;
| 2022/05/06  ||Chen Chen ||  || &lt;br /&gt;
|-&lt;br /&gt;
|   ||Pengqi Li ||  || &lt;br /&gt;
|-&lt;br /&gt;
|   ||Renmiao Chen ||  || &lt;br /&gt;
|-&lt;br /&gt;
|   ||Ruihai Hou ||  || &lt;br /&gt;
|-&lt;br /&gt;
|   ||Zixi Yan ||  || &lt;br /&gt;
|-&lt;br /&gt;
|   ||Sirui Li ||  || &lt;br /&gt;
|-&lt;br /&gt;
|   ||Weida Liang ||  || &lt;br /&gt;
|-&lt;br /&gt;
|   ||Haoyu Jiang ||  || &lt;br /&gt;
|-&lt;br /&gt;
|}&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
[[Old readings|Past Events]]&lt;/div&gt;</summary>
		<author><name>Jianghaoyu</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Models_of_Speaker_Recognition.pdf</id>
		<title>文件:Models of Speaker Recognition.pdf</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Models_of_Speaker_Recognition.pdf"/>
				<updated>2022-04-29T13:59:13Z</updated>
		
		<summary type="html">&lt;p&gt;Jianghaoyu：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Jianghaoyu</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/2022-04-25</id>
		<title>2022-04-25</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/2022-04-25"/>
				<updated>2022-04-25T11:05:59Z</updated>
		
		<summary type="html">&lt;p&gt;Jianghaoyu：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!People !! This Week !! Next Week !! Task Tracking (&amp;lt;font color=&amp;quot;red&amp;quot;&amp;gt;DeadLine&amp;lt;/font&amp;gt;)&lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Dong Wang&lt;br /&gt;
|| &lt;br /&gt;
* Keep on visualization review &lt;br /&gt;
||&lt;br /&gt;
* Complete the visualization review&lt;br /&gt;
* More on graphical AI&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yunqi Cai&lt;br /&gt;
||  &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li&lt;br /&gt;
|| &lt;br /&gt;
* Push CNCSRC&lt;br /&gt;
* Postdoc report (4/5)&lt;br /&gt;
||&lt;br /&gt;
* Push CNCSRC (Template)&lt;br /&gt;
* PUFA project delivery&lt;br /&gt;
* Postdoc report (Done)&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi&lt;br /&gt;
|| &lt;br /&gt;
* investigate CNN channel [http://192.168.0.51:5555/cgi-bin/cvss/cvss_request.pl?account=shiying&amp;amp;step=view_request&amp;amp;cvssid=854 cvss]&lt;br /&gt;
||&lt;br /&gt;
* continue work on CNN&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoran Sun&lt;br /&gt;
|| &lt;br /&gt;
* l1 adv loss&lt;br /&gt;
||&lt;br /&gt;
* subjective evaluation&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Chen Chen&lt;br /&gt;
|| &lt;br /&gt;
* Review AVSR &amp;amp; LipReading papers&lt;br /&gt;
* Prepare course quiz &amp;amp; project&lt;br /&gt;
||&lt;br /&gt;
* Review AVSR &amp;amp; LipReading papers&lt;br /&gt;
* Pre-process LRW-1000 dataset and test pretrain model&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Pengqi Li&lt;br /&gt;
||  &lt;br /&gt;
* Project&lt;br /&gt;
||&lt;br /&gt;
* Project (Done)&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Weida Liang&lt;br /&gt;
||  &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zixi Yan&lt;br /&gt;
||  &lt;br /&gt;
* Noss experimental system construction&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Sirui Li&lt;br /&gt;
||  &lt;br /&gt;
* Prepare weekly reading report&lt;br /&gt;
* Generate lexicon with 10 clusters&lt;br /&gt;
||&lt;br /&gt;
* Remove speaker information for phonetic classification&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoyu Jiang&lt;br /&gt;
|| &lt;br /&gt;
* Read papers about these models&lt;br /&gt;
* Test on VGG model&lt;br /&gt;
||&lt;br /&gt;
* Go on test&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ruihai Hou&lt;br /&gt;
|| &lt;br /&gt;
* continue organizing the code structure for speaker diarization&lt;br /&gt;
* add time license&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Renmiao Chen&lt;br /&gt;
|| &lt;br /&gt;
* Prepare course quiz &amp;amp; project&lt;br /&gt;
||&lt;br /&gt;
* Read some papers about cross-modal methods&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;/div&gt;</summary>
		<author><name>Jianghaoyu</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/2022-04-25</id>
		<title>2022-04-25</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/2022-04-25"/>
				<updated>2022-04-25T10:51:30Z</updated>
		
		<summary type="html">&lt;p&gt;Jianghaoyu：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!People !! This Week !! Next Week !! Task Tracking (&amp;lt;font color=&amp;quot;red&amp;quot;&amp;gt;DeadLine&amp;lt;/font&amp;gt;)&lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Dong Wang&lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yunqi Cai&lt;br /&gt;
||  &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li&lt;br /&gt;
|| &lt;br /&gt;
* Push CNCSRC&lt;br /&gt;
* Postdoc report (4/5)&lt;br /&gt;
||&lt;br /&gt;
* Push CNCSRC (Template)&lt;br /&gt;
* PUFA project delivery&lt;br /&gt;
* Postdoc report (Done)&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi&lt;br /&gt;
|| &lt;br /&gt;
* investigate CNN channel [http://192.168.0.51:5555/cgi-bin/cvss/cvss_request.pl?account=shiying&amp;amp;step=view_request&amp;amp;cvssid=854 cvss]&lt;br /&gt;
||&lt;br /&gt;
* continue work on CNN&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoran Sun&lt;br /&gt;
|| &lt;br /&gt;
* L1 adversarial loss&lt;br /&gt;
||&lt;br /&gt;
* subjective evaluation&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Chen Chen&lt;br /&gt;
|| &lt;br /&gt;
* Review AVSR &amp;amp; LipReading papers&lt;br /&gt;
* Prepare course quiz &amp;amp; project&lt;br /&gt;
||&lt;br /&gt;
* Review AVSR &amp;amp; LipReading papers&lt;br /&gt;
* Pre-process LRW-1000 dataset and test pretrain model&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Pengqi Li&lt;br /&gt;
||  &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Weida Liang&lt;br /&gt;
||  &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zixi Yan&lt;br /&gt;
||  &lt;br /&gt;
* Noss experimental system construction&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Sirui Li&lt;br /&gt;
||  &lt;br /&gt;
* Prepare weekly reading report&lt;br /&gt;
* Generate lexicon with 10 clusters&lt;br /&gt;
||&lt;br /&gt;
* Remove speaker information for phonetic classification&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoyu Jiang&lt;br /&gt;
|| &lt;br /&gt;
* Read papers about these models&lt;br /&gt;
* Test on VGG model&lt;br /&gt;
||&lt;br /&gt;
* Continue testing&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ruihai Hou&lt;br /&gt;
|| &lt;br /&gt;
* continue organizing the code structure for speaker diarization&lt;br /&gt;
* add time license&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Renmiao Chen&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|}&lt;/div&gt;</summary>
		<author><name>Jianghaoyu</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/2022-04-18</id>
		<title>2022-04-18</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/2022-04-18"/>
				<updated>2022-04-18T11:09:21Z</updated>
		
		<summary type="html">&lt;p&gt;Jianghaoyu：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!People !! This Week !! Next Week !! Task Tracking (&amp;lt;font color=&amp;quot;red&amp;quot;&amp;gt;DeadLine&amp;lt;/font&amp;gt;)&lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Dong Wang&lt;br /&gt;
|| &lt;br /&gt;
* Review paper on speech visualization&lt;br /&gt;
||&lt;br /&gt;
* Keep on visualization review paper&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yunqi Cai&lt;br /&gt;
||  &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li&lt;br /&gt;
|| &lt;br /&gt;
* Push CNCSRC (Toolkit/QA/ECAPA)&lt;br /&gt;
* Postdoc report (2/5)&lt;br /&gt;
* ICASSP 2022 presentation&lt;br /&gt;
||&lt;br /&gt;
* Push CNCSRC (Template)&lt;br /&gt;
* PUFA project delivery&lt;br /&gt;
* Postdoc report (Done)&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi&lt;br /&gt;
|| &lt;br /&gt;
* School project proposal&lt;br /&gt;
||&lt;br /&gt;
* back to zeus/kws&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoran Sun&lt;br /&gt;
|| &lt;br /&gt;
* The dissertation&lt;br /&gt;
||&lt;br /&gt;
* Cycle paper&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Chen Chen&lt;br /&gt;
|| &lt;br /&gt;
* Review AVSR &amp;amp; LipReading papers&lt;br /&gt;
||&lt;br /&gt;
* Review AVSR &amp;amp; LipReading papers&lt;br /&gt;
* Prepare course quiz &amp;amp; project&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Pengqi Li&lt;br /&gt;
||  &lt;br /&gt;
* Project (1/3)&lt;br /&gt;
* study attention&lt;br /&gt;
||&lt;br /&gt;
* Project&lt;br /&gt;
* study methods of pooling related to attention&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Weida Liang&lt;br /&gt;
||  &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zixi Yan&lt;br /&gt;
||  &lt;br /&gt;
* Read the paper &amp;quot;What all do audio transformer models hear? Probing Acoustic Representations for language delivery and its structure&amp;quot;&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Sirui Li&lt;br /&gt;
||  &lt;br /&gt;
* ABX score for LibriSpeech&lt;br /&gt;
||&lt;br /&gt;
* Generate lexicon with CTC&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoyu Jiang&lt;br /&gt;
|| &lt;br /&gt;
* Try to test the baseline with a unified codebase&lt;br /&gt;
||&lt;br /&gt;
* Read papers about the baseline&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ruihai Hou&lt;br /&gt;
|| &lt;br /&gt;
* Organize the code structure for speaker diarization&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Renmiao Chen&lt;br /&gt;
|| &lt;br /&gt;
* Prepare course quiz &amp;amp; project&lt;br /&gt;
||&lt;br /&gt;
* Prepare course quiz &amp;amp; project&lt;br /&gt;
* read some papers&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|}&lt;/div&gt;</summary>
		<author><name>Jianghaoyu</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/2022-04-18</id>
		<title>2022-04-18</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/2022-04-18"/>
				<updated>2022-04-18T10:57:59Z</updated>
		
		<summary type="html">&lt;p&gt;Jianghaoyu：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!People !! This Week !! Next Week !! Task Tracking (&amp;lt;font color=&amp;quot;red&amp;quot;&amp;gt;DeadLine&amp;lt;/font&amp;gt;)&lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Dong Wang&lt;br /&gt;
|| &lt;br /&gt;
* Review paper on speech visualization&lt;br /&gt;
||&lt;br /&gt;
* Keep on visualization review paper&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yunqi Cai&lt;br /&gt;
||  &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li&lt;br /&gt;
|| &lt;br /&gt;
* Push CNCSRC (Toolkit/QA/ECAPA)&lt;br /&gt;
* Postdoc report (2/5)&lt;br /&gt;
* ICASSP 2022 presentation&lt;br /&gt;
||&lt;br /&gt;
* Push CNCSRC (Template)&lt;br /&gt;
* PUFA project delivery&lt;br /&gt;
* Postdoc report (Done)&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi&lt;br /&gt;
|| &lt;br /&gt;
* School project proposal&lt;br /&gt;
||&lt;br /&gt;
* back to zeus/kws&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoran Sun&lt;br /&gt;
|| &lt;br /&gt;
* The dissertation&lt;br /&gt;
||&lt;br /&gt;
* Cycle paper&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Chen Chen&lt;br /&gt;
|| &lt;br /&gt;
* Review AVSR &amp;amp; LipReading papers&lt;br /&gt;
||&lt;br /&gt;
* Review AVSR &amp;amp; LipReading papers&lt;br /&gt;
* Prepare course quiz &amp;amp; project&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Pengqi Li&lt;br /&gt;
||  &lt;br /&gt;
* Project (1/3)&lt;br /&gt;
* study attention&lt;br /&gt;
||&lt;br /&gt;
* Project&lt;br /&gt;
* study methods of pooling related to attention&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Weida Liang&lt;br /&gt;
||  &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zixi Yan&lt;br /&gt;
||  &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Sirui Li&lt;br /&gt;
||  &lt;br /&gt;
* ABX score for LibriSpeech&lt;br /&gt;
||&lt;br /&gt;
* Generate lexicon with CTC&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoyu Jiang&lt;br /&gt;
|| &lt;br /&gt;
* Try to test the baseline with a unified codebase&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ruihai Hou&lt;br /&gt;
|| &lt;br /&gt;
* Organize the code structure for speaker diarization&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Renmiao Chen&lt;br /&gt;
|| &lt;br /&gt;
* Prepare course quiz &amp;amp; project&lt;br /&gt;
||&lt;br /&gt;
* Prepare course quiz &amp;amp; project&lt;br /&gt;
* read some papers&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|}&lt;/div&gt;</summary>
		<author><name>Jianghaoyu</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/2022-04-11</id>
		<title>2022-04-11</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/2022-04-11"/>
				<updated>2022-04-11T10:48:51Z</updated>
		
		<summary type="html">&lt;p&gt;Jianghaoyu：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!People !! This Week !! Next Week !! Task Tracking (&amp;lt;font color=&amp;quot;red&amp;quot;&amp;gt;DeadLine&amp;lt;/font&amp;gt;)&lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Dong Wang&lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yunqi Cai&lt;br /&gt;
||  &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li&lt;br /&gt;
|| &lt;br /&gt;
* Push CNCSRC (refresh SOTA model)&lt;br /&gt;
* Complete Spoof overview&lt;br /&gt;
||&lt;br /&gt;
* Push CNCSRC&lt;br /&gt;
* PUFA project delivery&lt;br /&gt;
* Postdoc report&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi&lt;br /&gt;
|| &lt;br /&gt;
* Zeus/kws&lt;br /&gt;
** CNN ASR&lt;br /&gt;
** IBM Double AE&lt;br /&gt;
** THU energy model&lt;br /&gt;
** Google RNN-T (in progress)&lt;br /&gt;
** QbE (in progress)&lt;br /&gt;
** 4 Kaldi KWS recipes (in progress)&lt;br /&gt;
||&lt;br /&gt;
* Verify CNN &lt;br /&gt;
* continue on zeus/kws &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoran Sun&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Chen Chen&lt;br /&gt;
|| &lt;br /&gt;
* update cycleflow website&lt;br /&gt;
* review unsupervised AVSR papers&lt;br /&gt;
||&lt;br /&gt;
* write review&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Pengqi Li&lt;br /&gt;
||  &lt;br /&gt;
* Web page and code for the paper&lt;br /&gt;
||&lt;br /&gt;
* project&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Weida Liang&lt;br /&gt;
||  &lt;br /&gt;
* Web page and code for the paper&lt;br /&gt;
* Presentation &lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zixi Yan&lt;br /&gt;
||  &lt;br /&gt;
* Wav2vec paper review preparation&lt;br /&gt;
* Collate cosine-similarity experimental data&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Sirui Li&lt;br /&gt;
||  &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoyu Jiang&lt;br /&gt;
|| &lt;br /&gt;
* Data preprocessing&lt;br /&gt;
* Learn the code for data loading&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ruihai Hou&lt;br /&gt;
|| &lt;br /&gt;
* continue the test on LSH&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Renmiao Chen&lt;br /&gt;
|| &lt;br /&gt;
* Do some work on OCR&lt;br /&gt;
||&lt;br /&gt;
* Continue the OCR task&lt;br /&gt;
* Read papers&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|}&lt;/div&gt;</summary>
		<author><name>Jianghaoyu</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/2022-03-28</id>
		<title>2022-03-28</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/2022-03-28"/>
				<updated>2022-03-28T10:54:09Z</updated>
		
		<summary type="html">&lt;p&gt;Jianghaoyu：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!People !! This Week !! Next Week !! Task Tracking (&amp;lt;font color=&amp;quot;red&amp;quot;&amp;gt;DeadLine&amp;lt;/font&amp;gt;)&lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Dong Wang&lt;br /&gt;
|| &lt;br /&gt;
* Interspeech refinement&lt;br /&gt;
* Keyword spotting review&lt;br /&gt;
||&lt;br /&gt;
* Keyword spotting review&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yunqi Cai&lt;br /&gt;
||  &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li&lt;br /&gt;
|| &lt;br /&gt;
* Push CNCSRC&lt;br /&gt;
* INTERSPEECH 2022&lt;br /&gt;
||&lt;br /&gt;
* Push CNCSRC&lt;br /&gt;
* Spoof overview&lt;br /&gt;
* PUFA project delivery&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi&lt;br /&gt;
|| &lt;br /&gt;
* Continue work on Speech engrave (TXT convolution)&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoran Sun&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Chen Chen&lt;br /&gt;
|| &lt;br /&gt;
* Prepare ASR/OCR model for ICPRMSR&lt;br /&gt;
* Read the Lip2Wav paper&lt;br /&gt;
||&lt;br /&gt;
* Review recent papers about Lip Reading &amp;amp; AVSR&lt;br /&gt;
* Prepare ASR/OCR model for ICPRMSR&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Pengqi Li&lt;br /&gt;
||  &lt;br /&gt;
* INTERSPEECH&lt;br /&gt;
||&lt;br /&gt;
* Code and homepage for the paper&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Weida Liang&lt;br /&gt;
||  &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zixi Yan&lt;br /&gt;
||  &lt;br /&gt;
* Cosine-similarity experiments on wav2vec and MFCC features&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Sirui Li&lt;br /&gt;
||  &lt;br /&gt;
* Dictionary generation for true and pseudo-labeled data&lt;br /&gt;
||&lt;br /&gt;
* Downstream ASR Task&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoyu Jiang&lt;br /&gt;
|| &lt;br /&gt;
* Test on SOTA model&lt;br /&gt;
* Learn score normalization&lt;br /&gt;
||&lt;br /&gt;
* Continue testing&lt;br /&gt;
* Test with s-norm&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ruihai Hou&lt;br /&gt;
|| &lt;br /&gt;
* Build experiment environment on the server&lt;br /&gt;
* Extract embeddings of 2793 speakers&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Renmiao Chen&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|}&lt;/div&gt;</summary>
		<author><name>Jianghaoyu</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/2022-03-21</id>
		<title>2022-03-21</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/2022-03-21"/>
				<updated>2022-03-21T11:04:58Z</updated>
		
		<summary type="html">&lt;p&gt;Jianghaoyu：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!People !! This Week !! Next Week !! Task Tracking (&amp;lt;font color=&amp;quot;red&amp;quot;&amp;gt;DeadLine&amp;lt;/font&amp;gt;)&lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Dong Wang&lt;br /&gt;
|| &lt;br /&gt;
* Rewrite the TASLP RC paper&lt;br /&gt;
* Design new architecture for speech engrave&lt;br /&gt;
||&lt;br /&gt;
* Interspeech paper polishing&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yunqi Cai&lt;br /&gt;
||  &lt;br /&gt;
* continue proposals&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li&lt;br /&gt;
|| &lt;br /&gt;
* Push CNCSRC (Submission system and Leaderboard Open)&lt;br /&gt;
* PUFA project delivery [http://192.168.0.51:5555/cgi-bin/cvss/cvss_request.pl?account=lilt&amp;amp;step=view_request&amp;amp;cvssid=832 cvss]&lt;br /&gt;
* Sunine update&lt;br /&gt;
||&lt;br /&gt;
* Push CNCSRC&lt;br /&gt;
* INTERSPEECH 2022  &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoran Sun&lt;br /&gt;
|| &lt;br /&gt;
* experiments for cycle loss&lt;br /&gt;
* website of cycleflow&lt;br /&gt;
||&lt;br /&gt;
* paper&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Chen Chen&lt;br /&gt;
|| &lt;br /&gt;
* Prepare weekly reading report&lt;br /&gt;
||&lt;br /&gt;
* Review Mandarin lip reading datasets&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Pengqi Li&lt;br /&gt;
||  &lt;br /&gt;
* More experiments on visualization&lt;br /&gt;
||&lt;br /&gt;
* Interspeech&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Weida Liang&lt;br /&gt;
||  &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zixi Yan&lt;br /&gt;
||  &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Sirui Li&lt;br /&gt;
||  &lt;br /&gt;
* Phoneme set discovery and dictionary generation&lt;br /&gt;
||&lt;br /&gt;
* dictionary generation and ASR task&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoyu Jiang&lt;br /&gt;
|| &lt;br /&gt;
* Collect SOTA pre-training models and test on AV-CN-Celeb&lt;br /&gt;
||&lt;br /&gt;
* Continue testing and collecting&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ruihai Hou&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Renmiao Chen&lt;br /&gt;
|| &lt;br /&gt;
* Finished PLDA model&lt;br /&gt;
||&lt;br /&gt;
* Test the SOTA face recognition model&lt;br /&gt;
* Explore ways to calculate audio confidence&lt;br /&gt;
* Run experiments with different confidence values for fusing audio and face&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|}&lt;/div&gt;</summary>
		<author><name>Jianghaoyu</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/2022-03-21</id>
		<title>2022-03-21</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/2022-03-21"/>
				<updated>2022-03-21T11:00:46Z</updated>
		
		<summary type="html">&lt;p&gt;Jianghaoyu：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!People !! This Week !! Next Week !! Task Tracking (&amp;lt;font color=&amp;quot;red&amp;quot;&amp;gt;DeadLine&amp;lt;/font&amp;gt;)&lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Dong Wang&lt;br /&gt;
|| &lt;br /&gt;
* Rewrite the TASLP RC paper&lt;br /&gt;
* Design new architecture for speech engrave&lt;br /&gt;
||&lt;br /&gt;
* Interspeech paper polishing&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yunqi Cai&lt;br /&gt;
||  &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li&lt;br /&gt;
|| &lt;br /&gt;
* Push CNCSRC (Submission system and Leaderboard Open)&lt;br /&gt;
* PUFA project delivery [http://192.168.0.51:5555/cgi-bin/cvss/cvss_request.pl?account=lilt&amp;amp;step=view_request&amp;amp;cvssid=832 cvss]&lt;br /&gt;
* Sunine update&lt;br /&gt;
||&lt;br /&gt;
* Push CNCSRC&lt;br /&gt;
* INTERSPEECH 2022  &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoran Sun&lt;br /&gt;
|| &lt;br /&gt;
* experiments for cycle loss&lt;br /&gt;
* website of cycleflow&lt;br /&gt;
||&lt;br /&gt;
* paper&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Chen Chen&lt;br /&gt;
|| &lt;br /&gt;
* Prepare weekly reading report&lt;br /&gt;
||&lt;br /&gt;
* Review Mandarin lip reading datasets&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Pengqi Li&lt;br /&gt;
||  &lt;br /&gt;
* More experiments on visualization&lt;br /&gt;
||&lt;br /&gt;
* Interspeech&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Weida Liang&lt;br /&gt;
||  &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zixi Yan&lt;br /&gt;
||  &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Sirui Li&lt;br /&gt;
||  &lt;br /&gt;
* Phoneme set discovery and dictionary generation&lt;br /&gt;
||&lt;br /&gt;
* Continue dictionary generation&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoyu Jiang&lt;br /&gt;
|| &lt;br /&gt;
* Collect SOTA pre-training models and test on AV-CN-Celeb&lt;br /&gt;
||&lt;br /&gt;
* Continue&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ruihai Hou&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Renmiao Chen&lt;br /&gt;
|| &lt;br /&gt;
* Finished PLDA model&lt;br /&gt;
||&lt;br /&gt;
* Test the SOTA face recognition model&lt;br /&gt;
* Explore ways to calculate audio confidence&lt;br /&gt;
* Run experiments with different confidence values for fusing audio and face&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|}&lt;/div&gt;</summary>
		<author><name>Jianghaoyu</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/2022-03-14</id>
		<title>2022-03-14</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/2022-03-14"/>
				<updated>2022-03-14T10:53:50Z</updated>
		
		<summary type="html">&lt;p&gt;Jianghaoyu：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!People !! This Week !! Next Week !! Task Tracking (&amp;lt;font color=&amp;quot;red&amp;quot;&amp;gt;DeadLine&amp;lt;/font&amp;gt;)&lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Dong Wang&lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yunqi Cai&lt;br /&gt;
||  &lt;br /&gt;
* revise and submit NSFC project&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi&lt;br /&gt;
|| &lt;br /&gt;
* Speech engrave with alignment&lt;br /&gt;
* Speech engrave with enhancement loss [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/6/61/Engrave_recover_result.pdf here]&lt;br /&gt;
||&lt;br /&gt;
* Continue work on speech engrave: design smarter mask mechanisms&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoran Sun&lt;br /&gt;
|| &lt;br /&gt;
* results of CycleVC [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/0/05/CycleVCd128.pdf pdf]&lt;br /&gt;
* tools for vc&lt;br /&gt;
||&lt;br /&gt;
* paper&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Chen Chen&lt;br /&gt;
|| &lt;br /&gt;
* Prepare report of Lip Reading &amp;amp; AVSR&lt;br /&gt;
||&lt;br /&gt;
* Reproduce experiments of reviewed methods&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Pengqi Li&lt;br /&gt;
||  &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Weida Liang&lt;br /&gt;
||  &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zixi Yan&lt;br /&gt;
||  &lt;br /&gt;
* Used the wav2vec model trained on the LibriSpeech dataset for Chinese ASR experiments&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Sirui Li&lt;br /&gt;
||  &lt;br /&gt;
* Phoneme set discovery and dictionary generation&lt;br /&gt;
||&lt;br /&gt;
* Improve phoneme translation system&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoyu Jiang&lt;br /&gt;
|| &lt;br /&gt;
* Data statistics of AV-CN-Celeb&lt;br /&gt;
||&lt;br /&gt;
* Design SOTA model test experiment&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ruihai Hou&lt;br /&gt;
|| &lt;br /&gt;
* Test top-N and speed on binary embedding&lt;br /&gt;
||&lt;br /&gt;
* Prepare paper sharing&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Renmiao Chen&lt;br /&gt;
|| &lt;br /&gt;
* Redo MI and CKA experiments&lt;br /&gt;
* Implement dePLDA&lt;br /&gt;
||&lt;br /&gt;
* Improve dePLDA&lt;br /&gt;
* Do some experiments on score fusion&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|}&lt;/div&gt;</summary>
		<author><name>Jianghaoyu</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Dataset.zip</id>
		<title>文件:Dataset.zip</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Dataset.zip"/>
				<updated>2022-03-08T08:33:46Z</updated>
		
		<summary type="html">&lt;p&gt;Jianghaoyu：AV-CN-Celeb data list&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;AV-CN-Celeb data list&lt;/div&gt;</summary>
		<author><name>Jianghaoyu</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/2022-03-07</id>
		<title>2022-03-07</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/2022-03-07"/>
				<updated>2022-03-07T10:58:30Z</updated>
		
		<summary type="html">&lt;p&gt;Jianghaoyu：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!People !! This Week !! Next Week !! Task Tracking (&amp;lt;font color=&amp;quot;red&amp;quot;&amp;gt;DeadLine&amp;lt;/font&amp;gt;)&lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Dong Wang&lt;br /&gt;
|| &lt;br /&gt;
* Odyssey papers done&lt;br /&gt;
* Trans RC paper draft done&lt;br /&gt;
||&lt;br /&gt;
* More literature review for the Trans RC paper&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yunqi Cai&lt;br /&gt;
||  &lt;br /&gt;
* NSFC revising&lt;br /&gt;
||&lt;br /&gt;
* Write popular science project&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li&lt;br /&gt;
||&lt;br /&gt;
* Push CNCSRC (Release SR.eval and C-P map)&lt;br /&gt;
* Finish Odyssey paper&lt;br /&gt;
||&lt;br /&gt;
* PUFA project delivery&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi&lt;br /&gt;
|| &lt;br /&gt;
* Attention-based Speech engrave&lt;br /&gt;
** adversarial learning&lt;br /&gt;
** Gaussian graver&lt;br /&gt;
** Garbage node training&lt;br /&gt;
* Speech engrave with alignment&lt;br /&gt;
** garbage node training&lt;br /&gt;
||&lt;br /&gt;
* Test Speech engrave with alignment&lt;br /&gt;
** search decoding&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoran Sun&lt;br /&gt;
|| &lt;br /&gt;
* the odyssey paper&lt;br /&gt;
* cycle and adversarial training on AutoVC [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/1/14/CycleVC.pdf pdf]&lt;br /&gt;
||&lt;br /&gt;
* CycleVC adjustment&lt;br /&gt;
* Cycle loss plus adversarial loss on AutoVC&lt;br /&gt;
* experiments for RC paper&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Chen Chen&lt;br /&gt;
|| &lt;br /&gt;
* Review papers about lip-reading &amp;amp; audio-visual speech recognition&lt;br /&gt;
* Do experiments with AV-HuBERT&lt;br /&gt;
||&lt;br /&gt;
* Review papers about lip-reading &amp;amp; audio-visual speech recognition&lt;br /&gt;
* Try fine-tuning &amp;amp; pre-training of AV-HuBERT&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Pengqi Li&lt;br /&gt;
||  &lt;br /&gt;
* experiments on visualization &lt;br /&gt;
||&lt;br /&gt;
* check and reconstruct experiments&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Weida Liang&lt;br /&gt;
||  &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zixi Yan&lt;br /&gt;
||  &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Sirui Li&lt;br /&gt;
||  &lt;br /&gt;
* Do experiments on the Moses system&lt;br /&gt;
||&lt;br /&gt;
* Prepare phrase-based MT training data&lt;br /&gt;
* train phrase-based MT&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoyu Jiang&lt;br /&gt;
|| &lt;br /&gt;
* Finish speaker recognition training on AV-CNCeleb &lt;br /&gt;
||&lt;br /&gt;
* Do face recognition training on AV-CNCeleb &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ruihai Hou&lt;br /&gt;
|| &lt;br /&gt;
* test binary embedding performance (mAP, speed) on speaker retrieval task&lt;br /&gt;
||&lt;br /&gt;
* do some tests on AE&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Renmiao Chen&lt;br /&gt;
|| &lt;br /&gt;
* learn and try to use dePLDA&lt;br /&gt;
||&lt;br /&gt;
* Finish the dePLDA task&lt;br /&gt;
* Prepare for sharing&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|}&lt;/div&gt;</summary>
		<author><name>Jianghaoyu</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/2022-02-28</id>
		<title>2022-02-28</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/2022-02-28"/>
				<updated>2022-02-28T10:40:55Z</updated>
		
		<summary type="html">&lt;p&gt;Jianghaoyu：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!People !! This Week !! Next Week !! Task Tracking (&amp;lt;font color=&amp;quot;red&amp;quot;&amp;gt;DeadLine&amp;lt;/font&amp;gt;)&lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Dong Wang&lt;br /&gt;
|| &lt;br /&gt;
* Investigation on IB/VC &lt;br /&gt;
* Odyssey paper&lt;br /&gt;
||&lt;br /&gt;
* Odyssey paper&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yunqi Cai&lt;br /&gt;
||  &lt;br /&gt;
* NSFC project&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li&lt;br /&gt;
||&lt;br /&gt;
* Push CNCSRC (Update Sunine)&lt;br /&gt;
* Finish ASVSpoof response&lt;br /&gt;
* Polish C-P Map&lt;br /&gt;
||&lt;br /&gt;
* Odyssey paper&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi&lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoran Sun&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Chen Chen&lt;br /&gt;
|| &lt;br /&gt;
* Review papers about lip-reading &amp;amp; audio-visual speech recognition&lt;br /&gt;
* Done: Prepare data &amp;amp; environment for experiments of AV-HuBERT&lt;br /&gt;
||&lt;br /&gt;
* Review papers about lip-reading &amp;amp; audio-visual speech recognition&lt;br /&gt;
* Do experiments with AV-HuBERT&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Pengqi Li&lt;br /&gt;
||  &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Weida Liang&lt;br /&gt;
||  &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zixi Yan&lt;br /&gt;
||  &lt;br /&gt;
* ASR experiments on different layers of the multilingual W2V model&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Sirui Li&lt;br /&gt;
||  &lt;br /&gt;
* Find and learn the Moses system&lt;br /&gt;
||&lt;br /&gt;
* Train and test the Moses system&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoyu Jiang&lt;br /&gt;
|| &lt;br /&gt;
* Train the CN-Celeb baseline&lt;br /&gt;
||&lt;br /&gt;
* Continue training&lt;br /&gt;
* Prepare data&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ruihai Hou&lt;br /&gt;
|| &lt;br /&gt;
* finish speaker diarization interface and generate diarization figure&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Renmiao Chen&lt;br /&gt;
|| &lt;br /&gt;
* Test and adjust the single-modality model&lt;br /&gt;
||&lt;br /&gt;
* Learn and use decoupled PLDA for cross-modal tests&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|}&lt;/div&gt;</summary>
		<author><name>Jianghaoyu</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Acc.pdf</id>
		<title>文件:Acc.pdf</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Acc.pdf"/>
				<updated>2022-02-21T12:08:48Z</updated>
		
		<summary type="html">&lt;p&gt;Jianghaoyu：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Jianghaoyu</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/2022-02-21</id>
		<title>2022-02-21</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/2022-02-21"/>
				<updated>2022-02-21T11:20:07Z</updated>
		
		<summary type="html">&lt;p&gt;Jianghaoyu：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!People !! This Week !! Next Week !! Task Tracking (&amp;lt;font color=&amp;quot;red&amp;quot;&amp;gt;DeadLine&amp;lt;/font&amp;gt;)&lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Dong Wang&lt;br /&gt;
|| &lt;br /&gt;
* Experiments on IB control with the conditional model [http://192.168.0.51:5555/cgi-bin/cvss/cvss_request.pl?account=wangd&amp;amp;step=view_request&amp;amp;cvssid=847]; rough conclusions were obtained.&lt;br /&gt;
* Refine the AV speaker recognition theoretical part.&lt;br /&gt;
* Review for ICME.&lt;br /&gt;
||&lt;br /&gt;
* Complete ICME review&lt;br /&gt;
* VQMIVC reproduction, update with random mask&lt;br /&gt;
* Handle some missing papers: (1) true nonlinear LDA, (2) CycleFlow, (3) thermal-visual database&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yunqi Cai&lt;br /&gt;
||  &lt;br /&gt;
* NSFC application&lt;br /&gt;
* Materials inverse design investigation&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li&lt;br /&gt;
|| &lt;br /&gt;
* Push CNCSRC (Data release and SR baseline)&lt;br /&gt;
* Submit Tencent AI Lab project&lt;br /&gt;
* Submit M2ASR concluding report&lt;br /&gt;
* Write ASVSpoof response&lt;br /&gt;
||&lt;br /&gt;
* Submit ASVSpoof response&lt;br /&gt;
* Finish Draft of C-P Map paper&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi&lt;br /&gt;
|| &lt;br /&gt;
* Speech engrave on overlap speech data&lt;br /&gt;
* M2ASR final report&lt;br /&gt;
||&lt;br /&gt;
* Speech engrave on overlap speech data&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoran Sun&lt;br /&gt;
|| &lt;br /&gt;
* AutoVC with cycle loss [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/1/16/Autovc-cyc.pdf pdf] [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/6/66/Pre.rar demo]&lt;br /&gt;
||&lt;br /&gt;
* Cycle loss after adversarial training&lt;br /&gt;
* VQMIVC&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Chen Chen&lt;br /&gt;
|| &lt;br /&gt;
* Review papers about lip-reading &amp;amp; audio-visual speech recognition&lt;br /&gt;
* Prepare data &amp;amp; environment for experiments of AV-HuBERT&lt;br /&gt;
||&lt;br /&gt;
* &amp;lt;-- keep doing these tasks&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Pengqi Li&lt;br /&gt;
||  &lt;br /&gt;
* Collated the visualization methods that have been reproduced&lt;br /&gt;
* Some scripts for the baseline (CNCSRC)&lt;br /&gt;
||&lt;br /&gt;
* study feature aggregation&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Weida Liang&lt;br /&gt;
||  &lt;br /&gt;
* 3~6 spk cycle loss models on wav2vec+seq2seq model&lt;br /&gt;
* Rewrite paper and focus on cycle loss&lt;br /&gt;
||&lt;br /&gt;
* Finish paper framework&lt;br /&gt;
* Push test on WER scoring&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zixi Yan&lt;br /&gt;
||  &lt;br /&gt;
* Used multilingual W2V model features for ASR experiments and compared them with traditional MFCC features&lt;br /&gt;
||&lt;br /&gt;
* ASR experiments on different layers of the multilingual W2V model&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Sirui Li&lt;br /&gt;
||  &lt;br /&gt;
* Make an experiment plan&lt;br /&gt;
* Read the HuBERT paper and code&lt;br /&gt;
||&lt;br /&gt;
* Finish the HuBERT-U framework&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoyu Jiang&lt;br /&gt;
|| &lt;br /&gt;
* Find the baseline for CN-Celeb speaker identification&lt;br /&gt;
||&lt;br /&gt;
* Train this baseline and find a face recognition baseline&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ruihai Hou&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Renmiao Chen&lt;br /&gt;
|| &lt;br /&gt;
* Check CKA&lt;br /&gt;
* Do experiments for gender&lt;br /&gt;
||&lt;br /&gt;
* Do experiments on cross-modal PLDA&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|}&lt;/div&gt;</summary>
		<author><name>Jianghaoyu</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/2022-02-21</id>
		<title>2022-02-21</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/2022-02-21"/>
				<updated>2022-02-21T10:42:11Z</updated>
		
		<summary type="html">&lt;p&gt;Jianghaoyu：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!People !! This Week !! Next Week !! Task Tracking (&amp;lt;font color=&amp;quot;red&amp;quot;&amp;gt;DeadLine&amp;lt;/font&amp;gt;)&lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Dong Wang&lt;br /&gt;
|| &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yunqi Cai&lt;br /&gt;
||  &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li&lt;br /&gt;
|| &lt;br /&gt;
* Push CNCSRC (Data release and SR baseline)&lt;br /&gt;
* Submit Tencent AI Lab project&lt;br /&gt;
* Submit M2ASR concluding report&lt;br /&gt;
* Write ASVSpoof response&lt;br /&gt;
||&lt;br /&gt;
* Submit ASVSpoof response&lt;br /&gt;
* Finish Draft of C-P Map paper&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi&lt;br /&gt;
|| &lt;br /&gt;
* Speech engrave on overlap speech data&lt;br /&gt;
* M2ASR final report&lt;br /&gt;
||&lt;br /&gt;
* Speech engrave on overlap speech data&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoran Sun&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Chen Chen&lt;br /&gt;
|| &lt;br /&gt;
* Review papers about lip-reading &amp;amp; audio-visual speech recognition&lt;br /&gt;
* Prepare data &amp;amp; environment for experiments of AV-HuBERT&lt;br /&gt;
||&lt;br /&gt;
* &amp;lt;-- keep doing these tasks&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Pengqi Li&lt;br /&gt;
||  &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Weida Liang&lt;br /&gt;
||  &lt;br /&gt;
* 3~6 spk cycle loss models on wav2vec+seq2seq model&lt;br /&gt;
* Rewrite paper and focus on cycle loss&lt;br /&gt;
||&lt;br /&gt;
* Finish paper framework&lt;br /&gt;
* Push test on WER scoring&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zixi Yan&lt;br /&gt;
||  &lt;br /&gt;
* Used multilingual W2V model features for ASR experiments and compared them with traditional MFCC features&lt;br /&gt;
||&lt;br /&gt;
* ASR experiments on different layers of the multilingual W2V model&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Sirui Li&lt;br /&gt;
||  &lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoyu Jiang&lt;br /&gt;
|| &lt;br /&gt;
* Find the baseline for CN-Celeb speaker identification&lt;br /&gt;
||&lt;br /&gt;
* Train this baseline and find more face recognition baselines&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ruihai Hou&lt;br /&gt;
|| &lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Renmiao Chen&lt;br /&gt;
|| &lt;br /&gt;
* Check CKA&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|}&lt;/div&gt;</summary>
		<author><name>Jianghaoyu</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:All_high_data.txt</id>
		<title>文件:All high data.txt</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:All_high_data.txt"/>
				<updated>2022-01-24T12:04:11Z</updated>
		
		<summary type="html">&lt;p&gt;Jianghaoyu：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Jianghaoyu</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Target_video.txt</id>
		<title>文件:Target video.txt</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Target_video.txt"/>
				<updated>2022-01-24T12:03:48Z</updated>
		
		<summary type="html">&lt;p&gt;Jianghaoyu：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Jianghaoyu</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Face_video.txt</id>
		<title>文件:Face video.txt</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Face_video.txt"/>
				<updated>2022-01-24T12:03:18Z</updated>
		
		<summary type="html">&lt;p&gt;Jianghaoyu：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Jianghaoyu</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Conf_meta.txt</id>
		<title>文件:Conf meta.txt</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/%E6%96%87%E4%BB%B6:Conf_meta.txt"/>
				<updated>2022-01-24T12:02:10Z</updated>
		
		<summary type="html">&lt;p&gt;Jianghaoyu：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Jianghaoyu</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/2022-01-24</id>
		<title>2022-01-24</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/2022-01-24"/>
				<updated>2022-01-24T11:03:54Z</updated>
		
		<summary type="html">&lt;p&gt;Jianghaoyu：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!People !! This Week !! Next Week !! Task Tracking (&amp;lt;font color=&amp;quot;red&amp;quot;&amp;gt;DeadLine&amp;lt;/font&amp;gt;)&lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Dong Wang&lt;br /&gt;
|| &lt;br /&gt;
* Reschedule the cycleFlow paper&lt;br /&gt;
* Keep on investigation for multi-modality information fusion&lt;br /&gt;
||&lt;br /&gt;
* Rewrite cycleFlow&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yunqi Cai&lt;br /&gt;
||  &lt;br /&gt;
* Investigate the railway bureau's practical application scenario for the intelligent inspection robot&lt;br /&gt;
* Prepare a report for Gusu Lab&lt;br /&gt;
||&lt;br /&gt;
* Continue the intelligent sensor investigation&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li&lt;br /&gt;
|| &lt;br /&gt;
* Push CNCSRC&lt;br /&gt;
* Submit final papers&lt;br /&gt;
* Prepare hard trials paper&lt;br /&gt;
||&lt;br /&gt;
* Continue the hard trials paper&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi&lt;br /&gt;
|| &lt;br /&gt;
* Investigate forward attention [http://192.168.0.51:5555/cgi-bin/cvss/cvss_request.pl?account=shiying&amp;amp;step=view_request&amp;amp;cvssid=829 here]&lt;br /&gt;
||&lt;br /&gt;
* continue on forward attention&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoran Sun&lt;br /&gt;
|| &lt;br /&gt;
* some experiments on AutoVC [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/1/1c/Autovc.pdf pdf]&lt;br /&gt;
||&lt;br /&gt;
* more experiments for cycle loss&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Chen Chen&lt;br /&gt;
|| &lt;br /&gt;
* Experiments on k-means and on using labels for clustering&lt;br /&gt;
* Experiments on the number of phoneme kinds&lt;br /&gt;
* [http://192.168.0.51:5555/cgi-bin/cvss/cvss_request.pl?account=cchen&amp;amp;step=view_request&amp;amp;cvssid=846 cvss]&lt;br /&gt;
||&lt;br /&gt;
* Check the experiment on label clustering&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Pengqi Li&lt;br /&gt;
||  &lt;br /&gt;
* Study representation learning (self-supervised learning)&lt;br /&gt;
* Trained models on small data using mel-spectrum features and standard softmax&lt;br /&gt;
* Data preprocessing&lt;br /&gt;
||&lt;br /&gt;
* Visualization on small models (mel-spectrum &amp;amp; MFCC)&lt;br /&gt;
* Implement RELAX&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Weida Liang&lt;br /&gt;
||  &lt;br /&gt;
* Conduct wav2vec test&lt;br /&gt;
* Adjust and run the wav2vec+decoder model (training 40k/50k)&lt;br /&gt;
* Add experiment details to paper&lt;br /&gt;
||&lt;br /&gt;
* Paper submission to arXiv&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zixi Yan&lt;br /&gt;
||  &lt;br /&gt;
* Training supervised speech recognition models using wav2vec features&lt;br /&gt;
||&lt;br /&gt;
* Experiments with more data sets&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Sirui Li&lt;br /&gt;
||  &lt;br /&gt;
* Extract MFCC features for GAN&lt;br /&gt;
* Learn clustering algorithms&lt;br /&gt;
||&lt;br /&gt;
* Continue GAN experiments&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoyu Jiang&lt;br /&gt;
|| &lt;br /&gt;
* Complete data merge&lt;br /&gt;
* Prepare group report&lt;br /&gt;
||&lt;br /&gt;
* Comparison of audio-video baseline&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ruihai Hou&lt;br /&gt;
|| &lt;br /&gt;
* Preprocess data for the training of UIS-RNN&lt;br /&gt;
* Train the UIS-RNN model on a small dataset&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Renmiao Chen&lt;br /&gt;
|| &lt;br /&gt;
* Finish MI test.&lt;br /&gt;
* Finish CKA test.&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|}&lt;/div&gt;</summary>
		<author><name>Jianghaoyu</name></author>	</entry>

	<entry>
		<id>http://index.cslt.org/mediawiki/index.php/2022-01-24</id>
		<title>2022-01-24</title>
		<link rel="alternate" type="text/html" href="http://index.cslt.org/mediawiki/index.php/2022-01-24"/>
				<updated>2022-01-24T11:02:12Z</updated>
		
		<summary type="html">&lt;p&gt;Jianghaoyu：&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
!People !! This Week !! Next Week !! Task Tracking (&amp;lt;font color=&amp;quot;red&amp;quot;&amp;gt;DeadLine&amp;lt;/font&amp;gt;)&lt;br /&gt;
|-&lt;br /&gt;
|-&lt;br /&gt;
|Dong Wang&lt;br /&gt;
|| &lt;br /&gt;
* Reschedule the cycleFlow paper&lt;br /&gt;
* Keep on investigation for multi-modality information fusion&lt;br /&gt;
||&lt;br /&gt;
* Rewrite cycleFlow&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Yunqi Cai&lt;br /&gt;
||  &lt;br /&gt;
* Investigate the railway bureau's practical application scenario for the intelligent inspection robot&lt;br /&gt;
* Prepare a report for Gusu Lab&lt;br /&gt;
||&lt;br /&gt;
* Continue the intelligent sensor investigation&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Lantian Li&lt;br /&gt;
|| &lt;br /&gt;
* Push CNCSRC&lt;br /&gt;
* Submit final papers&lt;br /&gt;
* Prepare hard trials paper&lt;br /&gt;
||&lt;br /&gt;
* Continue the hard trials paper&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ying Shi&lt;br /&gt;
|| &lt;br /&gt;
* Investigate forward attention [http://192.168.0.51:5555/cgi-bin/cvss/cvss_request.pl?account=shiying&amp;amp;step=view_request&amp;amp;cvssid=829 here]&lt;br /&gt;
||&lt;br /&gt;
* continue on forward attention&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoran Sun&lt;br /&gt;
|| &lt;br /&gt;
* some experiments on AutoVC [http://cslt.riit.tsinghua.edu.cn/mediawiki/images/1/1c/Autovc.pdf pdf]&lt;br /&gt;
||&lt;br /&gt;
* more experiments for cycle loss&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Chen Chen&lt;br /&gt;
|| &lt;br /&gt;
* Experiments on k-means and on using labels for clustering&lt;br /&gt;
* Experiments on the number of phoneme kinds&lt;br /&gt;
* [http://192.168.0.51:5555/cgi-bin/cvss/cvss_request.pl?account=cchen&amp;amp;step=view_request&amp;amp;cvssid=846 cvss]&lt;br /&gt;
||&lt;br /&gt;
* Check the experiment on label clustering&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Pengqi Li&lt;br /&gt;
||  &lt;br /&gt;
* Study representation learning (self-supervised learning)&lt;br /&gt;
* Trained models on small data using mel-spectrum features and standard softmax&lt;br /&gt;
* Data preprocessing&lt;br /&gt;
||&lt;br /&gt;
* Visualization on small models (mel-spectrum &amp;amp; MFCC)&lt;br /&gt;
* Implement RELAX&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Weida Liang&lt;br /&gt;
||  &lt;br /&gt;
* Conduct wav2vec test&lt;br /&gt;
* Adjust and run the wav2vec+decoder model (training 40k/50k)&lt;br /&gt;
* Add experiment details to paper&lt;br /&gt;
||&lt;br /&gt;
* Paper submission to arXiv&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Zixi Yan&lt;br /&gt;
||  &lt;br /&gt;
* Training supervised speech recognition models using wav2vec features&lt;br /&gt;
||&lt;br /&gt;
* Experiments with more data sets&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Sirui Li&lt;br /&gt;
||  &lt;br /&gt;
* Extract MFCC features for GAN&lt;br /&gt;
* Learn clustering algorithms&lt;br /&gt;
||&lt;br /&gt;
* Continue GAN experiments&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Haoyu Jiang&lt;br /&gt;
|| &lt;br /&gt;
* Complete data merge&lt;br /&gt;
* Prepare group report&lt;br /&gt;
||&lt;br /&gt;
* Comparison of audio-video data&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Ruihai Hou&lt;br /&gt;
|| &lt;br /&gt;
* Preprocess data for the training of UIS-RNN&lt;br /&gt;
* Train the UIS-RNN model on a small dataset&lt;br /&gt;
||&lt;br /&gt;
* &lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|-&lt;br /&gt;
|Renmiao Chen&lt;br /&gt;
|| &lt;br /&gt;
* Finish MI test.&lt;br /&gt;
* Finish CKA test.&lt;br /&gt;
||&lt;br /&gt;
*&lt;br /&gt;
||&lt;br /&gt;
*  &lt;br /&gt;
|-&lt;br /&gt;
&lt;br /&gt;
|}&lt;/div&gt;</summary>
		<author><name>Jianghaoyu</name></author>	</entry>

	</feed>