﻿<?xml version="1.0" encoding="UTF-8"?><rss version="2.0"
	xmlns:content="http://purl.org/rss/1.0/modules/content/"
	xmlns:wfw="http://wellformedweb.org/CommentAPI/"
	xmlns:dc="http://purl.org/dc/elements/1.1/"
	xmlns:atom="http://www.w3.org/2005/Atom"
	xmlns:sy="http://purl.org/rss/1.0/modules/syndication/"
	xmlns:slash="http://purl.org/rss/1.0/modules/slash/"
	>

<channel>
	<title>文献库 &#8211; 学术创新中心</title>
	<atom:link href="https://www.leexinghai.com/aic/category/zafu/workspace/wxk/feed/" rel="self" type="application/rss+xml" />
	<link>https://www.leexinghai.com/aic</link>
	<description>Academic Innovation Center</description>
	<lastBuildDate>Mon, 24 Nov 2025 10:22:36 +0000</lastBuildDate>
	<language>zh-Hans</language>
	<sy:updatePeriod>
	hourly	</sy:updatePeriod>
	<sy:updateFrequency>
	1	</sy:updateFrequency>
	<generator>https://wordpress.org/?v=6.9.4</generator>

<image>
	<url>https://www.leexinghai.com/aic/wp-content/uploads/2025/08/cropped-徽标名称-32x32.jpg</url>
	<title>文献库 &#8211; 学术创新中心</title>
	<link>https://www.leexinghai.com/aic</link>
	<width>32</width>
	<height>32</height>
</image> 
	<item>
		<title>[文献CS-LVLM-EN-20231116]Video-LLaVA: Learning United Visual Representation by Alignment Before Projection</title>
		<link>https://www.leexinghai.com/aic/%e6%96%87%e7%8c%aecs-lvlm-en-20231116video-llava-learning-united-visual-representation-by-alignment-before-projection/</link>
		
		<dc:creator><![CDATA[李星海]]></dc:creator>
		<pubDate>Thu, 20 Nov 2025 02:36:05 +0000</pubDate>
				<category><![CDATA[文献库]]></category>
		<category><![CDATA[LVLM大型视觉语言模型]]></category>
		<guid isPermaLink="false">https://www.leexinghai.com/aic/?p=3976</guid>

					<description><![CDATA[文献索引号： https://doi.org/10.48550/arXiv.2311.10122]]></description>
										<content:encoded><![CDATA[
<p>文献索引号：</p>



<figure class="wp-block-table"><table class="has-fixed-layout"><tbody><tr><td><a href="https://doi.org/10.48550/arXiv.2311.10122">https://doi.org/10.48550/arXiv.2311.10122</a></td></tr></tbody></table></figure>



<div data-wp-interactive="core/file" class="wp-block-file"><object data-wp-bind--hidden="!state.hasPdfPreview" hidden class="wp-block-file__embed" data="https://www.leexinghai.com/aic/wp-content/uploads/2025/11/Video-LLaVA-Learning-United-Visual-Representation-by-Alignment-Before-Projection.pdf" type="application/pdf" style="width:100%;height:600px" aria-label="嵌入 Video-LLaVA Learning United Visual Representation by Alignment Before Projection"></object><a id="wp-block-file--media-e9186dae-8045-4ff7-b307-6b72a5c23543" href="https://www.leexinghai.com/aic/wp-content/uploads/2025/11/Video-LLaVA-Learning-United-Visual-Representation-by-Alignment-Before-Projection.pdf">Video-LLaVA Learning United Visual Representation by Alignment Before Projection</a><a href="https://www.leexinghai.com/aic/wp-content/uploads/2025/11/Video-LLaVA-Learning-United-Visual-Representation-by-Alignment-Before-Projection.pdf" class="wp-block-file__button wp-element-button" download aria-describedby="wp-block-file--media-e9186dae-8045-4ff7-b307-6b72a5c23543">下载</a></div>



<p></p>
]]></content:encoded>
					
		
		
			</item>
		<item>
		<title>[文献CS-LLM-EN-20241204]Video LLMs for Temporal Reasoning in Long Videos</title>
		<link>https://www.leexinghai.com/aic/%e6%96%87%e7%8c%aecs-llm-en-20241204video-llms-for-temporal-reasoning-in-long-videos/</link>
		
		<dc:creator><![CDATA[李星海]]></dc:creator>
		<pubDate>Thu, 20 Nov 2025 02:33:00 +0000</pubDate>
				<category><![CDATA[文献库]]></category>
		<category><![CDATA[LLM大语言模型]]></category>
		<guid isPermaLink="false">https://www.leexinghai.com/aic/?p=3971</guid>

					<description><![CDATA[文献索引号： https://doi.org/10.48550/arXiv.2412.02930]]></description>
										<content:encoded><![CDATA[
<p>文献索引号：</p>



<figure class="wp-block-table"><table class="has-fixed-layout"><tbody><tr><td><a href="https://doi.org/10.48550/arXiv.2412.02930">https://doi.org/10.48550/arXiv.2412.02930</a></td></tr></tbody></table></figure>



<div data-wp-interactive="core/file" class="wp-block-file"><object data-wp-bind--hidden="!state.hasPdfPreview" hidden class="wp-block-file__embed" data="https://www.leexinghai.com/aic/wp-content/uploads/2025/11/Video-LLMs-for-Temporal-Reasoning-in-Long-Videos.pdf" type="application/pdf" style="width:100%;height:600px" aria-label="嵌入 Video LLMs for Temporal Reasoning in Long Videos"></object><a id="wp-block-file--media-5626958a-d472-4a08-b0a8-c7e5ef110954" href="https://www.leexinghai.com/aic/wp-content/uploads/2025/11/Video-LLMs-for-Temporal-Reasoning-in-Long-Videos.pdf">Video LLMs for Temporal Reasoning in Long Videos</a><a href="https://www.leexinghai.com/aic/wp-content/uploads/2025/11/Video-LLMs-for-Temporal-Reasoning-in-Long-Videos.pdf" class="wp-block-file__button wp-element-button" download aria-describedby="wp-block-file--media-5626958a-d472-4a08-b0a8-c7e5ef110954">下载</a></div>
]]></content:encoded>
					
		
		
			</item>
		<item>
		<title>[文献CS-NEP-EN-20250528]Fostering Video Reasoning via Next-Event Prediction</title>
		<link>https://www.leexinghai.com/aic/%e6%96%87%e7%8c%aecs-nep-en-20250528fostering-video-reasoning-via-next-event-prediction/</link>
		
		<dc:creator><![CDATA[李星海]]></dc:creator>
		<pubDate>Thu, 20 Nov 2025 02:25:02 +0000</pubDate>
				<category><![CDATA[文献库]]></category>
		<category><![CDATA[NEP下一事件预测]]></category>
		<guid isPermaLink="false">https://www.leexinghai.com/aic/?p=3967</guid>

					<description><![CDATA[文献索引号： https://doi.org/10.48550/arXiv.2505.22457]]></description>
										<content:encoded><![CDATA[
<p>文献索引号：</p>



<figure class="wp-block-table"><table class="has-fixed-layout"><tbody><tr><td><a href="https://doi.org/10.48550/arXiv.2505.22457">https://doi.org/10.48550/arXiv.2505.22457</a></td></tr></tbody></table></figure>



<div data-wp-interactive="core/file" class="wp-block-file"><object data-wp-bind--hidden="!state.hasPdfPreview" hidden class="wp-block-file__embed" data="https://www.leexinghai.com/aic/wp-content/uploads/2025/11/Fostering-Video-Reasoning-via-Next-Event-Prediction-TEMPORALVLM.pdf" type="application/pdf" style="width:100%;height:600px" aria-label="嵌入 Fostering Video Reasoning via Next-Event Prediction-TEMPORALVLM"></object><a id="wp-block-file--media-70ab478a-2df8-4881-9037-fce607c1eccf" href="https://www.leexinghai.com/aic/wp-content/uploads/2025/11/Fostering-Video-Reasoning-via-Next-Event-Prediction-TEMPORALVLM.pdf">Fostering Video Reasoning via Next-Event Prediction-TEMPORALVLM</a><a href="https://www.leexinghai.com/aic/wp-content/uploads/2025/11/Fostering-Video-Reasoning-via-Next-Event-Prediction-TEMPORALVLM.pdf" class="wp-block-file__button wp-element-button" download aria-describedby="wp-block-file--media-70ab478a-2df8-4881-9037-fce607c1eccf">下载</a></div>
]]></content:encoded>
					
		
		
			</item>
		<item>
		<title>[文献MLR-ML-EN-20241104]Chronos- Learning the Language of Time Series</title>
		<link>https://www.leexinghai.com/aic/%e6%96%87%e7%8c%aemlr-ml-en-20241104chronos-learning-the-language-of-time-series/</link>
		
		<dc:creator><![CDATA[李星海]]></dc:creator>
		<pubDate>Thu, 20 Nov 2025 02:19:53 +0000</pubDate>
				<category><![CDATA[文献库]]></category>
		<category><![CDATA[ML机器学习]]></category>
		<guid isPermaLink="false">https://www.leexinghai.com/aic/?p=3963</guid>

					<description><![CDATA[文献索引号： https://doi.org/10.48550/arXiv.2403.07815]]></description>
										<content:encoded><![CDATA[
<p>文献索引号：</p>



<figure class="wp-block-table"><table class="has-fixed-layout"><tbody><tr><td><a href="https://doi.org/10.48550/arXiv.2403.07815">https://doi.org/10.48550/arXiv.2403.07815</a></td></tr></tbody></table></figure>



<div data-wp-interactive="core/file" class="wp-block-file"><object data-wp-bind--hidden="!state.hasPdfPreview" hidden class="wp-block-file__embed" data="https://www.leexinghai.com/aic/wp-content/uploads/2025/11/Chronos-Learning-the-Language-of-Time-Series.pdf" type="application/pdf" style="width:100%;height:600px" aria-label="嵌入 Chronos- Learning the Language of Time Series"></object><a id="wp-block-file--media-fb1d8768-74ca-4508-8224-28b14c49d1a1" href="https://www.leexinghai.com/aic/wp-content/uploads/2025/11/Chronos-Learning-the-Language-of-Time-Series.pdf">Chronos- Learning the Language of Time Series</a><a href="https://www.leexinghai.com/aic/wp-content/uploads/2025/11/Chronos-Learning-the-Language-of-Time-Series.pdf" class="wp-block-file__button wp-element-button" download aria-describedby="wp-block-file--media-fb1d8768-74ca-4508-8224-28b14c49d1a1">下载</a></div>
]]></content:encoded>
					
		
		
			</item>
		<item>
		<title>[文献MDPI-HSI-EN-2510284]Early Detection and Dynamic Grading of Sweet Potato Scab Based on Hyperspectral Imaging</title>
		<link>https://www.leexinghai.com/aic/mdpi-hsi-en-2510284/</link>
		
		<dc:creator><![CDATA[李星海]]></dc:creator>
		<pubDate>Tue, 28 Oct 2025 01:29:51 +0000</pubDate>
				<category><![CDATA[文献库]]></category>
		<category><![CDATA[HSI高光谱]]></category>
		<category><![CDATA[NEP下一事件预测]]></category>
		<category><![CDATA[第4次组会后工作内容]]></category>
		<guid isPermaLink="false">https://www.leexinghai.com/aic/?p=3738</guid>

					<description><![CDATA[]]></description>
										<content:encoded><![CDATA[
<div data-wp-interactive="core/file" class="wp-block-file"><object data-wp-bind--hidden="!state.hasPdfPreview" hidden class="wp-block-file__embed" data="https://www.leexinghai.com/aic/wp-content/uploads/2025/10/Early-Detection-and-Dynamic-Grading-of-Sweet-Potato-Scab-Based-on-Hyperspectral-Imaging.pdf" type="application/pdf" style="width:100%;height:600px" aria-label="嵌入 Early Detection and Dynamic Grading of Sweet Potato Scab Based on Hyperspectral Imaging"></object><a id="wp-block-file--media-f6358f65-3d7c-41e8-b29f-d6e0f0b6721e" href="https://www.leexinghai.com/aic/wp-content/uploads/2025/10/Early-Detection-and-Dynamic-Grading-of-Sweet-Potato-Scab-Based-on-Hyperspectral-Imaging.pdf">Early Detection and Dynamic Grading of Sweet Potato Scab Based on Hyperspectral Imaging</a><a href="https://www.leexinghai.com/aic/wp-content/uploads/2025/10/Early-Detection-and-Dynamic-Grading-of-Sweet-Potato-Scab-Based-on-Hyperspectral-Imaging.pdf" class="wp-block-file__button wp-element-button" download aria-describedby="wp-block-file--media-f6358f65-3d7c-41e8-b29f-d6e0f0b6721e">下载</a></div>
]]></content:encoded>
					
		
		
			</item>
		<item>
		<title>[文献FRT-FS-EN-2510283]Few-shot disease recognition algorithm based on supervised contrastive learning</title>
		<link>https://www.leexinghai.com/aic/frt-fs-en-2510283/</link>
		
		<dc:creator><![CDATA[李星海]]></dc:creator>
		<pubDate>Tue, 28 Oct 2025 01:24:25 +0000</pubDate>
				<category><![CDATA[文献库]]></category>
		<category><![CDATA[FSL小样本学习]]></category>
		<category><![CDATA[第4次组会后工作内容]]></category>
		<guid isPermaLink="false">https://www.leexinghai.com/aic/?p=3732</guid>

					<description><![CDATA[Diseases cause crop yield reduction and quality decline, which has a great impact on agricultural production. Plant disease recognition based on computer vision can help farmers quickly and accurately recognize diseases. However, the occurrence of diseases is random and the collection cost is very high. In many cases, the number of disease samples that can be used to train the disease classifier is small. To address this problem, we propose a few-shot disease recognition algorithm that uses supervised contrastive learning. Our algorithm is divided into two phases: supervised contrastive learning and meta-learning. In the first phase, we use a supervised contrastive learning algorithm to train an encoder with strong generalization capabilities using a large number of samples. In the second phase, we treat this encoder as an extractor of plant disease features and adopt the meta-learning training mechanism to accomplish the few-shot disease recognition tasks by training a nearest-centroid classifier based on distance metrics. The experimental results indicate that the proposed method outperforms the other nine popular few-shot learning algorithms as a comparison in the disease recognition accuracy over the public plant disease dataset PlantVillage. In few-shot potato leaf disease recognition tasks in natural scenarios, the accuracy of the model reaches the accuracy of 79.51% with only 30 training images. The experiment also revealed that, in the contrastive learning phase, the combination of different image augmentation operations has a greater impact on model. Furthermore, the introduction of label information in supervised contrastive learning enables our algorithm to still obtain high accuracy in few-shot disease recognition tasks with smaller batch size, thus allowing us to complete the training with less GPU resource compared to traditional contrastive learning.]]></description>
										<content:encoded><![CDATA[
<div data-wp-interactive="core/file" class="wp-block-file"><object data-wp-bind--hidden="!state.hasPdfPreview" hidden class="wp-block-file__embed" data="https://www.leexinghai.com/aic/wp-content/uploads/2025/10/Few-shot-disease-recognition-algorithm-based-on-supervised-contrastive-learning.pdf" type="application/pdf" style="width:100%;height:600px" aria-label="嵌入 Few-shot disease recognition algorithm based on supervised contrastive learning"></object><a id="wp-block-file--media-48bf4400-2d4f-4c6c-9ba7-a66ac930559c" href="https://www.leexinghai.com/aic/wp-content/uploads/2025/10/Few-shot-disease-recognition-algorithm-based-on-supervised-contrastive-learning.pdf">Few-shot disease recognition algorithm based on supervised contrastive learning</a><a href="https://www.leexinghai.com/aic/wp-content/uploads/2025/10/Few-shot-disease-recognition-algorithm-based-on-supervised-contrastive-learning.pdf" class="wp-block-file__button wp-element-button" download aria-describedby="wp-block-file--media-48bf4400-2d4f-4c6c-9ba7-a66ac930559c">下载</a></div>
]]></content:encoded>
					
		
		
			</item>
		<item>
		<title>[文献FRT-FS-EN-2510282]Few-shot crop disease recognition using sequence- weighted ensemble model-agnostic meta-learning</title>
		<link>https://www.leexinghai.com/aic/frt-fs-en-2510282/</link>
		
		<dc:creator><![CDATA[李星海]]></dc:creator>
		<pubDate>Tue, 28 Oct 2025 01:22:15 +0000</pubDate>
				<category><![CDATA[文献库]]></category>
		<category><![CDATA[FSL小样本学习]]></category>
		<category><![CDATA[第4次组会后工作内容]]></category>
		<guid isPermaLink="false">https://www.leexinghai.com/aic/?p=3728</guid>

					<description><![CDATA[Diseases pose significant threats to crop production, leading to substantial yield reductions and jeopardizing global food security. Timely and accurate detection of crop diseases is essential for ensuring sustainable agricultural development and effective crop management. While deep learning-based computer vision techniques have emerged as powerful tools for crop disease recognition, these methods are heavily reliant on large datasets, which are often difficult to obtain in practical agricultural settings. This challenge highlights the need for models capable of learning from limited data, a scenario known as the few-shot learning problem. In this paper, we introduce a novel few-shot learning approach, the Sequence-Weighted Ensemble Model-Agnostic Meta-Learning (SWE-MAML), designed to train crop disease recognition models with minimal sample sizes. The SWE-MAML framework employs meta-learning to sequentially train a set of base learners, followed by a weighted sum of their predictions for classifying plant disease images. This method integrates ensemble learning with Model-Agnostic Meta-Learning (MAML), allowing the effective training of multiple classifiers within the MAML framework. Experimental results show that SWE-MAML demonstrates strong competitiveness compared to state-of-the-art algorithms on the PlantVillage dataset. Compared to the original MAML, SWE-MAML improves accuracy by 3.75%–8.59%. Furthermore, we observe that the number of base learners significantly influences model performance, with an optimal range of 5–7 learners. Additionally, pre-training with a larger number of disease classes enhances the model’s ability to recognize “unseen” classes. SWE-MAML was also applied to a real-world few-shot potato disease recognition task, achieving an accuracy of 75.71% using just 30 images per disease class in the support set. 
These findings validate that SWE-MAML is a highly effective solution for the few-shot recognition of crop diseases, offering a promising approach for practical deployment in agricultural settings where data scarcity is a major challenge. The integration of ensemble learning with meta-learning enables high-performance disease recognition with minimal data, marking a significant advancement in the field.]]></description>
										<content:encoded><![CDATA[
<div data-wp-interactive="core/file" class="wp-block-file"><object data-wp-bind--hidden="!state.hasPdfPreview" hidden class="wp-block-file__embed" data="https://www.leexinghai.com/aic/wp-content/uploads/2025/10/Few-shot-crop-disease-recognition-using-sequence-weighted-ensemble-model-agnostic-meta-learning.pdf" type="application/pdf" style="width:100%;height:600px" aria-label="嵌入 Few-shot crop disease recognition using sequence- weighted ensemble model-agnostic meta-learning"></object><a id="wp-block-file--media-db9ea0b8-f0aa-4f99-85a4-7c7d47d9e5d0" href="https://www.leexinghai.com/aic/wp-content/uploads/2025/10/Few-shot-crop-disease-recognition-using-sequence-weighted-ensemble-model-agnostic-meta-learning.pdf">Few-shot crop disease recognition using sequence- weighted ensemble model-agnostic meta-learning</a><a href="https://www.leexinghai.com/aic/wp-content/uploads/2025/10/Few-shot-crop-disease-recognition-using-sequence-weighted-ensemble-model-agnostic-meta-learning.pdf" class="wp-block-file__button wp-element-button" download aria-describedby="wp-block-file--media-db9ea0b8-f0aa-4f99-85a4-7c7d47d9e5d0">下载</a></div>
]]></content:encoded>
					
		
		
			</item>
		<item>
		<title>[文献SCI-FS-EN-2510281]PlantCaFo: An efficient few-shot plant disease recognition method based on foundation models</title>
		<link>https://www.leexinghai.com/aic/sci-fs-en-2510281/</link>
		
		<dc:creator><![CDATA[李星海]]></dc:creator>
		<pubDate>Tue, 28 Oct 2025 01:11:40 +0000</pubDate>
				<category><![CDATA[文献库]]></category>
		<category><![CDATA[FSL小样本学习]]></category>
		<category><![CDATA[第4次组会后工作内容]]></category>
		<guid isPermaLink="false">https://www.leexinghai.com/aic/?p=3724</guid>

					<description><![CDATA[Although plant disease recognition is highly important in agricultural production, traditional methods face challenges due to the high costs associated with data collection and the scarcity of samples. Few-shot plant disease identification tasks, which are based on transfer learning, can learn feature representations from a small amount of data; however, most of these methods require pretraining within the relevant domain. Recently, foundation models have demonstrated excellent performance in zero-shot and few-shot learning scenarios. In this study, we explore the potential of foundation models in plant disease recognition by proposing an efficient few-shot plant disease recognition model (PlantCaFo) based on foundation models. This model operates on an end-to-end network structure, integrating prior knowledge from multiple pretraining models. Specifically, we design a lightweight dilated contextual adapter (DCon-Adapter) to learn new knowledge from training data and use a weight decomposition matrix (WDM) to update the text weights. We test the proposed model on a public dataset, PlantVillage, and show that the model achieves an accuracy of 93.53 ​% in a “38-way 16-shot” setting. In addition, we conduct experiments on images collected from natural environments (Cassava dataset), achieving an accuracy improvement of 6.80 ​% over the baseline. To validate the model's generalization performance, we prepare an out-of-distribution dataset with 21 categories, and our model notably increases the accuracy of this dataset. Extensive experiments demonstrate that our model exhibits superior performance over other models in few-shot plant disease identification.]]></description>
										<content:encoded><![CDATA[
<div data-wp-interactive="core/file" class="wp-block-file"><object data-wp-bind--hidden="!state.hasPdfPreview" hidden class="wp-block-file__embed" data="https://www.leexinghai.com/aic/wp-content/uploads/2025/10/PlantCaFo-An-efficient-few-shot-plant-disease-recognition-method-based-on-foundation-models.pdf" type="application/pdf" style="width:100%;height:600px" aria-label="嵌入 PlantCaFo An efficient few-shot plant disease recognition method based on foundation models"></object><a id="wp-block-file--media-47b8dd6d-f1ce-47a1-adb8-45b97664cee1" href="https://www.leexinghai.com/aic/wp-content/uploads/2025/10/PlantCaFo-An-efficient-few-shot-plant-disease-recognition-method-based-on-foundation-models.pdf">PlantCaFo An efficient few-shot plant disease recognition method based on foundation models</a><a href="https://www.leexinghai.com/aic/wp-content/uploads/2025/10/PlantCaFo-An-efficient-few-shot-plant-disease-recognition-method-based-on-foundation-models.pdf" class="wp-block-file__button wp-element-button" download aria-describedby="wp-block-file--media-47b8dd6d-f1ce-47a1-adb8-45b97664cee1">下载</a></div>
]]></content:encoded>
					
		
		
			</item>
		<item>
		<title>[文献TR-PHD-MM-ZH-2509294]-玉米雄穗三维表型信息精准解析及关键性状基因挖掘分析</title>
		<link>https://www.leexinghai.com/aic/tr-phd-mm-zh-250929-4/</link>
		
		<dc:creator><![CDATA[外部作者]]></dc:creator>
		<pubDate>Mon, 29 Sep 2025 01:53:12 +0000</pubDate>
				<category><![CDATA[文献库]]></category>
		<category><![CDATA[2025]]></category>
		<guid isPermaLink="false">https://www.leexinghai.com/aic/?p=3538</guid>

					<description><![CDATA[]]></description>
										<content:encoded><![CDATA[
<div data-wp-interactive="core/file" class="wp-block-file"><object data-wp-bind--hidden="!state.hasPdfPreview" hidden class="wp-block-file__embed" data="https://www.leexinghai.com/aic/wp-content/uploads/2025/09/玉米雄穗三维表型信息精准解析及关键性状基因挖掘分析.pdf" type="application/pdf" style="width:100%;height:1120px" aria-label="嵌入 玉米雄穗三维表型信息精准解析及关键性状基因挖掘分析"></object><a id="wp-block-file--media-62e4b171-00d2-4e45-b626-61f7442c4471" href="https://www.leexinghai.com/aic/wp-content/uploads/2025/09/玉米雄穗三维表型信息精准解析及关键性状基因挖掘分析.pdf">玉米雄穗三维表型信息精准解析及关键性状基因挖掘分析</a><a href="https://www.leexinghai.com/aic/wp-content/uploads/2025/09/玉米雄穗三维表型信息精准解析及关键性状基因挖掘分析.pdf" class="wp-block-file__button wp-element-button" download aria-describedby="wp-block-file--media-62e4b171-00d2-4e45-b626-61f7442c4471">下载</a></div>



<figure class="wp-block-image size-full"><img fetchpriority="high" decoding="async" width="598" height="340" src="https://www.leexinghai.com/aic/wp-content/uploads/2025/09/image-13.png" alt="" class="wp-image-3540" srcset="https://www.leexinghai.com/aic/wp-content/uploads/2025/09/image-13.png 598w, https://www.leexinghai.com/aic/wp-content/uploads/2025/09/image-13-300x171.png 300w" sizes="(max-width: 598px) 100vw, 598px" /></figure>
]]></content:encoded>
					
		
		
			</item>
		<item>
		<title>[文献TR-PHD-MM-ZH-2509293]-计算机视觉驱动的织物手感多模态评价系统构建</title>
		<link>https://www.leexinghai.com/aic/tr-phd-mm-zh-250929-3/</link>
		
		<dc:creator><![CDATA[外部作者]]></dc:creator>
		<pubDate>Mon, 29 Sep 2025 01:49:05 +0000</pubDate>
				<category><![CDATA[文献库]]></category>
		<category><![CDATA[2025]]></category>
		<guid isPermaLink="false">https://www.leexinghai.com/aic/?p=3534</guid>

					<description><![CDATA[]]></description>
										<content:encoded><![CDATA[
<div data-wp-interactive="core/file" class="wp-block-file"><object data-wp-bind--hidden="!state.hasPdfPreview" hidden class="wp-block-file__embed" data="https://www.leexinghai.com/aic/wp-content/uploads/2025/09/计算机视觉驱动的织物手感多模态评价系统构建.pdf" type="application/pdf" style="width:100%;height:600px" aria-label="嵌入 计算机视觉驱动的织物手感多模态评价系统构建"></object><a id="wp-block-file--media-50105435-288f-4057-a0e8-799c3c72085a" href="https://www.leexinghai.com/aic/wp-content/uploads/2025/09/计算机视觉驱动的织物手感多模态评价系统构建.pdf">计算机视觉驱动的织物手感多模态评价系统构建</a><a href="https://www.leexinghai.com/aic/wp-content/uploads/2025/09/计算机视觉驱动的织物手感多模态评价系统构建.pdf" class="wp-block-file__button wp-element-button" download aria-describedby="wp-block-file--media-50105435-288f-4057-a0e8-799c3c72085a">下载</a></div>
]]></content:encoded>
					
		
		
			</item>
	</channel>
</rss>
