• 5星
14.03MB qq_41934573 2021-05-26 22:12:39
• 5星
10.85MB bluesliuf 2021-03-07 14:04:06
• 5星
215KB m0_52957036 2020-04-16 08:07:52
• 5星
14.03MB qq_41934573 2021-05-26 22:12:39
• 5星
1KB weixin_42680139 2021-09-28 22:28:06
• 5.62MB qq_37018213 2018-10-08 20:16:44
• 176KB weixin_44586889 2020-06-29 07:31:50
• ## 利用MATLAB进行音频特征提取 matlab

2.05MB zja0722 2018-03-21 20:55:27
• 5星
3.98MB qq945269529 2015-04-12 22:19:59
• for i=1:26 f=strcat('D:\bishe\',num2str(i)); image=strcat(f,'.jpg'); PS=imread(image); PS=imresize(PS,[300,300],'bilinear');%归一化大小 PS=rgb2gray(PS);... %提取面积，矩形度，圆形度，拉伸度特征

% Per-image preprocessing for 26 images: load, resize, grayscale,
% histogram equalization, Canny edge detection, gradient-magnitude
% binarization, and median filtering, ahead of shape-feature extraction
% (area, rectangularity, circularity, elongation).
for imgIdx = 1:26                                 % FIX: outer index renamed so the
                                                  % inner loops no longer clobber it
    fname = strcat('D:\bishe\', num2str(imgIdx));
    imgPath = strcat(fname, '.jpg');
    PS = imread(imgPath);                         % FIX: image was never loaded
    PS = imresize(PS, [300, 300], 'bilinear');    % normalize size
    PS = rgb2gray(PS);
    [m, n] = size(PS);                            % image dimensions

    % Probability of each gray level (histogram normalized by pixel count).
    GP = zeros(1, 256);
    for k = 0:255
        GP(k+1) = length(find(PS == k)) / (m*n);
    end

    % Histogram equalization: cumulative distribution, then remap levels.
    S1 = cumsum(GP);                % FIX: replaces O(256^2) nested loop with cumsum
    S2 = round((S1 * 256) + 0.5);   % map CDF to nearest gray level

    eq = PS;                        % FIX: was reusing 'f' (the filename variable)
    for level = 0:255
        eq(PS == level) = S2(level+1);  % assign equalized level to each pixel
    end
    figure, imshow(eq);

    % Edge detection on the equalized image.
    edges = edge(eq, 'canny', 0.25);
    imshow(edges);

    % Binarize via gradient magnitude.
    % NOTE(review): original computed sqrt(x.*x+y.*y) with x,y undefined;
    % presumably the intent was the image gradient -- confirm against the
    % author's thesis code.
    gd = double(eq);
    [gx, gy] = gradient(gd);        % FIX: x,y were undefined
    g = sqrt(gx.*gx + gy.*gy);
    g(g >= 0.5) = 256;              % threshold to two levels
    g(g <  0.5) = 0;
    imshow(g);
    title('Binarized sharpened image');

    % Median filtering to suppress salt-and-pepper noise, then dithering.
    g = medfilt2(g);
    g = dither(g);
    imshow(g);

    % Extract area, rectangularity, circularity, elongation features here.
end                                               % FIX: outer loop was never closed

展开全文
weixin_34079307 2021-05-06 06:39:59
• 2KB qq_27233707 2018-05-06 20:13:07

% 2. Object detection in a cluttered scene using point feature matching
%    (SURF features + putative matching + affine RANSAC localization).

% Step 1: read images and convert to grayscale.
% NOTE(review): the scraped source never loaded these images; file names
% below are placeholders -- confirm actual paths.
wahaha1     = imread('wahahaImage.jpg');   % FIX: images were never read
scenceImage = imread('clutteredScene.jpg');
cup1Image   = imread('cupImage.jpg');
wahaha     = rgb2gray(wahaha1);
sceneImage = rgb2gray(scenceImage);

% Step 2: detect feature points in both images.
boxPoints   = detectSURFFeatures(wahaha);
scenePoints = detectSURFFeatures(sceneImage);
figure;
imshow(wahaha);
title('100 Strongest Feature Points from wahaha Image');
hold on;
plot(selectStrongest(boxPoints, 100));
figure;
imshow(sceneImage);
title('300 Strongest Feature Points from Scene Image');
hold on;
plot(selectStrongest(scenePoints, 300));

% Step 3: extract feature descriptors.
% FIX: these two calls were missing, leaving boxFeatures/sceneFeatures
% undefined at the matchFeatures call below.
[boxFeatures,   boxPoints]   = extractFeatures(wahaha,     boxPoints);
[sceneFeatures, scenePoints] = extractFeatures(sceneImage, scenePoints);
boxPairs = matchFeatures(boxFeatures, sceneFeatures);

% Step 4: display putatively matched features (outliers included).
matchedBoxPoints   = boxPoints(boxPairs(:, 1), :);
matchedScenePoints = scenePoints(boxPairs(:, 2), :);
figure;
showMatchedFeatures(wahaha, sceneImage, matchedBoxPoints, ...
    matchedScenePoints, 'montage');
title('Putatively Matched Points (Including Outliers)');

% Step 5: locate the object in the scene via RANSAC-estimated affine
% transform; outliers are rejected.
[tform, inlierBoxPoints, inlierScenePoints] = ...
    estimateGeometricTransform(matchedBoxPoints, matchedScenePoints, 'affine');
figure;
showMatchedFeatures(wahaha, sceneImage, inlierBoxPoints, ...
    inlierScenePoints, 'montage');
title('Matched Points (Inliers Only)');

% Step 6: draw the bounding polygon of the detected object.
boxPolygon = [1, 1;...                             % top-left
    size(wahaha, 2), 1;...                         % top-right
    size(wahaha, 2), size(wahaha, 1);...           % bottom-right
    1, size(wahaha, 1);...                         % bottom-left
    1, 1];                        % top-left again to close the polygon
newBoxPolygon = transformPointsForward(tform, boxPolygon);
figure;
imshow(sceneImage);
hold on;
line(newBoxPolygon(:, 1), newBoxPolygon(:, 2), 'Color', 'y');
title('Detected Box');

% Step 7: detect a second object (cup) in the same scene.
cupImage  = rgb2gray(cup1Image);
cupPoints = detectSURFFeatures(cupImage);
figure;
imshow(cupImage);
hold on;
plot(selectStrongest(cupPoints, 100));
title('100 Strongest Feature Points from cup Image');
% Extract feature descriptors.
[cupFeatures, cupPoints] = extractFeatures(cupImage, cupPoints);
% Match Features
cupPairs = matchFeatures(cupFeatures, sceneFeatures, 'MaxRatio', 0.9);
% Display putatively matched features.
matchedcupPoints   = cupPoints(cupPairs(:, 1), :);
matchedScenePoints = scenePoints(cupPairs(:, 2), :);
figure;
showMatchedFeatures(cupImage, sceneImage, matchedcupPoints, ...
    matchedScenePoints, 'montage');
title('Putatively Matched Points (Including Outliers)');
% Estimate Geometric Transformation and Eliminate Outliers
[tform, inlierElephantPoints, inlierScenePoints] = ...
    estimateGeometricTransform(matchedcupPoints, matchedScenePoints, 'affine');
figure;
showMatchedFeatures(cupImage, sceneImage, inlierElephantPoints, ...
    inlierScenePoints, 'montage');
title('Matched Points (Inliers Only)');
% Display Both Objects
cupPolygon = [1, 1;...                             % top-left
    size(cupImage, 2), 1;...                       % top-right
    size(cupImage, 2), size(cupImage, 1);...       % bottom-right
    1, size(cupImage, 1);...                       % bottom-left
    1, 1];                        % top-left again to close the polygon
newcupPolygon = transformPointsForward(tform, cupPolygon);
figure;
imshow(sceneImage);
hold on;
line(newBoxPolygon(:, 1), newBoxPolygon(:, 2), 'Color', 'y');
line(newcupPolygon(:, 1), newcupPolygon(:, 2), 'Color', 'g');
title('Detected cup and Box');
displayEndOfDemoMessage(mfilename)


展开全文
weixin_33150295 2021-04-19 09:09:01
• 25KB rvhome 2019-04-13 22:22:40
• ## matlab实现人脸检测并提取摄像头检测的图片 人脸检测

1KB qq_39164787 2017-10-24 14:09:57
• 32KB weixin_38722193 2020-07-31 13:38:50
• 5星
22KB xy9021 2018-10-10 09:38:44
• ## HOG特征提取分析MATLAB代码 HOG 特征提取

5星
17KB qq_36235046 2016-12-09 15:28:43
• ## 六种常用纹理特征提取方法MATLAB.7z matlab

223KB w_wanan 2020-03-21 12:37:19
• 4KB maxiao1204 2016-04-19 20:45:13
• 3KB qq_39964244 2020-03-27 10:49:57
• 5星
48KB weixin_38112199 2018-03-19 16:42:31
• 218KB qq_38537319 2018-07-02 13:28:30
• 204KB weixin_38748875 2020-09-18 20:26:04
• 1.71GB weixin_38742460 2021-05-22 06:56:47
• 5星
149KB qq_35000947 2018-04-01 10:53:29
• ## MATLAB特征点的检测与提取（1） MATLAB

MATLAB的Computer Vision System Toolbox™工具箱中，有FAST，Harris...局部点的检测和提取： 一般用于：1）用于定位图像拼接或三维重建的锚点。2）在不需要图像分割的情况下，紧凑地表示图像内容以进行检测或分...
在MATLAB的Computer Vision System Toolbox™工具箱中，有FAST，Harris和Shi & Tomasi 角点检测子和SURF、MSER斑点检测子。这个工具箱还有SURF,FREAK,BRISK,LBP以及HOG描述子。
局部点的检测和提取：
一般用于：1）用于定位图像拼接或三维重建的锚点。2）在不需要图像分割的情况下，紧凑地表示图像内容以进行检测或分类。
% 1. Feature-based image registration and panoramic stitching.

% Step 1: load the images to be stitched.
buildingDir   = fullfile(toolboxdir('vision'), 'visiondata', 'building');
buildingScene = imageSet(buildingDir);

% Display images to be stitched
montage(buildingScene.ImageLocation)

% Step 2: for each consecutive pair I(n-1), I(n): detect and match
% features, estimate the pairwise transform T(n), and accumulate
% T(1)*...*T(n-1)*T(n) so every image maps into the first image's frame.

% Read the first image from the image set.
I = read(buildingScene, 1);          % FIX: I was never read from the set

% Initialize features for I(1)
grayImage = rgb2gray(I);
points = detectSURFFeatures(grayImage);
[features, points] = extractFeatures(grayImage, points);

% Initialize all the transforms to the identity matrix. Note that the
% projective transform is used here because the building images are fairly
% close to the camera. Had the scene been captured from a further distance,
% an affine transform would suffice.
tforms(buildingScene.Count) = projective2d(eye(3));

% Iterate over remaining image pairs
for n = 2:buildingScene.Count

    % Store points and features for I(n-1).
    pointsPrevious = points;
    featuresPrevious = features;

    % Detect and extract SURF features for I(n).
    I = read(buildingScene, n);      % FIX: next image was never read
    grayImage = rgb2gray(I);
    points = detectSURFFeatures(grayImage);
    [features, points] = extractFeatures(grayImage, points);

    % Find correspondences between I(n) and I(n-1).
    indexPairs = matchFeatures(features, featuresPrevious, 'Unique', true);

    matchedPoints = points(indexPairs(:,1), :);
    matchedPointsPrev = pointsPrevious(indexPairs(:,2), :);

    % Estimate the transformation between I(n) and I(n-1).
    tforms(n) = estimateGeometricTransform(matchedPoints, matchedPointsPrev,...
        'projective', 'Confidence', 99.9, 'MaxNumTrials', 2000);

    % Compute T(1) * ... * T(n-1) * T(n)
    tforms(n).T = tforms(n-1).T * tforms(n).T;
end

% Use projective2d's outputLimits to find each transform's output extent,
% then pick the image closest to the scene center as the reference.
imageSize = size(I);  % all the images are the same size
% Compute the output limits for each transform
for i = 1:numel(tforms)
    [xlim(i,:), ylim(i,:)] = outputLimits(tforms(i), [1 imageSize(2)], [1 imageSize(1)]);
end

% Average X limit per transform; only X is used because the scene is
% horizontal. Other image sets may need both X and Y.
avgXLim = mean(xlim, 2);

[~, idx] = sort(avgXLim);

centerIdx = floor((numel(tforms)+1)/2);

centerImageIdx = idx(centerIdx);

% Re-reference every transform to the center image.
Tinv = invert(tforms(centerImageIdx));

for i = 1:numel(tforms)
    tforms(i).T = Tinv.T * tforms(i).T;
end

% Step 3: initialize the panorama canvas.
for i = 1:numel(tforms)
    [xlim(i,:), ylim(i,:)] = outputLimits(tforms(i), [1 imageSize(2)], [1 imageSize(1)]);
end

% Find the minimum and maximum output limits
xMin = min([1; xlim(:)]);
xMax = max([imageSize(2); xlim(:)]);

yMin = min([1; ylim(:)]);
yMax = max([imageSize(1); ylim(:)]);

% Width and height of panorama.
width  = round(xMax - xMin);
height = round(yMax - yMin);

% Initialize the "empty" panorama.
panorama = zeros([height width 3], 'like', I);

% Step 4: create the panorama.
% FIX: the constructor line was truncated mid-continuation; the blender
% needs a mask supplied per call via the input port.
blender = vision.AlphaBlender('Operation', 'Binary mask', ...
    'MaskSource', 'Input port');

% Create a 2-D spatial reference object defining the size of the panorama.
xLimits = [xMin xMax];
yLimits = [yMin yMax];
panoramaView = imref2d([height width], xLimits, yLimits);

% Create the panorama.
for i = 1:buildingScene.Count

    I = read(buildingScene, i);      % FIX: each image must be re-read here

    % Transform I into the panorama.
    warpedImage = imwarp(I, tforms(i), 'OutputView', panoramaView);

    % Overlay the warpedImage onto the panorama.
    panorama = step(blender, panorama, warpedImage, warpedImage(:,:,1));
end

figure
imshow(panorama)

展开全文
qq_42463478 2018-07-20 14:33:35
• 43KB weixin_44348719 2020-10-18 11:21:48
• 3KB qq_27876551 2018-04-02 15:37:04
• 2KB sunflow 2020-02-04 23:41:15
• ## haar特征提取代码 MATLAB版 haar特征

4星
10KB nan10921p 2014-12-26 14:34:14
• 4星
47KB theoryll 2013-08-26 15:53:42

...

matlab 订阅