I really like Richard Szeliski's book. Every chapter (after chapter 3) describes fairly recent results in that research direction in detail and comes with many, many references; if you are interested, you can go straight to those referenced papers.
g(x) = a·f(x) + b. The constants a and b are sometimes described as controlling contrast and brightness; my OpenCV column has a simple contrast-and-brightness example that uses exactly this formula.
g(x) = a(x)·f(x) + b(x). Here a and b need not be constants; they can be spatially varying functions.
g(x) = (1 − α)·f0(x) + α·f1(x). Sweeping α from 0 to 1 cross-dissolves (fades) between two images.
OpenCV's addWeighted(src1, alpha, src2, beta, 0.0, dst) implements exactly this expression.
g(x) = [f(x)]^(1/γ). This is gamma correction, a power-law transform usually applied during image preprocessing; for most digital cameras γ ≈ 2.2.
Besides gamma correction, power-law transforms are also useful for contrast control; try different values of γ.
g(x) = L − 1 − f(x), with gray levels in [0, L − 1]. This is the image negative, useful for enhancing white or gray detail embedded in the dark regions of an image.
g(x) = c·log(1 + f(x)). The log transform.
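As a small illustration of these point operators, here is a minimal OpenCV sketch that applies gamma correction through a 256-entry lookup table and computes the image negative. The use of γ = 2.2 follows the discussion above; the window names and everything else are illustrative choices, not code from the book.

#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <cmath>

using namespace cv;

int main( int, char** argv )
{
  Mat src = imread( argv[1] );
  if( !src.data ) { return -1; }

  // Gamma correction g(x) = f(x)^(1/gamma), applied through a 256-entry lookup table
  double gamma = 2.2;                       // illustrative value, as suggested in the text
  Mat lut( 1, 256, CV_8U );
  for( int i = 0; i < 256; i++ )
    lut.at<uchar>(0, i) = saturate_cast<uchar>( std::pow( i / 255.0, 1.0 / gamma ) * 255.0 );
  Mat corrected;
  LUT( src, lut, corrected );

  // Image negative g(x) = L - 1 - f(x), with L = 256 gray levels
  Mat negative = Scalar::all(255) - src;

  imshow( "Gamma corrected", corrected );
  imshow( "Negative", negative );
  waitKey(0);
  return 0;
}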
public int[][] Histogram_Equalization(int[][] oldmat) {
    int[][] new_mat = new int[height][width];
    int[] tmp = new int[256];

    // build the gray-level histogram
    for (int i = 0; i < width; i++) {
        for (int j = 0; j < height; j++) {
            int index = oldmat[j][i];
            tmp[index]++;
        }
    }

    // compute the cumulative distribution function
    float[] C = new float[256];
    int total = width * height;
    for (int i = 0; i < 256; i++) {
        if (i == 0)
            C[i] = 1.0f * tmp[i] / total;
        else
            C[i] = C[i - 1] + 1.0f * tmp[i] / total;
    }

    // map each pixel through the CDF and pack it into an RGB value
    for (int i = 0; i < width; i++) {
        for (int j = 0; j < height; j++) {
            new_mat[j][i] = (int) (C[oldmat[j][i]] * 255);
            new_mat[j][i] = new_mat[j][i] + (new_mat[j][i] << 8) + (new_mat[j][i] << 16);
        }
    }
    return new_mat;
}
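OpenCV provides the same global operation as equalizeHist; a minimal sketch (grayscale loading and the window name are my own choices):

#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"

using namespace cv;

int main( int, char** argv )
{
  Mat src = imread( argv[1], 0 );   // load as grayscale
  if( !src.data ) { return -1; }

  Mat dst;
  equalizeHist( src, dst );         // built-in global histogram equalization

  imshow( "Equalized", dst );
  waitKey(0);
  return 0;
}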
/*
 * CLAHE: contrast-limited adaptive histogram equalization
 */
public int[][] AHE(int[][] oldmat, int pblock) {
    int block = pblock;
    // split the image into equal rectangular tiles; an 8x8 grid of 64 tiles is a common choice
    int width_block = width / block;
    int height_block = height / block;

    // per-tile histograms
    int[][] tmp = new int[block * block][256];
    // per-tile cumulative distribution functions
    float[][] C = new float[block * block][256];

    // compute the cumulative functions
    for (int i = 0; i < block; i++) {
        for (int j = 0; j < block; j++) {
            int start_x = i * width_block;
            int end_x = start_x + width_block;
            int start_y = j * height_block;
            int end_y = start_y + height_block;
            int num = i + block * j;
            int total = width_block * height_block;
            for (int ii = start_x; ii < end_x; ii++) {
                for (int jj = start_y; jj < end_y; jj++) {
                    int index = oldmat[jj][ii];
                    tmp[num][index]++;
                }
            }
            // clip the histogram (contrast limiting)
            int average = width_block * height_block / 255;
            int LIMIT = 4 * average;
            int steal = 0;
            for (int k = 0; k < 256; k++) {
                if (tmp[num][k] > LIMIT) {
                    steal += tmp[num][k] - LIMIT;
                    tmp[num][k] = LIMIT;
                }
            }
            int bonus = steal / 256;
            // hand out the stolen counts evenly
            for (int k = 0; k < 256; k++) {
                tmp[num][k] += bonus;
            }
            // compute the cumulative distribution histogram
            for (int k = 0; k < 256; k++) {
                if (k == 0)
                    C[num][k] = 1.0f * tmp[num][k] / total;
                else
                    C[num][k] = C[num][k - 1] + 1.0f * tmp[num][k] / total;
            }
        }
    }

    int[][] new_mat = new int[height][width];
    // compute the transformed pixel values;
    // the interpolation scheme depends on where the pixel lies
    for (int i = 0; i < width; i++) {
        for (int j = 0; j < height; j++) {
            // four corners
            if (i <= width_block / 2 && j <= height_block / 2) {
                int num = 0;
                new_mat[j][i] = (int) (C[num][oldmat[j][i]] * 255);
            } else if (i <= width_block / 2 && j >= ((block - 1) * height_block + height_block / 2)) {
                int num = block * (block - 1);
                new_mat[j][i] = (int) (C[num][oldmat[j][i]] * 255);
            } else if (i >= ((block - 1) * width_block + width_block / 2) && j <= height_block / 2) {
                int num = block - 1;
                new_mat[j][i] = (int) (C[num][oldmat[j][i]] * 255);
            } else if (i >= ((block - 1) * width_block + width_block / 2) && j >= ((block - 1) * height_block + height_block / 2)) {
                int num = block * block - 1;
                new_mat[j][i] = (int) (C[num][oldmat[j][i]] * 255);
            }
            // four edges except corners
            else if (i <= width_block / 2) {
                // linear interpolation
                int num_i = 0;
                int num_j = (j - height_block / 2) / height_block;
                int num1 = num_j * block + num_i;
                int num2 = num1 + block;
                float p = (j - (num_j * height_block + height_block / 2)) / (1.0f * height_block);
                float q = 1 - p;
                new_mat[j][i] = (int) ((q * C[num1][oldmat[j][i]] + p * C[num2][oldmat[j][i]]) * 255);
            } else if (i >= ((block - 1) * width_block + width_block / 2)) {
                // linear interpolation
                int num_i = block - 1;
                int num_j = (j - height_block / 2) / height_block;
                int num1 = num_j * block + num_i;
                int num2 = num1 + block;
                float p = (j - (num_j * height_block + height_block / 2)) / (1.0f * height_block);
                float q = 1 - p;
                new_mat[j][i] = (int) ((q * C[num1][oldmat[j][i]] + p * C[num2][oldmat[j][i]]) * 255);
            } else if (j <= height_block / 2) {
                // linear interpolation
                int num_i = (i - width_block / 2) / width_block;
                int num_j = 0;
                int num1 = num_j * block + num_i;
                int num2 = num1 + 1;
                float p = (i - (num_i * width_block + width_block / 2)) / (1.0f * width_block);
                float q = 1 - p;
                new_mat[j][i] = (int) ((q * C[num1][oldmat[j][i]] + p * C[num2][oldmat[j][i]]) * 255);
            } else if (j >= ((block - 1) * height_block + height_block / 2)) {
                // linear interpolation
                int num_i = (i - width_block / 2) / width_block;
                int num_j = block - 1;
                int num1 = num_j * block + num_i;
                int num2 = num1 + 1;
                float p = (i - (num_i * width_block + width_block / 2)) / (1.0f * width_block);
                float q = 1 - p;
                new_mat[j][i] = (int) ((q * C[num1][oldmat[j][i]] + p * C[num2][oldmat[j][i]]) * 255);
            }
            // inner area: bilinear interpolation between four neighboring tiles
            else {
                int num_i = (i - width_block / 2) / width_block;
                int num_j = (j - height_block / 2) / height_block;
                int num1 = num_j * block + num_i;
                int num2 = num1 + 1;
                int num3 = num1 + block;
                int num4 = num2 + block;
                float u = (i - (num_i * width_block + width_block / 2)) / (1.0f * width_block);
                float v = (j - (num_j * height_block + height_block / 2)) / (1.0f * height_block);
                new_mat[j][i] = (int) ((u * v * C[num4][oldmat[j][i]]
                        + (1 - v) * (1 - u) * C[num1][oldmat[j][i]]
                        + u * (1 - v) * C[num2][oldmat[j][i]]
                        + v * (1 - u) * C[num3][oldmat[j][i]]) * 255);
            }
            new_mat[j][i] = new_mat[j][i] + (new_mat[j][i] << 8) + (new_mat[j][i] << 16);
        }
    }
    return new_mat;
}
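Newer versions of OpenCV (2.4.5 and later) also ship a built-in CLAHE implementation via createCLAHE; a minimal sketch, with an illustrative clip limit and tile grid:

#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"

using namespace cv;

int main( int, char** argv )
{
  Mat src = imread( argv[1], 0 );                    // load as grayscale
  if( !src.data ) { return -1; }

  Ptr<CLAHE> clahe = createCLAHE( 4.0, Size(8, 8) ); // clip limit 4, 8x8 tiles (illustrative)
  Mat dst;
  clahe->apply( src, dst );

  imshow( "CLAHE", dst );
  waitKey(0);
  return 0;
}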
Write a simple application to change the color balance of an image by multiplying each color value by a different user-specified constant. If you want to get fancy, you can make this application interactive, with sliders.
I simply multiply the colors by a coefficient; there is a slider, which is quite convenient.
#include "opencv2/highgui/highgui.hpp" #include <iostream> using namespace cv; int alpha = 50; Mat image,new_image; static void change_color(int, void*) { for( int y = 0; y < image.rows; y++ ) for( int x = 0; x < image.cols; x++ ) for( int c = 0; c < 3; c++ ) new_image.at<Vec3b>(y,x)[c] = saturate_cast<uchar>( alpha/50.0 *( image.at<Vec3b>(y,x)[c] )); imshow("Image", new_image); } int main( int, char** argv ) { image = imread( argv[1] ); new_image = Mat::zeros( image.size(), image.type() ); namedWindow("Image", 1); createTrackbar( "pick:", "Image", &alpha, 100, change_color); change_color(0, 0); waitKey(); return 0; }
K = v·hᵀ. A separable 2-D kernel K can be written as the outer product of a 1-D vertical kernel v and a 1-D horizontal kernel h, so the 2-D convolution can be performed as two cheaper 1-D passes.
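As an illustration of this factorization, a minimal sketch using OpenCV's sepFilter2D with a Gaussian kernel; the kernel size and sigma are illustrative choices:

#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"

using namespace cv;

int main( int, char** argv )
{
  Mat src = imread( argv[1] );
  if( !src.data ) { return -1; }

  // 1-D Gaussian kernel v (column vector); size and sigma are illustrative
  Mat v = getGaussianKernel( 7, 1.5, CV_32F );
  Mat h = v.t();                      // so the full 2-D kernel would be K = v * h

  // two 1-D passes: equivalent to filter2D with K = v * h, but cheaper
  Mat dst;
  sepFilter2D( src, dst, -1, h, v );

  imshow( "Separable filter", dst );
  waitKey(0);
  return 0;
}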
Two common discrete Laplacian kernels, the 8-neighbor and the 4-neighbor version:

 1   1   1
 1  -8   1
 1   1   1

 0   1   0
 1  -4   1
 0   1   0
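To connect the table to code, a minimal sketch (my own, not from the book) that applies the 8-neighbor kernel above with filter2D:

#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"

using namespace cv;

int main( int, char** argv )
{
  Mat src = imread( argv[1], 0 );      // grayscale
  if( !src.data ) { return -1; }

  // the 8-neighbor Laplacian kernel from the table above
  Mat kernel = (Mat_<float>(3, 3) <<  1,  1, 1,
                                      1, -8, 1,
                                      1,  1, 1);
  Mat dst, abs_dst;
  filter2D( src, dst, CV_16S, kernel );
  convertScaleAbs( dst, abs_dst );

  imshow( "Laplacian via filter2D", abs_dst );
  waitKey(0);
  return 0;
}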
#include "opencv2/imgproc/imgproc.hpp" #include "opencv2/highgui/highgui.hpp" #include <stdlib.h> #include <stdio.h> using namespace cv; int main( int, char** argv ) { Mat src, src_gray; Mat grad; const char* window_name = "Sobel Demo - Simple Edge Detector"; int scale = 1; int delta = 0; int ddepth = CV_16S; /// Load an image src = imread( argv[1] ); if( !src.data ) { return -1; } GaussianBlur( src, src, Size(3,3), 0, 0, BORDER_DEFAULT ); /// Convert it to gray cvtColor( src, src_gray, CV_RGB2GRAY ); /// Create window namedWindow( window_name, CV_WINDOW_AUTOSIZE ); /// Generate grad_x and grad_y Mat grad_x, grad_y; Mat abs_grad_x, abs_grad_y; /// Gradient X Sobel( src_gray, grad_x, ddepth, 1, 0, 3, scale, delta, BORDER_DEFAULT ); convertScaleAbs( grad_x, abs_grad_x ); /// Gradient Y Sobel( src_gray, grad_y, ddepth, 0, 1, 3, scale, delta, BORDER_DEFAULT ); convertScaleAbs( grad_y, abs_grad_y ); /// Total Gradient (approximate) addWeighted( abs_grad_x, 0.5, abs_grad_y, 0.5, 0, grad ); imshow( window_name, grad ); waitKey(0); return 0; }
#include "opencv2/imgproc/imgproc.hpp" #include "opencv2/highgui/highgui.hpp" #include <stdlib.h> #include <stdio.h> using namespace cv; int main( int, char** argv ) { Mat src, src_gray, dst; int kernel_size = 3; int scale = 1; int delta = 0; int ddepth = CV_16S; const char* window_name = "Laplace Demo"; /// Load an image src = imread( argv[1] ); if( !src.data ) { return -1; } /// Remove noise by blurring with a Gaussian filter GaussianBlur( src, src, Size(3,3), 0, 0, BORDER_DEFAULT ); /// Convert the image to grayscale cvtColor( src, src_gray, CV_RGB2GRAY ); /// Create window namedWindow( window_name, CV_WINDOW_AUTOSIZE ); /// Apply Laplace function Mat abs_dst; Laplacian( src_gray, dst, ddepth, kernel_size, scale, delta, BORDER_DEFAULT ); convertScaleAbs( dst, abs_dst ); /// Show what you got imshow( window_name, abs_dst ); waitKey(0); return 0; }
#include "opencv2/imgproc/imgproc.hpp" #include "opencv2/highgui/highgui.hpp" #include <stdlib.h> #include <stdio.h> using namespace cv; int main ( int, char** argv ) { /// Declare variables Mat src, dst; Mat kernel; Point anchor; double delta; int ddepth; int kernel_size; const char* window_name = "filter2D Demo"; int c; /// Load an image src = imread( argv[1] ); if( !src.data ) { return -1; } /// Create window namedWindow( window_name, CV_WINDOW_AUTOSIZE ); /// Initialize arguments for the filter anchor = Point( -1, -1 ); delta = 0; ddepth = -1; /// Loop - Will filter the image with different kernel sizes each 0.5 seconds int ind = 0; for(;;) { c = waitKey(500); /// Press 'ESC' to exit the program if( (char)c == 27 ) { break; } /// Update kernel size for a normalized box filter kernel_size = 3 + 2*( ind%5 ); kernel = Mat::ones( kernel_size, kernel_size, CV_32F )/ (float)(kernel_size*kernel_size); /// Apply filter filter2D(src, dst, ddepth , kernel, anchor, delta, BORDER_DEFAULT ); imshow( window_name, dst ); ind++; } return 0; }
It is easy to see that s(i, j) = s(i−1, j) + s(i, j−1) − s(i−1, j−1) + f(i, j).
OpenCV has a built-in function for computing integral images, integral, which offers several optional outputs (a usage sketch follows the parameter list below):
sum: the integral image (running sum of pixel values)
sqsum: the integral image of squared pixel values
tilted: the integral image of the input rotated by 45 degrees
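A minimal usage sketch: compute the three integral images, then evaluate the sum over an axis-aligned rectangle with four lookups, just as the recurrence above suggests. The rectangle coordinates are illustrative and assume the image is large enough.

#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <stdio.h>

using namespace cv;

int main( int, char** argv )
{
  Mat src = imread( argv[1], 0 );       // grayscale
  if( !src.data ) { return -1; }

  Mat sum, sqsum, tilted;
  integral( src, sum, sqsum, tilted, CV_64F );   // sum has size (rows+1) x (cols+1)

  // sum of the rectangle [x0, x1) x [y0, y1) from four integral-image lookups
  int x0 = 10, y0 = 10, x1 = 60, y1 = 50;        // illustrative coordinates
  double rectSum = sum.at<double>(y1, x1) - sum.at<double>(y0, x1)
                 - sum.at<double>(y1, x0) + sum.at<double>(y0, x0);
  printf( "sum over rectangle: %f\n", rectSum );
  return 0;
}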
// OpenCV bilateral filter
// src:  input image
// dst:  output image
// 5:    diameter of each pixel neighborhood
// 10.0: standard deviation in color space
// 2.0:  standard deviation in coordinate (spatial) space
bilateralFilter(src, dst, 5, 10.0, 2.0);
For more on filtering, you can also refer here.
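For completeness, a self-contained program around the same call might look like this (the window name is my own choice):

#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"

using namespace cv;

int main( int, char** argv )
{
  Mat src = imread( argv[1] );
  if( !src.data ) { return -1; }

  Mat dst;
  // diameter 5, sigmaColor 10.0, sigmaSpace 2.0, as in the call above
  bilateralFilter( src, dst, 5, 10.0, 2.0 );

  imshow( "Bilateral Filter", dst );
  waitKey(0);
  return 0;
}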
Iterated adaptive smoothing and anisotropic diffusion
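The book only names these techniques; as an illustration, below is a minimal per-pixel sketch of Perona-Malik anisotropic diffusion written with OpenCV Mats. The conductance function exp(-(d/kappa)^2) and the values of kappa, lambda, and the iteration count are my own illustrative choices, not something prescribed by the text.

#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <cmath>

using namespace cv;

// one Perona-Malik scheme: each iteration adds lambda * sum of conductance-weighted
// differences to the four axis-aligned neighbors
static void peronaMalik( Mat& img, int iterations, float kappa, float lambda )
{
  for( int it = 0; it < iterations; it++ )
  {
    Mat next = img.clone();
    for( int y = 1; y < img.rows - 1; y++ )
      for( int x = 1; x < img.cols - 1; x++ )
      {
        float c  = img.at<float>(y, x);
        float dn = img.at<float>(y - 1, x) - c;   // north difference
        float ds = img.at<float>(y + 1, x) - c;   // south
        float de = img.at<float>(y, x + 1) - c;   // east
        float dw = img.at<float>(y, x - 1) - c;   // west
        float cn = std::exp( -(dn / kappa) * (dn / kappa) );
        float cs = std::exp( -(ds / kappa) * (ds / kappa) );
        float ce = std::exp( -(de / kappa) * (de / kappa) );
        float cw = std::exp( -(dw / kappa) * (dw / kappa) );
        next.at<float>(y, x) = c + lambda * ( cn * dn + cs * ds + ce * de + cw * dw );
      }
    img = next;
  }
}

int main( int, char** argv )
{
  Mat src = imread( argv[1], 0 );          // load as grayscale
  if( !src.data ) { return -1; }

  Mat img;
  src.convertTo( img, CV_32F );
  peronaMalik( img, 15, 30.0f, 0.2f );     // iterations, kappa, lambda are illustrative

  Mat out;
  img.convertTo( out, CV_8U );
  imshow( "Anisotropic Diffusion", out );
  waitKey(0);
  return 0;
}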
#include "opencv2/imgproc/imgproc.hpp" #include "opencv2/highgui/highgui.hpp" #include <stdlib.h> #include <stdio.h> using namespace cv; /// Global variables Mat src, erosion_dst, dilation_dst; int erosion_elem = 0; int erosion_size = 0; int dilation_elem = 0; int dilation_size = 0; int const max_elem = 2; int const max_kernel_size = 21; /** Function Headers */ void Erosion( int, void* ); void Dilation( int, void* ); /** * @function main */ int main( int, char** argv ) { /// Load an image src = imread( argv[1] ); if( !src.data ) { return -1; } /// Create windows namedWindow( "Erosion Demo", CV_WINDOW_AUTOSIZE ); namedWindow( "Dilation Demo", CV_WINDOW_AUTOSIZE ); cvMoveWindow( "Dilation Demo", src.cols, 0 ); /// Create Erosion Trackbar createTrackbar( "Element:\n 0: Rect \n 1: Cross \n 2: Ellipse", "Erosion Demo", &erosion_elem, max_elem, Erosion ); createTrackbar( "Kernel size:\n 2n +1", "Erosion Demo", &erosion_size, max_kernel_size, Erosion ); /// Create Dilation Trackbar createTrackbar( "Element:\n 0: Rect \n 1: Cross \n 2: Ellipse", "Dilation Demo", &dilation_elem, max_elem, Dilation ); createTrackbar( "Kernel size:\n 2n +1", "Dilation Demo", &dilation_size, max_kernel_size, Dilation ); /// Default start Erosion( 0, 0 ); Dilation( 0, 0 ); waitKey(0); return 0; } /** * @function Erosion */ void Erosion( int, void* ) { int erosion_type = 0; if( erosion_elem == 0 ){ erosion_type = MORPH_RECT; } else if( erosion_elem == 1 ){ erosion_type = MORPH_CROSS; } else if( erosion_elem == 2) { erosion_type = MORPH_ELLIPSE; } Mat element = getStructuringElement( erosion_type, Size( 2*erosion_size + 1, 2*erosion_size+1 ), Point( erosion_size, erosion_size ) ); /// Apply the erosion operation erode( src, erosion_dst, element ); imshow( "Erosion Demo", erosion_dst ); } /** * @function Dilation */ void Dilation( int, void* ) { int dilation_type = 0; if( dilation_elem == 0 ){ dilation_type = MORPH_RECT; } else if( dilation_elem == 1 ){ dilation_type = MORPH_CROSS; } else if( dilation_elem == 2) { dilation_type = MORPH_ELLIPSE; } Mat element = getStructuringElement( dilation_type, Size( 2*dilation_size + 1, 2*dilation_size+1 ), Point( dilation_size, dilation_size ) ); /// Apply the dilation operation dilate( src, dilation_dst, element ); imshow( "Dilation Demo", dilation_dst ); }
Opening:
dst = open(src, element) = dilate(erode(src, element), element)

Closing:
dst = close(src, element) = erode(dilate(src, element), element)

Morphological gradient:
dst = morph_grad(src, element) = dilate(src, element) - erode(src, element)

"Top hat":
dst = tophat(src, element) = src - open(src, element)

"Black hat":
dst = blackhat(src, element) = close(src, element) - src
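In OpenCV, all five of these operations are exposed through morphologyEx; the following minimal sketch (the structuring element size is my own illustrative choice) shows which MORPH_* constant corresponds to each formula:

#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"

using namespace cv;

int main( int, char** argv )
{
  Mat src = imread( argv[1] );
  if( !src.data ) { return -1; }

  Mat dst;
  Mat element = getStructuringElement( MORPH_RECT, Size(3, 3) );  // illustrative 3x3 rectangle

  morphologyEx( src, dst, MORPH_OPEN,     element );  // opening
  morphologyEx( src, dst, MORPH_CLOSE,    element );  // closing
  morphologyEx( src, dst, MORPH_GRADIENT, element );  // morphological gradient
  morphologyEx( src, dst, MORPH_TOPHAT,   element );  // "top hat"
  morphologyEx( src, dst, MORPH_BLACKHAT, element );  // "black hat"

  imshow( "Black hat", dst );   // dst holds the result of the last call
  waitKey(0);
  return 0;
}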
A temporary image temp is needed for the morphological gradient, and for in-place computation of the "top hat" and "black hat" operations.
#include "opencv2/imgproc/imgproc.hpp" #include "opencv2/highgui/highgui.hpp" #include <stdlib.h> #include <stdio.h> using namespace cv; /// Global variables Mat src, dst; int morph_elem = 0; int morph_size = 0; int morph_operator = 0; int const max_operator = 4; int const max_elem = 2; int const max_kernel_size = 21; const char* window_name = "Morphology Transformations Demo"; /** Function Headers */ void Morphology_Operations( int, void* ); /** * @function main */ int main( int, char** argv ) { /// Load an image src = imread( argv[1] ); if( !src.data ) { return -1; } /// Create window namedWindow( window_name, CV_WINDOW_AUTOSIZE ); /// Create Trackbar to select Morphology operation createTrackbar("Operator:\n 0: Opening - 1: Closing \n 2: Gradient - 3: Top Hat \n 4: Black Hat", window_name, &morph_operator, max_operator, Morphology_Operations ); /// Create Trackbar to select kernel type createTrackbar( "Element:\n 0: Rect - 1: Cross - 2: Ellipse", window_name, &morph_elem, max_elem, Morphology_Operations ); /// Create Trackbar to choose kernel size createTrackbar( "Kernel size:\n 2n +1", window_name, &morph_size, max_kernel_size, Morphology_Operations ); /// Default start Morphology_Operations( 0, 0 ); waitKey(0); return 0; } /** * @function Morphology_Operations */ void Morphology_Operations( int, void* ) { // Since MORPH_X : 2,3,4,5 and 6 int operation = morph_operator + 2; Mat element = getStructuringElement( morph_elem, Size( 2*morph_size + 1, 2*morph_size+1 ), Point( morph_size, morph_size ) ); /// Apply the specified morphology operation morphologyEx( src, dst, operation, element ); imshow( window_name, dst ); }