*** convolution.py ***

import cv2
import numpy as np

image_path = "elephant2.jpeg"
image = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)

# 3x3 averaging (box) kernel for smoothing
kernel = np.ones((3, 3), np.float32) / 9
convolved_image = cv2.filter2D(image, -1, kernel)

cv2.imshow("Original Image", image)
cv2.imshow("Convolved Image (Smoothing Filter)", convolved_image)
cv2.waitKey(0)
cv2.destroyAllWindows()

*** erodila - morphological operations ***

import cv2
import numpy as np

# Read the image in grayscale
image = cv2.imread('erod.jpg', cv2.IMREAD_GRAYSCALE)

# Invert the image
invert = cv2.bitwise_not(image)

# Define the structuring element
kernel = np.ones((5, 5), np.uint8)

# Apply morphological operations
erosion = cv2.erode(invert, kernel, iterations=1)
dilation = cv2.dilate(invert, kernel, iterations=1)
opening = cv2.morphologyEx(invert, cv2.MORPH_OPEN, kernel)
closing = cv2.morphologyEx(invert, cv2.MORPH_CLOSE, kernel)

# Display the images
cv2.imshow('Original Image', image)
cv2.imshow('Erosion', erosion)
cv2.imshow('Dilation', dilation)
cv2.imshow('Opening', opening)
cv2.imshow('Closing', closing)
cv2.waitKey(0)
cv2.destroyAllWindows()

*** face_det - Haar cascade ***

import cv2

# Read the image in grayscale
gray = cv2.imread("faces.jpg", cv2.IMREAD_GRAYSCALE)

# Load the Haar cascade for face detection
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')

# Detect faces
faces = face_cascade.detectMultiScale(gray, 1.1, 5)

# Reload the original image to draw rectangles
image = cv2.imread("faces.jpg")
for (x, y, w, h) in faces:
    cv2.rectangle(image, (x, y), (x + w, y + h), (255, 0, 0), 2)

# Display the result
cv2.imshow("Face Detection", image)
cv2.waitKey(0)
cv2.destroyAllWindows()

*** fft ***

import numpy as np
import matplotlib.pyplot as plt

# Generate signal
fs = 1000  # Sampling rate
t = np.linspace(0, 1, fs, endpoint=False)  # Time array
signal = np.sin(2 * np.pi * 5 * t) + 0.5 * np.random.normal(size=t.shape)  # 5 Hz signal + noise

# FFT
fft_result = np.fft.fft(signal)
frequencies = np.fft.fftfreq(len(signal), 1 / fs)
magnitude = np.abs(fft_result[:fs // 2])  # Magnitude (only positive frequencies)

# Plot
plt.subplot(2, 1, 1)
plt.plot(t, signal)
plt.title("Original Signal")
plt.xlabel("Time (s)")
plt.ylabel("Amplitude")
plt.grid()

plt.subplot(2, 1, 2)
plt.plot(frequencies[:fs // 2], magnitude)
plt.title("FFT (Magnitude)")
plt.xlabel("Frequency (Hz)")
plt.ylabel("Magnitude")
plt.grid()

plt.tight_layout()
plt.show()

*** gamma ***

import cv2
import numpy as np

image_path = "bright.jpg"
image = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)

# gamma > 1 darkens a bright image; gamma < 1 would brighten a dark one
gamma = 4
normalized = image / 255.0
gamma_corrected = cv2.pow(normalized, gamma)
gamma_image = cv2.convertScaleAbs(gamma_corrected * 255)

cv2.imshow("Original Image", image)
cv2.imshow("Gamma Corrected Image", gamma_image)
cv2.waitKey(0)
cv2.destroyAllWindows()

*** harris_corner ***

import cv2
import numpy as np

# Load the grayscale image and the original image
gray = cv2.imread("corners.jpg", cv2.IMREAD_GRAYSCALE)
image = cv2.imread("corners.jpg")

# Harris corner detection (dilated to make the corner responses more visible)
corners = cv2.dilate(cv2.cornerHarris(np.float32(gray), 2, 3, 0.04), None)

# Highlight corners in red
image[corners > 0.01 * corners.max()] = [0, 0, 255]

# Display the result
cv2.imshow("Corners Detected", image)
cv2.waitKey(0)
cv2.destroyAllWindows()

*** hist_equal ***

import cv2

image_path = "bright.jpg"
image = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)

hist_eq_image = cv2.equalizeHist(image)

cv2.imshow("Original Image", image)
cv2.imshow("Histogram Equalized Image", hist_eq_image)
cv2.waitKey(0)
cv2.destroyAllWindows()
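*** hist_equal - histogram comparison (added sketch) ***

A minimal sketch, not part of the original exercise set, showing how the effect of cv2.equalizeHist can be inspected by plotting the intensity histograms before and after with cv2.calcHist. It assumes the same "bright.jpg" input used above.

import cv2
import matplotlib.pyplot as plt

# Assumed input: the same "bright.jpg" used in the hist_equal exercise
image = cv2.imread("bright.jpg", cv2.IMREAD_GRAYSCALE)
equalized = cv2.equalizeHist(image)

# 256-bin intensity histograms before and after equalization
hist_before = cv2.calcHist([image], [0], None, [256], [0, 256])
hist_after = cv2.calcHist([equalized], [0], None, [256], [0, 256])

plt.subplot(1, 2, 1)
plt.plot(hist_before)
plt.title("Histogram Before")

plt.subplot(1, 2, 2)
plt.plot(hist_after)
plt.title("Histogram After")

plt.tight_layout()
plt.show()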
*** laplacian ***

import cv2

# Read the image in grayscale
image_path = "corners.jpg"
image = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)

# Apply Laplacian edge detection
laplacian = cv2.Laplacian(image, cv2.CV_64F, ksize=3)
laplacian = cv2.convertScaleAbs(laplacian)

# Display the images
cv2.imshow("Original Image", image)
cv2.imshow("Laplacian Edge Detection", laplacian)
cv2.waitKey(0)
cv2.destroyAllWindows()

*** linear_non_linear ***

import cv2

image = cv2.imread('elephant2.jpeg')
gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

# Linear (Gaussian) smoothing vs. nonlinear (median) smoothing
gaussian_smoothed = cv2.GaussianBlur(gray_image, (5, 5), 0)
median_smoothed = cv2.medianBlur(gray_image, 5)

cv2.imshow("Original Image", gray_image)
cv2.imshow("Gaussian Smoothing - Linear", gaussian_smoothed)
cv2.imshow("Nonlinear Smoothing (Median)", median_smoothed)
cv2.waitKey(0)
cv2.destroyAllWindows()

*** log_transform ***

import cv2
import numpy as np

# Read the image and apply the log transformation
image = cv2.imread("dark.jpg", cv2.IMREAD_GRAYSCALE).astype(np.float32)
log_image = cv2.convertScaleAbs(255 * np.log(1 + image) / np.log(1 + np.max(image)))

# Show the images
cv2.imshow("Original Image", image.astype(np.uint8))
cv2.imshow("Log Transformed Image", log_image)
cv2.waitKey(0)
cv2.destroyAllWindows()

*** shapes_segmentation ***

import cv2
import numpy as np

# Read the image in grayscale
gray = cv2.imread("circles.jpg", cv2.IMREAD_GRAYSCALE)

# Canny edges and Hough circle detection
edges = cv2.Canny(gray, 50, 150)
image = cv2.imread("circles.jpg")  # Load the original image for visualization
circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, 1.5, 150,
                           param1=50, param2=30, minRadius=10, maxRadius=50)
if circles is not None:
    for x, y, r in np.uint16(circles[0]):
        cv2.circle(image, (x, y), r, (0, 0, 255), 2)

# Region-based segmentation via connected components
_, binary = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)
num_labels, labels = cv2.connectedComponents(binary)
regions = np.zeros_like(image)
for label in range(1, num_labels):
    regions[labels == label] = np.random.randint(0, 255, 3)  # Random color per region

# Display the results
for title, img in [("Edges", edges), ("Circles", image), ("Regions", regions)]:
    cv2.imshow(title, img)
cv2.waitKey(0)
cv2.destroyAllWindows()

*** smoothing_sharpening_unsharp ***

import cv2
import numpy as np

# Load the image directly in grayscale
gray_image = cv2.imread("elephant2.jpeg", cv2.IMREAD_GRAYSCALE)

# Apply smoothing, sharpening, and unsharp masking
smoothed_image = cv2.GaussianBlur(gray_image, (5, 5), 0)
sharpened_image = cv2.filter2D(gray_image, -1, np.array([[0, -1, 0],
                                                         [-1, 5, -1],
                                                         [0, -1, 0]]))
unsharp_image = cv2.addWeighted(gray_image, 1.5, cv2.GaussianBlur(gray_image, (5, 5), 0), -0.5, 0)

# Display the images
cv2.imshow("Original Image", gray_image)
cv2.imshow("Smoothed Image", smoothed_image)
cv2.imshow("Sharpened Image", sharpened_image)
cv2.imshow("Unsharp Masking", unsharp_image)
cv2.waitKey(0)
cv2.destroyAllWindows()

*** sobel_edge_detect ***

import cv2
import numpy as np

# Load the image in grayscale
image = cv2.imread("corners.jpg", cv2.IMREAD_GRAYSCALE)

# Sobel kernels for horizontal and vertical edges
SobelX = np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]], dtype=np.float32)
SobelY = np.array([[-1, -2, -1], [0, 0, 0], [1, 2, 1]], dtype=np.float32)

# Filter with a float output depth so negative gradient values are not clipped
gradient_x = cv2.filter2D(image, cv2.CV_32F, SobelX)
gradient_y = cv2.filter2D(image, cv2.CV_32F, SobelY)

# Compute the gradient magnitude and scale it back to 8-bit for display
edges = cv2.convertScaleAbs(cv2.magnitude(gradient_x, gradient_y))

# Display the images
cv2.imshow("Original Image", image)
cv2.imshow("Sobel Edge Detection", edges)
cv2.waitKey(0)
cv2.destroyAllWindows()
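*** sobel_edge_detect - built-in variant (added sketch) ***

A minimal sketch, not part of the original exercise set, showing that the hand-rolled Sobel kernels above are equivalent to OpenCV's built-in cv2.Sobel. It assumes the same "corners.jpg" input.

import cv2

# Assumed input: the same "corners.jpg" used in the sobel_edge_detect exercise
image = cv2.imread("corners.jpg", cv2.IMREAD_GRAYSCALE)

# cv2.Sobel applies the same 3x3 derivative kernels; CV_32F keeps negative gradients
gradient_x = cv2.Sobel(image, cv2.CV_32F, 1, 0, ksize=3)
gradient_y = cv2.Sobel(image, cv2.CV_32F, 0, 1, ksize=3)

edges = cv2.convertScaleAbs(cv2.magnitude(gradient_x, gradient_y))

cv2.imshow("Sobel via cv2.Sobel", edges)
cv2.waitKey(0)
cv2.destroyAllWindows()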
cv2.imshow("Original Image", image) cv2.imshow("Sobel Edge Detection", edges.astype(np.uint8)) cv2.waitKey(0) cv2.destroyAllWindows() ***template_matching*** import cv2 # Load images in grayscale image = cv2.imread("elephant2.jpeg", cv2.IMREAD_GRAYSCALE) template = cv2.imread("elephant_template.jpeg", cv2.IMREAD_GRAYSCALE) # Match template result = cv2.matchTemplate(image, template, cv2.TM_CCOEFF_NORMED) top_left = cv2.minMaxLoc(result)[3] # Get max_loc directly w, h = template.shape[::-1] # Draw rectangle on the matched area cv2.rectangle(image, top_left, (top_left[0] + w, top_left[1] + h), 255, 2) # Display results cv2.imshow("Matched Image", image) cv2.waitKey(0) cv2.destroyAllWindows() **thresholding*** import cv2 image_path = "elephant2.jpeg" image = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE) threshold = 125 #0 - 255 _,binary_image = cv2.threshold(image, threshold, 255, cv2.THRESH_BINARY)#250 > 128 => 255 cv2.imshow("Original Image", image) cv2.imshow("Thresholded Image", binary_image) cv2.waitKey(0) cv2.destroyAllWindows() ****up_down scaling** import cv2 # Load the image image = cv2.imread("elephant2.jpeg") # Downsample and Upsample downsampled_image = cv2.resize(image, (image.shape[1] // 2, image.shape[0] // 2)) upsampled_image = cv2.resize(image, (int(image.shape[1] * 1.5), int(image.shape[0] * 1.5))) # Display images cv2.imshow("Original Image", image) cv2.imshow("Downsampled Image", downsampled_image) cv2.imshow("Upsampled Image", upsampled_image) cv2.waitKey(0) cv2.destroyAllWindows()