
Building an Image Data Cleaning & Image Quality Checking Tool in Python

As the number of images in our collected datasets keeps growing, a variety of problems have appeared: messy data formats, inconsistent quality, and corrupted files.

This program provides a set of image data cleaning and image quality checking features to prevent exceptions when a model loads the data for training.

1. Read each image with several different libraries to check whether it is corrupted

2. Read image EXIF information to prevent annotation anomalies caused by orientation metadata

3. Record image information:

① Encoding format, resolution, channel count, and file size, to help judge other image attributes

② MD5, PHash16, and similar fingerprints, to detect duplicate images

③ Peak signal-to-noise ratio (PSNR), structural similarity (SSIM), and other metrics, to judge image quality (see the PSNR sketch below)
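The complete script below computes SSIM inside check_blur but never actually computes PSNR, so here is a minimal sketch of how a PSNR check could be added, using OpenCV's built-in cv2.PSNR. The function name compute_psnr and the file paths are illustrative assumptions; both images must load successfully and share the same shape:

import cv2

def compute_psnr(ref_path, test_path):
    """Minimal sketch: PSNR (in dB) between a reference and a test image.
    Returns None when either image fails to load or the shapes differ."""
    ref = cv2.imread(ref_path)
    test = cv2.imread(test_path)
    if ref is None or test is None or ref.shape != test.shape:
        return None
    # cv2.PSNR compares pixel-wise; higher values mean closer to the reference
    return cv2.PSNR(ref, test)

# Illustrative usage: compare a re-encoded copy against its original
# print(compute_psnr("original.jpg", "reencoded.jpg"))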

Complete code

#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Purpose: image data cleaning & image quality checking
# Author: AYangSN
# Date: 2025-03-12
# Version: 1.0


# Imports
import csv
import os
import shutil
import hashlib

import cv2
import imagehash
import numpy as np
from tqdm import tqdm
from PIL import Image, ImageOps, ExifTags
from concurrent.futures import ThreadPoolExecutor, as_completed
from skimage.metrics import structural_similarity as ssim
from scipy.stats import entropy


def check_image_with_pil(filepath):
    """Use PIL to check whether an image is corrupted."""
    try:
        img = Image.open(filepath)
        img.verify()  # verify image integrity
        img = Image.open(filepath)  # reopen, since verify() leaves the file unusable
        return True, img
    except Exception as e:
        return False, str(e)

def check_image_with_opencv(filepath):
    """Use OpenCV to check whether an image is corrupted."""
    try:
        image = cv2.imread(filepath)
        if image is None or image.size == 0:
            return False, "OpenCV could not load the image"
        return True, image
    except Exception as e:
        return False, str(e)

def check_file_header(filepath):
    """Check that the image format is correct by reading the file's magic bytes."""
    valid_headers = {
        'JPEG': b'\xff\xd8\xff',
        'PNG': b'\x89\x50\x4e\x47\x0d\x0a\x1a\x0a',
        'GIF87a': b'GIF87a',
        'GIF89a': b'GIF89a',
        'BMP': b'BM'
    }
    with open(filepath, 'rb') as f:
        header = f.read(8)  # the first 8 bytes are enough to cover all formats above
        for fmt, magic in valid_headers.items():
            if header.startswith(magic):
                return True, None
    return False, "Unknown file header"


def get_exif_orientation(image):
    """Return the EXIF orientation tag of a PIL image, or None if absent."""
    try:
        exif = image._getexif()
    except AttributeError:
        exif = None
    if exif is None:
        return None
    exif = {
        ExifTags.TAGS[k]: v
        for k, v in exif.items()
        if k in ExifTags.TAGS
    }
    # Get the image orientation tag
    orientation = exif.get('Orientation', None)
    return orientation


def exif_update_image_files(image, orientation, image_file, output_dir):
    """Rotate/flip the image according to its EXIF orientation and re-save it."""
    if orientation == 2:
        # left-to-right mirror
        image = ImageOps.mirror(image)
    elif orientation == 3:
        # rotate 180
        image = image.transpose(Image.ROTATE_180)
    elif orientation == 4:
        # top-to-bottom mirror
        image = ImageOps.flip(image)
    elif orientation == 5:
        # top-to-left mirror
        image = ImageOps.mirror(image.transpose(Image.ROTATE_270))
    elif orientation == 6:
        # rotate 270
        image = image.transpose(Image.ROTATE_270)
    elif orientation == 7:
        # top-to-right mirror
        image = ImageOps.mirror(image.transpose(Image.ROTATE_90))
    elif orientation == 8:
        # rotate 90
        image = image.transpose(Image.ROTATE_90)
    else:
        pass

    # Build the output path, grouped by orientation value
    outpath = "{}/{}".format(output_dir, orientation)
    os.makedirs(outpath, exist_ok=True)

    # Convert through OpenCV so the EXIF metadata is dropped
    img = cv2.cvtColor(np.asarray(image), cv2.COLOR_RGB2BGR)

    # Get the image file name
    _, imgname = os.path.split(image_file)

    # Re-save the image
    cv2.imwrite(os.path.join(outpath, imgname), img)


def compute_md5(filepath):
    """计算文件的MD5值"""
    hash_md5 = hashlib.md5()
    with open(filepath, "rb") as f:
        for chunk in iter(lambda: f.read(4096), b""):
            hash_md5.update(chunk)
    return hash_md5.hexdigest()


def compute_phash(imgpath, hash_size=16):
    """Compute the perceptual hash (pHash) of an image as a hex string."""
    img = Image.open(imgpath)
    phash = imagehash.phash(img, hash_size=hash_size, highfreq_factor=4)
    hex_string = str(phash)
    return hex_string


def diff_phash(p1, p2, hash_size=8):
    """Normalized Hamming distance between two pHash values (0 = identical)."""
    return (p1 - p2) / hash_size ** 2


def check_blur(image, ref_image=None):
    """Estimate image sharpness/quality with several complementary metrics."""
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    # Variance of the Laplacian
    laplacian_var = cv2.Laplacian(gray, cv2.CV_64F).var()

    # Fourier transform (the +1e-8 avoids log(0) on zero-magnitude bins)
    f = np.fft.fft2(gray)
    fshift = np.fft.fftshift(f)
    magnitude_spectrum = 20 * np.log(np.abs(fshift) + 1e-8)
    fourier_energy = np.sum(magnitude_spectrum) / (magnitude_spectrum.shape[0] * magnitude_spectrum.shape[1])

    # Tenengrad method (mean Sobel gradient magnitude)
    gradient_x = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=3)
    gradient_y = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=3)
    gradient_magnitude = np.sqrt(gradient_x**2 + gradient_y**2)
    tenengrad_value = np.mean(gradient_magnitude)

    # Histogram entropy
    hist = cv2.calcHist([gray], [0], None, [256], [0, 256])
    hist_norm = hist.ravel() / hist.max()
    entropy_value = entropy(hist_norm, base=2)

    # SSIM (only when a reference image is provided)
    ssim_score = None
    if ref_image is not None:
        gray_ref = cv2.cvtColor(ref_image, cv2.COLOR_BGR2GRAY)
        ssim_score, _ = ssim(gray, gray_ref, full=True)

    return laplacian_var, fourier_energy, tenengrad_value, entropy_value, ssim_score


def process_images(filepath, output_dir):
    # Get the file extension
    file_extension = os.path.splitext(filepath)[1].lower()

    # Check whether the image is corrupted
    pil_result, img_pil = check_image_with_pil(filepath)
    opencv_result, img_opencv = check_image_with_opencv(filepath)
    header_result, header_error = check_file_header(filepath)

    # If the image is intact, continue processing
    if pil_result and opencv_result and header_result:

        # Get the file size in bytes
        file_size = os.path.getsize(filepath)

        # Get the resolution
        width, height = img_pil.size

        # Get the color mode
        color_mode = img_pil.mode

        # Get the bit depth
        bit_depth = img_pil.bits if hasattr(img_pil, 'bits') else None

        # Get the channel count
        channels = len(color_mode) if isinstance(color_mode, str) else None

        # Get the compression type
        compression = img_pil.info.get('compression', 'Unknown')

        # Get the EXIF orientation
        orientation = get_exif_orientation(img_pil)

        # Re-save the image according to its orientation tag
        if not (orientation is None or orientation == 1):
            exif_update_image_files(img_pil, orientation, filepath, os.path.join(output_dir, 'exif'))

        # Compute the MD5 checksum
        md5_checksum = compute_md5(filepath)

        # Compute the pHash16 fingerprint
        hex_string = compute_phash(filepath, hash_size=16)

        # # Get the histogram
        # hist = img_pil.histogram()

        laplacian_var, fourier_energy, tenengrad_value, entropy_value, ssim_score = check_blur(img_opencv)

        log_entry = {
            'filename': filepath,
            'file_extension': file_extension,
            'pil_check': pil_result,
            'opencv_check': opencv_result,
            'header_check': header_result,
            'header_error': header_error,
            'file_size': file_size,
            'resolution': (width, height),
            'color_mode': color_mode,
            'bit_depth': bit_depth,
            'channels': channels,
            'compression': compression,
            'exif_data': orientation,
            'md5_checksum': md5_checksum,
            'phash16_checksum': hex_string,
            'laplacian_var': laplacian_var,
            'fourier_energy': fourier_energy,
            'tenengrad_value': tenengrad_value,
            'entropy_value': entropy_value,
            'ssim_score': ssim_score
        }
    else:
        log_entry = {
            'filename': filepath,
            'file_extension': file_extension,
            'pil_check': pil_result,
            'opencv_check': opencv_result,
            'header_check': header_result,
            'header_error': header_error,

        }
        # Copy the corrupted file into the output directory
        os.makedirs(os.path.join(output_dir, 'broken'), exist_ok=True)
        shutil.copy(filepath, os.path.join(output_dir, 'broken'))

    # Print the result
    print(f"File: {filepath}")
    print(f"PIL check: {'passed' if pil_result else 'failed'}")
    print(f"OpenCV check: {'passed' if opencv_result else 'failed'}")
    print(f"Header check: {'passed' if header_result else 'failed'} - {header_error}")
    print("-" * 40)

    return log_entry


def write_to_csv(log_entries, output_path):
    fieldnames = [
        'filename', 'file_extension', 'pil_check', 'opencv_check', 'header_check', 'header_error',
        'file_size', 'resolution', 'color_mode', 'bit_depth', 'channels', 'compression', 'exif_data',
        'md5_checksum', 'phash16_checksum',
        'laplacian_var', 'fourier_energy', 'tenengrad_value', 'entropy_value', 'ssim_score'
    ]
    mode = 'a' if os.path.exists(output_path) else 'w'
    with open(output_path, mode, newline='', encoding='utf-8-sig') as csvfile:
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
        if mode == 'w':
            writer.writeheader()
        for entry in log_entries:
            writer.writerow(entry)


def main(input_dir, output_dir):
    os.makedirs(output_dir, exist_ok=True)
    output_csv_path = os.path.join(output_dir, 'image_integrity_report.csv')

    filepaths = []
    # Walk the input directory recursively, including subdirectories
    for root, dirs, fs in tqdm(os.walk(input_dir), desc='Scanning directories...'):
        filepaths.extend([os.path.join(root, f) for f in fs if f.lower().endswith(('.png', '.jpg', '.jpeg', '.bmp', '.gif', '.tiff'))])
    print(f"Found {len(filepaths)} images to process.")

    # Process the files in parallel with a thread pool
    batch_size = 100  # number of results flushed to CSV per batch
    with ThreadPoolExecutor(max_workers=4) as executor:
        futures = {executor.submit(process_images, fp, output_dir): fp for fp in filepaths}
        processed_entries = []
        for future in tqdm(as_completed(futures), desc='Writing CSV...'):
            try:
                log_entry = future.result()
                processed_entries.append(log_entry)
                # Flush to CSV once a full batch has accumulated
                if len(processed_entries) >= batch_size:
                    write_to_csv(processed_entries, output_csv_path)
                    processed_entries.clear()
            except Exception as exc:
                print(f'{futures[future]} generated an exception: {exc}')

        # Write out any remaining entries
        if processed_entries:
            write_to_csv(processed_entries, output_csv_path)

    print("Report generated.")


if __name__ == "__main__":
    # Example usage
    input_directory = "your_inputpath"
    output_directory = "your_outputpath"
    main(input_directory, output_directory)
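
The report CSV stores each file's MD5 and pHash16 as hex strings, but the script itself never flags duplicates. As a follow-up step, here is a minimal sketch of how duplicates could be found from that report; the pandas dependency, the report path, and the 0.1 distance threshold are assumptions, and imagehash.hex_to_hash turns the stored hex strings back into comparable hash objects:

import itertools
import imagehash
import pandas as pd

# Hypothetical post-processing of the report produced by main()
df = pd.read_csv("your_outputpath/image_integrity_report.csv")
df = df.dropna(subset=['md5_checksum', 'phash16_checksum'])

# Exact duplicates: identical MD5 digests
exact_dupes = df[df.duplicated('md5_checksum', keep=False)]
print(f"{len(exact_dupes)} files share an MD5 with another file")

# Near-duplicates: small normalized pHash Hamming distance,
# matching diff_phash above with hash_size=16
hashes = [(row.filename, imagehash.hex_to_hash(row.phash16_checksum))
          for row in df.itertuples()]
for (f1, h1), (f2, h2) in itertools.combinations(hashes, 2):
    if (h1 - h2) / (16 ** 2) < 0.1:  # 0.1 is an assumed threshold; tune on your data
        print(f"possible near-duplicate: {f1} <-> {f2}")

Note that the pairwise comparison is O(n²); for large datasets, bucketing on hash prefixes or a BK-tree would scale better.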

This concludes this article on building an image data cleaning & image quality checking tool in Python. For more on Python image data cleaning and quality checking, search 编程客栈 (www.devze.com) for earlier articles or continue browsing the related articles below. We hope you will keep supporting 编程客栈 (www.devze.com)!
