как определить, светлый или темный фон по сравнению с текстом на изображении, используя python - PullRequest
0 голосов
/ 06 мая 2020

[изображение 1] [изображение 2] Для применения некоторых функций в моём проекте мне нужно заранее знать, тёмный или светлый текст относительно фона.

Я попытался удалить фон, затем применил дилатацию, инвертировал изображение, преобразовал его в двоичное и нашёл контуры. Если получается только один контур, я считаю, что это тёмный текст на светлом фоне. Но проблема в том, что даже для светлого текста на тёмном фоне я тоже получаю только один контур.

import cv2
import numpy as np


def _background_diff_planes(image):
    """Estimate and subtract the background of each colour plane.

    Two background estimates are built per plane:
      * "blur": two passes of a large (21x21) median filter;
      * "dilate": a 7x7 dilation (erases dark text) followed by one median pass.
    Each estimate is subtracted from the plane with absdiff and inverted, so
    text regions come out dark on a light background.

    Returns a 4-tuple of merged BGR images:
    (diff_blur, norm_blur, diff_dilate, norm_dilate), where the ``norm_*``
    images are min-max stretched to the full 0..255 range.
    """
    diff_blur_planes, diff_dil_planes = [], []
    norm_blur_planes, norm_dil_planes = [], []
    kernel = np.ones((7, 7), np.uint8)
    for plane in cv2.split(image):
        bg_blur = cv2.medianBlur(cv2.medianBlur(plane, 21), 21)
        bg_dil = cv2.medianBlur(cv2.dilate(plane, kernel), 21)
        diff_blur = 255 - cv2.absdiff(plane, bg_blur)
        diff_dil = 255 - cv2.absdiff(plane, bg_dil)
        diff_blur_planes.append(diff_blur)
        diff_dil_planes.append(diff_dil)
        # Stretch each difference image to the full 0..255 range; passing
        # dst=None with an explicit dtype replaces the hand-allocated
        # (h, w, 1) buffers of the original code.
        norm_blur_planes.append(
            cv2.normalize(diff_blur, None, 0, 255, cv2.NORM_MINMAX,
                          dtype=cv2.CV_8UC1))
        norm_dil_planes.append(
            cv2.normalize(diff_dil, None, 0, 255, cv2.NORM_MINMAX,
                          dtype=cv2.CV_8UC1))
    return (cv2.merge(diff_blur_planes), cv2.merge(norm_blur_planes),
            cv2.merge(diff_dil_planes), cv2.merge(norm_dil_planes))


def _exp_threshold(gray):
    """Exponential contrast transform followed by inverted Otsu binarisation.

    ``exp(gray * 0.02) - 1`` expands bright values (max ~exp(5.1) for a
    255 input); the result is re-normalised to 0..255, inverted, and
    binarised with Otsu's method.  Returns a uint8 image with values
    in {0, 255}.
    """
    out = np.exp(gray * 0.02) - 1.0
    cv2.normalize(out, out, 0, 255, cv2.NORM_MINMAX)
    inv = np.uint8(255 - out)
    _, thresh = cv2.threshold(inv, 0, 255,
                              cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    return thresh


def analyze_image(image):
    """Run the full text/background pipeline on one BGR image.

    Resizes to 800x600, removes the background per plane, applies the
    exponential transform + Otsu threshold to both background estimates,
    and intersects the two binary masks.

    Returns ``(resized_image, mask, contours)`` where ``mask`` is the
    combined binary image and ``contours`` its external contours.
    """
    image = cv2.resize(image, (800, 600), interpolation=cv2.INTER_AREA)
    _, norm_blur, _, norm_dil = _background_diff_planes(image)
    thresh_blur = _exp_threshold(cv2.cvtColor(norm_blur, cv2.COLOR_BGR2GRAY))
    thresh_dil = _exp_threshold(cv2.cvtColor(norm_dil, cv2.COLOR_BGR2GRAY))
    res = cv2.bitwise_and(thresh_blur, thresh_dil)
    # BUGFIX: findContours returns 3 values in OpenCV 3.x and 2 in 4.x;
    # taking the second-to-last element works on both.
    cnts = cv2.findContours(res, cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)[-2]
    return image, res, cnts


def main():
    """Process the 38 test images and report/draw large contours."""
    for f in range(38):
        name = "font20_" + str(f + 1) + ".jpg"
        print(name)
        image = cv2.imread("C:/Users/HP/Desktop/FRN/fontsize20/" + name)
        # BUGFIX: cv2.imread silently returns None on a missing/unreadable
        # file; the original code then crashed inside cv2.resize.
        if image is None:
            print("could not read " + name)
            continue
        image, res, cnts = analyze_image(image)
        ima = image.copy()
        cv2.imshow("original", image)
        cv2.imshow("Old_res", res)
        print("No of Contours:" + str(len(cnts)))
        # BUGFIX: the original printed type(cnts[1]) unconditionally, which
        # raises IndexError whenever fewer than two contours are found.
        if cnts:
            print(type(cnts[0]))
        count = 0
        for c in cnts:
            area = cv2.contourArea(c)
            print("Area of Contour:" + str(area))
            print("No of Points:" + str(len(c)))
            # Only contours covering a large part of the 800x600 frame
            # (area > 125000 px) are drawn and counted.
            if area > 125000:
                cv2.drawContours(ima, [c], -1, (0, 255, 0), 2)
                count += 1
        print(count)


if __name__ == "__main__":
    main()
...