I have the data table below, which was built from PDF, text, and Word files. I classified the various headers into modules using a dictionary approach. Now I need to pull the values out of the sentences and turn them into structured data.
Currently the data looks like this:
header                            Module
ADDITIONAL BONUS                  Payroll Pay Element
EMPLOYEE ID: 4564576              Employee ID
AMOUNT: 1200 USD                  Amount
EMPLOYEE NAME: ANDY CHEN          Employee Name
SPOT AWARD                        Payroll Pay Element
EMPLOYEE ID: 7463453              Employee ID
AMOUNT: 200 USD                   Amount
EMPLOYEE NAME: MICHAEL HISHAM     Employee Name
REALOCATION BONUS                 Payroll Pay Element
EMPLOYEE ID: 7467673              Employee ID
AMOUNT: 1400 USD                  Amount
EMPLOYEE NAME: AYMAN HISHAM       Employee Name
REALOCATION BONUS                 Payroll Pay Element
EMPLOYEE ID: 7467673              Employee ID
AMOUNT: 1400 USD                  Amount
EMPLOYEE NAME: AYMAN HISHAM       Employee Name
I am not sure how to get to the desired result in a structured format like the one below:
Employee ID   Employee Name    Payroll Pay Element   Amount
4564576       ANDY CHEN        ADDITIONAL BONUS      1200 USD
7463453       MICHAEL HISHAM   SPOT AWARD            200 USD
7467673       AYMAN HISHAM     REALOCATION BONUS     1400 USD
7467673       AYMAN HISHAM     REALOCATION BONUS     1400 USD
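For reference, here is a minimal sketch of one possible way to pivot the classified rows into that layout. It assumes the header/Module table above is available as a DataFrame named df, uses a small hypothetical sample in that shape, and treats each "Payroll Pay Element" row as the start of a new record; none of these assumptions come from the original pipeline.

import pandas as pd

# Hypothetical sample in the same shape as the header/Module table above.
df = pd.DataFrame({
    'header': ['ADDITIONAL BONUS', 'EMPLOYEE ID: 4564576', 'AMOUNT: 1200 USD',
               'EMPLOYEE NAME: ANDY CHEN', 'SPOT AWARD', 'EMPLOYEE ID: 7463453',
               'AMOUNT: 200 USD', 'EMPLOYEE NAME: MICHAEL HISHAM'],
    'Module': ['Payroll Pay Element', 'Employee ID', 'Amount', 'Employee Name'] * 2,
})

# Each 'Payroll Pay Element' row starts a new record, so a cumulative count
# of those rows gives every line a record id.
df['record'] = (df['Module'] == 'Payroll Pay Element').cumsum()

# Keep only the value part of "LABEL: VALUE" lines; pay-element lines have no label.
df['value'] = df['header'].str.split(':', n=1).str[-1].str.strip()

# Pivot so each Module becomes a column of the structured output.
out = (df.pivot(index='record', columns='Module', values='value')
         .reset_index(drop=True)
         [['Employee ID', 'Employee Name', 'Payroll Pay Element', 'Amount']])
print(out)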
Following is the code I used to produce the first table from the various documents:
import os
import re
import subprocess
import numpy as np
import pandas as pd
import nltk
from nltk.tokenize import word_tokenize
from nltk.util import ngrams
####################################PATH TO SAVE FILES###########################################
os.chdir(r"C:\Analytics\Microsoft\One_Time_Payment")
###################Converting CVs from Various format to txt Format ###########
path_PDF = r"\pdf"
path_DOC = r"\word"
path_TXT = r"\txt"
path_ERROR=r"\ERROR"
try:
    with subprocess.Popen([os.path.join(path_DOC, "docto.exe"), '-f', path_DOC, '-O', path_TXT, '-T', 'wdFormatTEXT'],
                          stdout=subprocess.PIPE) as proc:
        print(proc.stdout.read())
except Exception:
    pass
all_pdf = os.listdir(path_PDF)
for each_file in all_pdf:
    print("DEL : ", each_file)
    print("Test above")
    if each_file.endswith('.pdf'):
        # cnd_id = (re.findall('\d+', each_file ))
        txt_filename = each_file.partition('.')[0] + ".txt"
        subprocess.call([os.path.join(path_PDF, "pdftotext.exe"), '-raw', '-eol', 'dos', '-layout',
                         os.path.join(path_PDF, each_file),
                         os.path.join(path_TXT, txt_filename)])
#####################Processing texts ###########################################
counter = 1
all_text = os.listdir(path_TXT)
#all_text = [os.path.splitext(filename)[0] for filename in os.listdir(path_TXT)]
# only in case an extra file is getting created
#error_file=pd.DataFrame(index=['sno'],columns=['msg'])
for each_file in all_text:
    try:
        print(os.path.join(path_TXT, each_file))
        inpdata = pd.read_table(os.path.join(path_TXT, each_file), header=None, encoding="ISO-8859-1", sep='\n')
        # cnd_id = (re.findall('\d{6}', each_file ))
    except Exception as e1:
        try:
            inpdata = pd.read_table(os.path.join(path_TXT, each_file), header=None, engine='python',
                                    encoding="ISO-8859-1", sep='\n', error_bad_lines=False)
        except Exception as ec:
            print("Python Engine E ::>> ", ec)
            str_err = "Could not process the file %s" % (each_file)
            print(str_err)
            continue
    # try:
    #     inpdata = pd.read_table(os.path.join(path_TXT, each_file), header=None, encoding="ISO-8859-1", sep='\n', error_bad_lines=False)
    # except Exception as ec:
    #     error_file["msg"] = str_err
    inpdata.columns = ["Sentences"]
    inpdata['Sentences'] = inpdata['Sentences'].str.upper()
    # inpdata['Module'] = ''
    inpdata_bck = inpdata
    inpdata_bck['sent_id'] = range(0, len(inpdata_bck))
    # inpdata_bck['cnd_id'] = str(cnd_id)
    # inpdata['num_words'] = 0
    inpdata['CID'] = each_file.partition('.')[0]
    inpdata.dropna(subset=["Sentences"], inplace=True)
    inpdata = inpdata.reset_index()
    del inpdata['index']
    inpdata_bck.to_csv(each_file + '.csv')
    if (counter == 1):
        all_info = inpdata
        # err_file = error_file
        # err_file1 = error_file1
    else:
        all_info = pd.concat([all_info, inpdata])
        # err_file = pd.concat([err_file, error_file])
        # err_file1 = pd.concat([err_file1, error_file1])
    counter = counter + 1
#Removing special characters to avoid the import #Name issue
all_info['Sentences']= all_info['Sentences'].map(lambda x: x.lstrip('+-'))
all_info = all_info.reset_index()
del all_info['index']
mod = pd.read_excel(r"Payroll_dict.xlsx", sheetname='Payroll Dictionary')
all_info['num_words'] = 0
all_info['Module'] = ''
all_info = all_info[pd.notnull(all_info['Sentences'])]
all_info = all_info.reset_index()
del all_info['index']
categoryDict = {}
modules = list(set(mod['Module']))
for module in modules:
    categoryDict[module] = mod.loc[mod['Module'] == module, 'Header']
#Function for Module Assignment
def module_assign(inpdata):
    ######################### Module Creation ##############################
    curr_sent_value = inpdata['Sentences']
    if inpdata['num_words'] <= 0:
        list1 = ["EMPLOYEE ID:", "REFERENCE ID"]
        if curr_sent_value in list1:
            return 'Employee ID'
        list2 = ["Amount", "AMOUNTS"]
        if curr_sent_value in list2:
            return 'Amount'
    else:
        for key in categoryDict.keys():
            for word in categoryDict[key]:
                match = re.findall(word, curr_sent_value, re.I | re.M)
                if len(match) > 0:
                    return key
for CID, cid_data in all_info.groupby('CID'):
    inpdata = all_info
    inpdata = inpdata.reset_index()
    # del inpdata['index']
    inpdata['header'] = " "
    # inpdata['clean_sent'] = inpdata['Sentences'].map(reg_clean)
    inpdata['num_words'] = inpdata['Sentences'].map(lambda x: len(x.split()))
    inpdata['header'] = np.where(inpdata['num_words'] <= 5, inpdata['Sentences'], '')
    inpdata = inpdata[inpdata['num_words'] > 0]
    inpdata['Module'] = inpdata.apply(module_assign, axis=1)
    def split_the_sentence_to_words_rem_stopwrds(p_sentence):
        return [token for token in word_tokenize(p_sentence)]
    inpdata['split_wrd'] = inpdata['Sentences'].map(split_the_sentence_to_words_rem_stopwrds)