I found the answer. The script needs to create a publishing page instead of a list item; it then attaches the appropriate Wiki Page Layout and sets everything up for you. Here is my new script, for anyone else who may run into the same problem. Note that it also replaces the DokuWiki links with SharePoint links.
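The core of the change is that each page is created through the publishing API with an Enterprise Wiki page layout attached, rather than added as a plain list item. Stripped down to just that step, it looks roughly like this (a sketch reusing the same objects and variable names as the full script below; $html stands in for whatever parsed HTML you want in the page body):

$pweb = [Microsoft.SharePoint.Publishing.PublishingWeb]::GetPublishingWeb($web) #Publishing view of the web
$ctype = $psite.ContentTypes["Enterprise Wiki Page"] #Enterprise Wiki Page content type
$layout = $psite.GetPageLayouts($ctype, $true)[0] #First page layout associated with that content type
$page = $pweb.GetPublishingPages().Add($strDestURL, $layout) #Create the page with that layout
$page.ListItem["Page Content"] = $html #Fill the body, then call Update() on the list item

Here is the full script: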
##################################################################################################################
# Wiki Page Creator #
##################################################################################################################
[System.Reflection.Assembly]::LoadWithPartialName("Microsoft.SharePoint") #Load the core SharePoint API
[System.Reflection.Assembly]::LoadWithPartialName("Microsoft.SharePoint.Publishing") #Load the publishing API (PublishingSite, PublishingWeb, PublishingPage)
#Commonly Used Variables To Set
$baseSite = "http://sharepointSite" #Main Sharepoint Site
$subSite = "/sites/subSite" #Sharepoint Wiki sub-site
$pageFolder = "Pages" #Directory of Wiki Pages
$fileFolder = "Media" #Directory of Files
## Setup Basic sites and pages
$site = New-Object Microsoft.SharePoint.SPSite($baseSite+$subSite) #Sharepoint Site
$psite = New-Object Microsoft.SharePoint.Publishing.PublishingSite($baseSite+$subSite) #Publishing Site
$ctype = $psite.ContentTypes["Enterprise Wiki Page"] #Get Enterprise Wiki page content type
$layouts = $psite.GetPageLayouts($ctype, $true) #Get Enterprise Wiki page layout
$layout = $layouts[0] #Choose first layout
$web = $site.OpenWeb(); #Site.Rootweb
$pweb = [Microsoft.SharePoint.Publishing.PublishingWeb]::GetPublishingWeb($web) #Get the Publishing Web Page
$pages = $pweb.GetPublishingPages() #Get the collection of publishing pages in the web
## Get files in exported folder and parse them
$files=get-childitem ./testEx -rec|where-object {!($_.psiscontainer)} #Get all files recursively
foreach ($file in $files) { #Foreach file in folder(s)
$name=$file.Name.Substring(0, $file.Name.IndexOf(".htm")) #Get file name, stripped of extension
#Prep Destination url for new pages
$strDestURL=$subSite+"/"+$pageFolder+"/"+$name+".aspx"
#End Prep
$page = $pages.Add($strDestURL, $layout) #Add a new page to the collection with Wiki layout
$item = $page.ListItem #Get list item of the page to access fields
$item["Title"] = $name; #Set Title to file name
$cont = Get-Content $file.FullName #Read the contents of the file (full path, so files in subfolders resolve)
[string]$cont1 = "" #String to hold contents after parsing
### HTML PARSING
foreach($line in $cont){ #Get each line in the contents
$mod = $line #Working copy of the line; the replacements below operate on $mod
#############################################
# Replacing Doku URI with Sharepoint URI #
#############################################
## Matching for hyperlinks and img src
$mat = $mod -match ".*href=`"/Media.*`"" #Find href since href and img src have same URI Structure
if($mat){ #If a match is found
foreach($i in $matches){ #Cycle through all matches
[string]$j = $i[0] #Set match to a string
$j = $j.Substring($j.IndexOf($fileFolder)) #Shorten string for easier parsing
$j = $j.Substring(0, $j.IndexOf("amp;")+4) #Remove end for easier parsing
$mod = $mod.Replace($j, $j.Substring(0, $j.IndexOf("id="))+$j.Substring($j.IndexOf("&")+5)) #Replace all occurrences of the original with the two rejoined sections
}
}
## Matching for files and images
$mat = $mod -match "`"/Media.*`"" #Find all Media resources
if($mat){ #If match is found
[string]$j = $matches[0] #Set match to a string
$j = $j.Substring(0, $j.IndexOf("class")) #Shorten string for easier parsing
$j = $j.Substring($j.IndexOf($fileFolder)) #Shorten: start the string at the Media folder
$j = $j.Substring(0, $j.LastIndexOf(":")+1) #Remove end for easier parsing
$mod = $mod.Replace($j, $j.Substring(0, $j.IndexOf($fileFolder)+5)+$j.Substring($j.LastIndexOf(":")+1)) #Replace all occurrences of the original with the two rejoined sections
}
$mod = $mod.Replace("/"+$fileFolder, $subSite+"/"+$fileFolder+"/") #make sure URI contains base site
## Matching for links
$mat = $mod -match "`"/Page.*`"" #Find all Page files
if($mat){ #If match is found
[string]$j = $matches[0] #Set match to a string
if($j -match "start"){ #If it is a start page,
$k = $j.Replace(":", "-") #replace : with a - instead of removing to keep track of all start pages
$k = $k.Replace("/"+$pageFolder+"/", "/"+$pageFolder) #Remove following / from Pages
$mod = $mod.Replace($j, $k) #Replace old string with the remade one
}
else{ #If it is not a start page
$j = $j.Substring(0, $j.IndexOf("class")) #Stop the string at the end of the href so as not to snag any other :
$j = $j.Substring($j.IndexOf($pageFolder)) #Start at Pages in URI
$j = $j.Substring(0, $j.LastIndexOf(":")+1) #Now limit down to last :
$mod = $mod.Replace($j, $j.Substring(0, $j.IndexOf($pageFolder)+5)+$j.Substring($j.LastIndexOf(":")+1)) #Replace all occurrences of the original with the two rejoined sections
}
[string]$j = $mod #Set the match to a new string
$j = $j.Substring(0, $j.IndexOf("class")) #Stop at class to limit extra "
$j = $j.Substring($j.IndexOf($pageFolder)) #Start at Pages in URI
$j = $j.Substring(0, $j.LastIndexOf("`"")) #Grab ending "
$rep = $j+".aspx" #Add .aspx to the end of URI
$mod = $mod.Replace($j, $rep) #Replace the old URI with the new one
}
$mod = $mod.Replace("/"+$pageFolder, $subSite+"/"+$pageFolder+"/") #Add base site to URI
$cont1 += $mod #Add parsed line to the new HTML string
}
##### END HTML PARSING
$item["Page Content"] = $cont1 #Set Wiki page's content to new HTML
$item.Update() #Update the page to set the new fields
}
$web.Dispose() #Dispose of the open web
$site.Dispose() #Dispose of the open site collection
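
One thing worth checking afterwards: depending on how the Pages library is configured (require check-out, minor versioning, content approval), the imported pages may be left checked out to the account running the script. If that happens, a small addition inside the file loop, right after $item.Update(), can check each page in and publish it. This is only a sketch and only applies when those library settings are turned on:

$page.CheckIn("Imported from DokuWiki") #Check the page in if the library requires check-out
$page.ListItem.File.Publish("Imported from DokuWiki") #Publish a major version if minor versioning is enabled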