При запуске приложения выдается следующая ошибка:
Ошибка в .getReactiveEnvironment () $ currentContext (): операция запрещена без активного реактивного контекста. (Вы пытались сделать что-то, что можно сделать только из реактивного выражения или наблюдателя.)
Я впервые попробовал блестящие приложения, поэтому я мог упустить какой-то ключевой аспект, который Я не могу понять. Буду признателен, если кто-то может помочь.
Вот код, который я сделал:
UI:
library(shiny)
shinyUI(pageWithSidebar(
headerPanel(h1('Sentiment Analyzer for .txt Files'),
windowTitle = "Sentiment Analysis"),
# Getting User Inputs
sidebarPanel(
fileInput("fileInput","Please Upload the File"),
textInput("findPhr5ase", "Enter data to be searched"),
submitButton('Analyze')
),
mainPanel(
sidebarPanel(
verbatimTextOutput("file_Health1")
),
sidebarPanel(
textOutput("file_Health2")
),
sidebarPanel(
textOutput("search_Result1")
),
sidebarPanel(
verbatimTextOutput("search_Result2")
),
sidebarPanel(
verbatimTextOutput("search_Result3")
),
sidebarPanel(
wordcloud2Output("sentimentCloud", width = "100%", height = "400px")
),
sidebarPanel(
plotOutput("sentiment_Analysis", width = "40%", height = "200px")
),
sidebarPanel(
plotOutput("topSentiments", width = "40%", height = "200px")
),
sidebarPanel(
dataTableOutput("sentimentScores")
)
)
)
)
SERVER:
shinyServer <- function(input, output) {
observeEvent(input$analysis, {
#Function to read file
searchText <- reactive(renderText({input$findPhrase}))
#fileType <- renderText({input$selectType})
read_data_from_file <- function (){
text <- reactive(readtext({input$fileInput}))
text <- toString(text[,2])
text<- gsub("\n", "", text)
text <- gsub("[\r\n]", "", text)
return(text)
}
check_data <- function (){
if (is.null(read_data_from_file()) || is.null(searchText)) {
return("File or Search Text is empty")
}
else {
return("Starting to analyze file")
}
}
output$file_Health1 <- renderPrint(check_data())
#WORD COUNT
output$file_Health2 <- renderText(sapply(read_data_from_file(),stri_stats_latex)['Words',])
#Function to clean the text file
clean_data <- function(text){
#Remove NA values
NAindices = which(is.na(text))
if (is.numeric(NAindices) & length(NAindices) > 0) {
text <- text[-c(NAindices)]
}
#Remove single letters and hyphen
text <- gsub(pattern = "\\b[A-z]\\b{1}", replace = " ", text)
text <- gsub(pattern = "[-]", replace = " ", text)
#Conversion to corpus
docs <- Corpus(VectorSource(text))
#Cleaning the corpus
docs <- tm_map(docs, content_transformer(tolower))
docs <- tm_map(docs, removeNumbers)
#Remove Stopwords
docs <- tm_map(docs, removeWords, stopwords("english"))
docs <- tm_map(docs, removeWords, c("don", "s", "t"))
docs <- tm_map(docs, removePunctuation)
docs <- tm_map(docs, stripWhitespace)
#Rewriting as text file
writeCorpus(docs, filenames = "clean.txt")
#Reading cleaned text file
text1 <- readtext("clean.txt")
text1 <- toString(text1[,2])
return(text1)
}
#Function to tokenize the words
tokenize <- function(text){
words = tokenize_words(text)
return (words)
}
#Hashmap function to get frequency of all words
get_freq_map <- function(words) {
hashmap <- hashmap("character")
for(word in words){
hashmap[word] <- 0
}
for(word in words){
hashmap[word] <- as.numeric(hashmap[word]) + 1
}
return (hashmap)
}
#Calling the functions
text2 = tokenize(clean_data(read_data_from_file()))
freq_map = get_freq_map(text2[[1]])
#Function to find frequency of the word given as input
search_keyword_freq <- function(X) {
return (freq_map[X])
}
#Word count Frequency function
output$search_Result1 <-renderText(search_keyword_freq(searchText))
#Reading text file
text = read_data_from_file()
#Tokeninzing the sentences
sentences <- tokenize_sentences(text)
#Function to create a vector of line numbers where the searched word occurs
line_nums <- vector()
for (i in 1:length(sentences[[1]])) {
filtered_sent_words = tokenize(clean_data(sentences[[1]][i]))
sentence_freq_map = get_freq_map(filtered_sent_words[[1]])
freq <- tryCatch(sentence_freq_map[searchText], error = function(e) NULL)
if (!(is.null(freq))) {
if (freq > 0) {
line_nums <- c(line_nums, i)
}
}
}
#Printing the vector and sentences where the word occurs
output$search_Result2 <- renderPrint(line_nums)
output$search_Result3 <- renderPrint(for (i in 1:length(line_nums)) {
print(sentences[[1]][line_nums[i]])})
##################################################################
# Word Cloud
##################################################################
# take all the phrases
df_data <- data.frame(text = sapply(docs, as.character), stringsAsFactors = FALSE)
# add an id, from 1 to n
df_data$doc_id <- row.names(df_data)
#Tokenize
tidy_docs <- df_data %>% unnest_tokens(word, text)
par(mar = c(1,1,1,1))
# split all the words and Word Cloud
output$sentimentCloud <- renderWordcloud2(tidy_docs %>%inner_join(get_sentiments("bing")) %>%
count(word, sentiment, sort = TRUE) %>%acast(word ~ sentiment, value.var = "n", fill = 0) %>%
comparison.cloud(scale=c(4,.3), colors = c("#00B2FF", "red", "#FF0099", "#6600CC"), title.size = 1,
title.colors=c("red","blue"),title.bg.colors=c("grey40","grey70"),max.words = 100),
env = parent.frame(), quoted = FALSE)
####################################################################
# polarity
####################################################################
#calculate polarity on text column by grouping doc_id
df_polarity <- polarity(text.var = df_data$text,
grouping.var = df_data$doc_id,
constrain = TRUE,
polarity.frame = key.pol,
negators = qdapDictionaries::negation.words,
amplifiers = qdapDictionaries::amplification.words,
deamplifiers = qdapDictionaries::deamplification.words)
polarity_score <- scores(df_polarity)
polarity_res <- as.data.frame((counts(df_polarity)))
####################################################################
# sentiment analysis
####################################################################
df_data2 <- df_data$text
data_vec <- as.vector(df_data2)
df_emotion <- get_nrc_sentiment(data_vec)
df_emotion_sub <- df_emotion[c(1:8)]
#get the top emotions for each sentence
df_emotion_sub$top_emotions <- apply(df_emotion_sub[-1], 1, FUN = function(x)
paste(names(head(sort(x[x!=0], decreasing = TRUE), 3)), collapse=", "))
#build the final dataframe containing the results of polarity and sentiment analysis
pos_vector <- as.character(polarity_res$pos.words)
neg_vector <- as.character(polarity_res$neg.words)
neg_words <- polarity_res$neg.words
#creating the final data frame
df_final <- data.frame(
text = df_data$text,
words = polarity_res$wc,
positive_words = pos_vector,
negative_words = neg_vector,
polarity = polarity_score$ave.polarity,
emotions = df_emotion_sub$top_emotions
)
################################################
df_final$Pos_Neg <- ifelse(df_final$polarity > 0,"Positive",
ifelse(df_final$polarity < 0,"Negative","Neutral"))
output$sentiment_Analysis <- renderPlot(ggplot(df_final, aes(x = factor(1), y = polarity)) +
geom_boxplot(width = 0.4, fill = "white") +
geom_jitter(aes(color = Pos_Neg, shape = Pos_Neg),
width = 0.1, size = 1) +
scale_color_manual(values = c("#00AFBB", "#E7B800", "tomato1")) +
labs(x = NULL)) # Remove x axis label
#counting frequencies of emotions
f<-data.frame(table(unlist(strsplit(tolower(df_final$emotions), ", "," ,")))) %>% mutate(Freq = sort(Freq))
#######
bar <- ggplot(data = f) +
geom_bar(
mapping = aes(x = Var1, fill = Freq),
show.legend = TRUE,
width = 1
) +
theme(aspect.ratio = 1) +
labs(x = NULL, y = NULL)
output$topSentiments <- renderPlot(bar + coord_flip())
output$sentimentScores <- renderDataTable(df_final[["text","positive_words","negative_words","polarity"]])
})
}
Спасибо, что помогли мне с этим.