diff --git a/.Rproj.user/49462073/pcs/windowlayoutstate.pper b/.Rproj.user/49462073/pcs/windowlayoutstate.pper index bafcc38..5228825 100644 --- a/.Rproj.user/49462073/pcs/windowlayoutstate.pper +++ b/.Rproj.user/49462073/pcs/windowlayoutstate.pper @@ -7,7 +7,7 @@ }, "right": { "splitterpos": 531, - "topwindowstate": "MAXIMIZE", + "topwindowstate": "NORMAL", "panelheight": 959, "windowheight": 997 } diff --git a/.Rproj.user/49462073/pcs/workbench-pane.pper b/.Rproj.user/49462073/pcs/workbench-pane.pper index 4e0ea8f..d612aa7 100644 --- a/.Rproj.user/49462073/pcs/workbench-pane.pper +++ b/.Rproj.user/49462073/pcs/workbench-pane.pper @@ -1,5 +1,5 @@ { - "TabSet1": 4, + "TabSet1": 3, "TabSet2": 4, "TabZoom": {} } \ No newline at end of file diff --git a/.Rproj.user/49462073/rmd-outputs b/.Rproj.user/49462073/rmd-outputs index d3351e9..7164d7e 100644 --- a/.Rproj.user/49462073/rmd-outputs +++ b/.Rproj.user/49462073/rmd-outputs @@ -1,10 +1,8 @@ ~/GitHub/blog/_posts/2024-01-21-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r.html - - ~/GitHub/blog/_posts/2024-01-21-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r.html ~/GitHub/blog/_posts/2024-01-21-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r.html - - +~/GitHub/blog/_posts/2024-01-21-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r.html +~/GitHub/blog/_posts/2024-01-21-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r.html diff --git a/.Rproj.user/49462073/sources/prop/INDEX b/.Rproj.user/49462073/sources/prop/INDEX index c6d971a..756c4cd 100644 --- a/.Rproj.user/49462073/sources/prop/INDEX +++ b/.Rproj.user/49462073/sources/prop/INDEX @@ -72,6 +72,8 @@ D%3A%2FDropbox%2FWebsite%2Fblog_netlify_backup%2Fcontent%2Fhomepage%2Fabout.md=" ~%2FGitHub%2Fblog%2F_posts%2F2024-01-02-psychometric-network-analysis-using-r%2Fpsychometric-network-analysis-using-r.Rmd="5A7A69C2" ~%2FGitHub%2Fblog%2F_posts%2F2024-01-04-introduction-to-psychometric-network-analysis%2Fintroduction-to-psychometric-network-analysis.Rmd="AC22754E" ~%2FGitHub%2Fblog%2F_posts%2F2024-01-21-lexicon-based-sentiment-analysis-using-r%2Flexicon-based-sentiment-analysis-using-r.Rmd="C9B320E1" +~%2FGitHub%2Fblog%2F_posts%2F2024-01-31-lexicon-based-sentiment-analysis-using-r%2Flexicon-based-sentiment-analysis-using-r.Rmd="8A1009CD" +~%2FGitHub%2Fblog%2F_posts%2F2024-02-09-lexicon-based-sentiment-analysis-using-r%2Flexicon-based-sentiment-analysis-using-r.Rmd="0E458B78" ~%2FGitHub%2Fblog%2F_site.yml="62F30133" ~%2FGitHub%2Fblog%2Faa.R="828026A0" ~%2FGitHub%2Fblog%2Fabout.Rmd="37398C09" diff --git a/.Rproj.user/shared/notebooks/paths b/.Rproj.user/shared/notebooks/paths index da46072..6f77ba8 100644 --- a/.Rproj.user/shared/notebooks/paths +++ b/.Rproj.user/shared/notebooks/paths @@ -1,2 +1,4 @@ C:/Users/Okan/Documents/GitHub/blog/_posts/2024-01-21-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r.Rmd="DC0393F3" +C:/Users/Okan/Documents/GitHub/blog/_posts/2024-01-31-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r.Rmd="63BDC37F" +C:/Users/Okan/Documents/GitHub/blog/_posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r.Rmd="B3CD0F59" D:/Dropbox/Research Partnerships/Research with Cheryl/Data Analysis/Sentiment analysis_wave1.v2.R="546ABDFC" diff --git 
a/_posts/2024-01-21-lexicon-based-sentiment-analysis-using-r/Materials/Rapid assessment of communication consistency_sentiment analysis of public health briefings.pdf b/_posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/Materials/Rapid assessment of communication consistency_sentiment analysis of public health briefings.pdf similarity index 100% rename from _posts/2024-01-21-lexicon-based-sentiment-analysis-using-r/Materials/Rapid assessment of communication consistency_sentiment analysis of public health briefings.pdf rename to _posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/Materials/Rapid assessment of communication consistency_sentiment analysis of public health briefings.pdf diff --git a/_posts/2024-01-21-lexicon-based-sentiment-analysis-using-r/Materials/Using Data Mining for Rapid Complex Case Study.pdf b/_posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/Materials/Using Data Mining for Rapid Complex Case Study.pdf similarity index 100% rename from _posts/2024-01-21-lexicon-based-sentiment-analysis-using-r/Materials/Using Data Mining for Rapid Complex Case Study.pdf rename to _posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/Materials/Using Data Mining for Rapid Complex Case Study.pdf diff --git a/_posts/2024-01-21-lexicon-based-sentiment-analysis-using-r/apa.csl b/_posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/apa.csl similarity index 100% rename from _posts/2024-01-21-lexicon-based-sentiment-analysis-using-r/apa.csl rename to _posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/apa.csl diff --git a/_posts/2024-01-21-lexicon-based-sentiment-analysis-using-r/emoji.jpg b/_posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/emoji.jpg similarity index 100% rename from _posts/2024-01-21-lexicon-based-sentiment-analysis-using-r/emoji.jpg rename to _posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/emoji.jpg diff --git a/_posts/2024-01-21-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r.Rmd b/_posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r.Rmd similarity index 51% rename from _posts/2024-01-21-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r.Rmd rename to _posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r.Rmd index dad6974..95a242b 100644 --- a/_posts/2024-01-21-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r.Rmd +++ b/_posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r.Rmd @@ -1,7 +1,7 @@ --- title: "Lexicon-Based Sentiment Analysis Using R" description: | - Lorem ipsum dolor sit amet, consectetur adipiscing elit. Sed vehicula tempor bibendum. Sed augue turpis, efficitur ut ipsum quis, fermentum pulvinar quam. Proin molestie orci erat, et condimentum dui ornare ac. Quisque sit amet lacus id nisl pellentesque sagittis. Vestibulum non urna non eros condimentum commodo. Proin vitae nulla fermentum. + In this post, we will uncover the power of lexicon-based sentiment analysis using R. I demonstrate how to harness the capabilities of lexicons like NRC and Bing to decipher the emotional pulse of your text data. With practical examples, you'll gain the skills to analyze sentiment scores and extract valuable insights from your textual data sets. 
```{r, include=FALSE} bytes <- file.size("lexicon-based-sentiment-analysis-using-r.Rmd") words <- bytes/10 @@ -14,7 +14,7 @@ author: affiliation: University of Alberta affiliation_url: https://www.ualberta.ca orcid_id: 0000-0001-5853-1267 -date: 2024-01-31 +date: 2024-02-09 categories: - data science - natural language processing @@ -27,7 +27,6 @@ output: distill::distill_article: self_contained: false toc: true -draft: true --- @@ -52,21 +51,21 @@ suppressWarnings({ ## Introduction -During the COVID-19 pandemic, I decided to learn a new statistical technique to keep my mind occupied instead of watching the news all the time. Among the several techniques I reviewed, those related to natural language processing (NLP) were the most interesting ones. So, I decided to choose one technique from this field and learn more about it. This was sentiment analysis, which is also referred to as "opinion mining" in the literature. This analytical approach enables researchers to extract and interpret emotions expressed towards a particular subject in a written text. With sentiment analysis, one can discern the polarity (positive or negative), nature, and intensity of sentiments expressed across various textual formats such as documents, customer reviews, and social media posts. +During the COVID-19 pandemic, I decided to learn a new statistical technique to keep my mind occupied rather than constantly immersing myself in pandemic-related news. After evaluating several options, I found the concepts related to natural language processing (NLP) particularly captivating. So, I opted to delve deeper into this field and explore one specific technique: sentiment analysis, also known as "opinion mining" in academic literature. This analytical method empowers researchers to extract and interpret the emotions conveyed toward a specific subject within written text. Through sentiment analysis, one can discern the polarity (positive or negative), nature, and intensity of sentiments expressed across various textual formats such as documents, customer reviews, and social media posts. -Amidst the COVID-19 pandemic, numerous researchers turned to sentiment analysis to gauge public responses to news and developments surrounding the virus. This included scrutinizing user-generated content on popular social media platforms like Twitter, YouTube, and Instagram. Inspired by this approach, my colleagues and I aimed to expand upon existing research by analyzing the daily briefings delivered by public health authorities. In Alberta, Dr. Deena Hinshaw, the province's chief medical officer of health, regularly provided [updates on the region's response to the ongoing pandemic](https://www.youtube.com/watch?v=fvw_USRfXgY), Through our analysis of these public health announcements, we sought to evaluate Alberta's effectiveness in employing communication strategies during this complex public health crisis [@sentimentbulut; @sentimentpoth]. +Amidst the pandemic, I observed a significant trend among researchers who turned to sentiment analysis as a tool to measure public responses to news and developments surrounding the virus. This involved analyzing user-generated content on popular social media platforms such as Twitter, YouTube, and Instagram. Intrigued by this methodology, my colleagues and I endeavored to contribute to the existing body of research by scrutinizing the daily briefings provided by public health authorities. In Alberta, Dr. 
Deena Hinshaw, who used to be the province's chief medical officer of health, regularly delivered [updates on the region's response](https://www.youtube.com/watch?v=fvw_USRfXgY) to the ongoing pandemic. Through our analysis of these public health announcements, we aimed to assess Alberta's effectiveness in implementing communication strategies during this intricate public health crisis. Our investigation, conducted through the lens of sentiment analysis, sought to shed light on the efficacy of these strategies during this challenging period in public health [@sentimentbulut; @sentimentpoth]. -In this post, my aim is to guide you through the process of conducting sentiment analysis using R. Specifically, I'll delve into "lexicon-based sentiment analysis," which I'll explain more in the next section. You'll find examples of lexicon-based sentiment analysis that we've incorporated into our publications mentioned above. In upcoming posts, I'll also explore more sophisticated forms of sentiment analysis leveraging cutting-edge pre-trained models available on [Hugging Face](https://huggingface.co/docs/transformers/en/index). +In this post, I aim to walk you through the process of performing sentiment analysis using R. Specifically, I'll focus on "lexicon-based sentiment analysis," which I'll discuss in more detail in the next section. I'll provide examples of lexicon-based sentiment analysis that we've integrated into the publications referenced earlier. Additionally, in future posts, I'll delve into more advanced forms of sentiment analysis, making use of state-of-the-art pre-trained models accessible on [Hugging Face](https://huggingface.co/docs/transformers/en/index). ## Lexicon-Based Sentiment Analysis -As I began to explore sentiment analysis, I found that the most common method of extracting sentiments was through lexicon-based sentiment analysis. This technique involves the use of a specific lexicon, which is essentially the vocabulary of a language or subject, to determine the direction and strength of sentiments expressed in a given text. Some lexicons, such as the Bing lexicon [@hu2004mining], categorize words as either positive or negative. Other lexicons offer more descriptive labels for the sentiments, such as the NRC Emotion Lexicon [@mohammad2013crowdsourcing], which categorizes words based on both positive and negative sentiments and Plutchik’s [@plutchik1980general] psych evolutionary theory of basic emotions (i.e., anger, fear, anticipation, trust, surprise, sadness, joy, and disgust). +As I learned more about sentiment analysis, I discovered that the predominant method for extracting sentiments is lexicon-based sentiment analysis. This approach entails utilizing a specific lexicon, essentially the vocabulary of a language or subject, to discern the direction and intensity of sentiments conveyed within a given text. Some lexicons, like the Bing lexicon [@hu2004mining], classify words as either positive or negative. Conversely, other lexicons provide more detailed sentiment labels, such as the NRC Emotion Lexicon [@mohammad2013crowdsourcing], which categorizes words based on both positive and negative sentiments, as well as Plutchik’s [@plutchik1980general] psychoevolutionary theory of basic emotions (e.g., anger, fear, anticipation, trust, surprise, sadness, joy, and disgust). -Lexicon-based sentiment analysis is a process that involves matching words in a given text with the words present in general-purpose lexicons like NRC and Bing.
Each word is assigned a specific sentiment, such as positive or negative. The overall sentiment score of the text is then calculated by adding up the individual sentiment scores of each word. For example, if a text contains 50 positive and 30 negative words based on the Bing lexicon, the total sentiment score would be 20, which indicates that the text expresses mostly positive sentiments. Conversely, a negative value would suggest the opposite sentiment. +Lexicon-based sentiment analysis operates by aligning words within a given text with those found in widely-used lexicons such as NRC and Bing. Each word receives an assigned sentiment, typically categorized as positive or negative. The text's collective sentiment score is subsequently derived by summing the individual sentiment scores of its constituent words. For instance, in a scenario where a text incorporates 50 positive and 30 negative words according to the Bing lexicon, the resulting sentiment score would be 20. This value indicates a predominance of positive sentiments within the text. Conversely, a negative total would imply a prevalence of negative sentiments. Performing lexicon-based sentiment analysis using R can be both fun and tricky at the same time. While analyzing public health announcements in terms of sentiments, I found Julia Silge and David Robinson's book, [Text Mining with R](https://www.tidytextmining.com/), to be very helpful. The book has [a chapter dedicated to sentiment analysis](https://www.tidytextmining.com/sentiment), where the authors demonstrate how to conduct sentiment analysis using general-purpose lexicons like Bing and NRC. However, Julia and David also highlight a major limitation of lexicon-based sentiment analysis. The analysis considers only single words (i.e., unigrams) and does not consider qualifiers before a word. For instance, negation words like "not" in "not true" are ignored, and sentiment analysis processes them as two separate words, "not" and "true". Furthermore, if a particular word (either positive or negative) is repeatedly used throughout the text, this may skew the results depending on the polarity (positive or negative) of this word. Therefore, the results of lexicon-based sentiment analysis should be interpreted carefully. -Now, let's move to our example where we will conduct lexicon-based sentiment analysis using Dr. Deena Hinshaw's media briefings during the COVID-19 pandemic. My goal is to showcase several R packages capable of running sentiment analysis `r ji("graph")`. +Now, let's move to our example where we will conduct lexicon-based sentiment analysis using Dr. Deena Hinshaw's media briefings during the COVID-19 pandemic. My goal is to showcase two R packages capable of running sentiment analysis `r ji("graph")`. ## Example @@ -89,9 +88,9 @@ The dataset has three columns: * date (the exact date of the media briefing), and * word (words or tokens used in media briefing) -### Descriptve Analysis +### Descriptive Analysis -Now, we can calculate some descriptive statistics to better understand the content of our dataset. We can begin by finding the most common 5 words for each month. +Now, we can calculate some descriptive statistics to better understand the content of our dataset. We will begin by finding the top 5 words (based on their frequency) for each month. 
```{r ch3, echo=TRUE, eval=TRUE} library("dplyr") @@ -103,7 +102,7 @@ wave1_alberta %>% as.data.frame() ``` -We see that words such as health, continue, and test were commonly used in the media briefings across this 4-month period. We can also expand our list to the most common 10 words and print it visually: +The output shows that words such as health, continue, and test were commonly used in the media briefings across this 4-month period. We can also expand our list to the most common 10 words and view the results visually: ```{r ch4, echo=TRUE, eval=TRUE, fig.cap="Most common words based on frequency"} library("tidytext") @@ -130,7 +129,7 @@ wave1_alberta %>% strip.text = element_text(colour = "black", face = "bold", size = 13)) ``` -Since some words may be common across all four months, the plot above may not necessarily show us the important words that are unique to each month. To find such important words, we can use Term Frequency - Inverse Document Frequency (TF-IDF)--a widely used technique in NLP for measuring how important a term is within a document relative to a collection of documents (for more detailed information about TF-IDF, check out [my previous blog post](https://okan.cloud/posts/2022-01-16-text-vectorization-using-python-tf-idf/#tf-idf)). In our example, we will treat media briefings for each month as a document and calculate TF-IDF for the tokens (i.e., words) within each document. The first part of the R codes below creates a new dataset, *wave1_tf_idf*, by calculating TF-IDF for all tokens and selecting the tokens with the highest TF-IDF values within each month. Next, we will create a bar plot with the TF-IDF values to view the common words unique to each month. +Since some words are common across all four months, the plot above may not necessarily show us the important words that are unique to each month. To find such important words, we can use Term Frequency - Inverse Document Frequency (TF-IDF)--a widely used technique in NLP for measuring how important a term is within a document relative to a collection of documents (for more detailed information about TF-IDF, check out [my previous blog post](https://okan.cloud/posts/2022-01-16-text-vectorization-using-python-tf-idf/#tf-idf)). In our example, we will treat media briefings for each month as a document and calculate TF-IDF for the tokens (i.e., words) within each document. The first part of the R code below creates a new dataset, *wave1_tf_idf*, by calculating TF-IDF for all tokens and selecting the tokens with the highest TF-IDF values within each month. Next, we use this dataset to create a bar plot with the TF-IDF values to view the common words unique to each month. ```{r ch5, echo=TRUE, eval=TRUE, layout="l-body-outset", fig.width=8, fig.height=6, fig.cap="Most common words based on TF-IDF"} # Calculate TF-IDF for the words for each month @@ -157,7 +156,7 @@ wave1_tf_idf %>% labs(x = NULL, y = "TF-IDF") ``` -Before we move to the sentiment analysis, let's take a look at another descriptive variable: the length of each media briefing. We can see whether the length of the media briefings (i.e., the number of tokens) varied over time. +These results are more informative because the tokens shown in the figure reflect unique topics discussed each month. For example, in March 2020, the media briefings were mostly about limiting travel, returning from crowded conferences, and COVID-19 cases on cruise ships.
In June 2020, the focus of the media briefings shifted towards mask requirements, people protesting pandemic-related restrictions, and so on. Before we move on to the sentiment analysis, let's take a look at another descriptive variable: the length of each media briefing. This will show us whether the media briefings became longer or shorter over time. ```{r ch6, echo=TRUE, eval=TRUE, layout="l-body-outset", fig.width=8, fig.height=6, fig.cap="Number of words by days"} wave1_alberta %>% @@ -177,11 +176,11 @@ wave1_alberta %>% facet_wrap(~ month, scales = "free_x") ``` -The figure above shows that the length of media briefings varied over time. Especially in March and May, there are larger fluctuations (i.e., very long or short briefings), whereas in June, the daily media briefings are quite similar in terms of length. +The figure above shows that the length of media briefings varied quite substantially over time. Especially in March and May, there are larger fluctuations (i.e., very long or short briefings), whereas in June, the daily media briefings are quite similar in terms of length. ### Sentiment Analysis with **tidytext** -After analyzing the dataset descriptively, we are ready to begin with the sentiment analysis. In the first part, we will use the **tidytext** package for performing sentiment analysis. We will first import the lexicons into R and then merge them with our dataset. Using the Bing lexicon, we need to count the number of positive and negative words to produce a sentiment score (i.e., sentiment = the number of positive words - the number of negative words). +After analyzing the dataset descriptively, we are ready to begin with the sentiment analysis. In the first part, we will use the **tidytext** package for performing sentiment analysis and computing sentiment scores. We will first import the lexicons into R and then merge them with our dataset. Using the Bing lexicon, we need to find the difference between the number of positive and negative words to produce a sentiment score (i.e., sentiment = the number of positive words - the number of negative words). ```{r ch7, echo=TRUE, eval=TRUE, layout="l-body-outset", fig.width=8, fig.height=6, fig.cap="Sentiment scores based on the Bing lexicon"} # From the three lexicons, Bing is already available in the tidytext package @@ -191,6 +190,7 @@ get_sentiments("bing") get_sentiments("afinn") get_sentiments("nrc") +# We will need the spread function from tidyr library("tidyr") # Sentiment scores with bing (based on frequency) wave1_alberta %>% @@ -213,7 +213,7 @@ wave1_alberta %>% axis.text.y = element_text(size = 11)) ``` -The figure shows that the sentiments delivered in the media briefings were generally negative, which is not necessarily surprising since the media briefings were all about how many people passed away, hospitalization rates, potential outbreaks, etc. On certain days (e.g., March 24, 2020 and May 4, 2020), the media briefings were particularly more negative in terms of sentiments. +The figure above shows that the sentiments delivered in the media briefings were generally negative, which is not necessarily surprising since the media briefings were all about how many people passed away, hospitalization rates, potential outbreaks, etc. On certain days (e.g., March 24, 2020 and May 4, 2020), the media briefings were particularly more negative in terms of sentiments. Next, we will use the AFINN lexicon. Unlike Bing that labels words as positive or negative, AFINN assigns a numerical weight to each word.
The sign of the weight indicates the polarity of sentiments (i.e., positive or negative) while the value indicates the intensity of sentiments. Now, let's see if these weighted values produce different sentiment scores. @@ -238,9 +238,9 @@ wave1_alberta %>% axis.text.y = element_text(size = 11)) ``` -The results based on the AFINN lexicon are quite different! It seems that once we take the "weight" of the tokens into account, most media briefings are overall positive (see the green bars), although there are still some day with negative sentiments (see the red bars). The two analyses we have done so far have yielded very different for two reasons. First, as I mentioned above, the Bing lexicon focuses on the polarity of the words but ignore the intensity of the words (dislike and hate are considered negative words with equal intensity). Unlike the Bing lexicon, the AFINN lexicon takes the intensity into account, which impacts the calculation of the sentiment scores. Second, the Bing lexicon (6786 words) is fairly larger than the AFINN lexicon (2477 words). Therefore, it is likely that some tokens in the media briefings are included in the Bing lexicon but not in the AFINN lexicon. Disregarding those tokens may impact the results substantially. +The results based on the AFINN lexicon seem to be quite different! Once we take the "weight" of the tokens into account, most media briefings turn out to be positive (see the green bars), although there are still some days with negative sentiments (see the red bars). The two analyses we have done so far have yielded very different results for two reasons. First, as I mentioned above, the Bing lexicon focuses on the polarity of the words but ignores the intensity of the words (dislike and hate are considered negative words with equal intensity). Unlike the Bing lexicon, the AFINN lexicon takes the intensity into account, which impacts the calculation of the sentiment scores. Second, the Bing lexicon (6786 words) is considerably larger than the AFINN lexicon (2477 words). Therefore, it is likely that some tokens in the media briefings are included in the Bing lexicon, but not in the AFINN lexicon. Disregarding those tokens might have impacted the results. -The final lexicon we are going to try using the **tidytext** package is NRC. As I mentioned earlier, this lexicon uses uses Plutchik’s [@plutchik1980general] psych evolutionary theory to label the tokens based on basic emotions such as anger, fear, and anticipation. WE are going to count the number of words or token associated with each emotion and then +The final lexicon we are going to try using the **tidytext** package is NRC. As I mentioned earlier, this lexicon uses Plutchik’s [@plutchik1980general] psychoevolutionary theory to label the tokens based on basic emotions such as anger, fear, and anticipation. We are going to count the number of words or tokens associated with each emotion and then visualize the results. ```{r ch9, echo=TRUE, eval=TRUE, layout="l-body-outset", fig.width=8, fig.height=6, fig.cap="Sentiment scores based on the NRC lexicon"} wave1_alberta %>% @@ -261,13 +261,13 @@ wave1_alberta %>% axis.text.y = element_text(size = 11)) ``` -The figure shows that the media briefings were mostly positive each month. Dr. Hinshaw also used words associated with "trust", "anticipation", and "fear". Overall, the pattern of these emotions seems to remain very similar over time, indicating the consistency of the media briefings in terms of the type and intensity of the emotions delivered.
+The figure shows that the media briefings were mostly positive each month. Dr. Hinshaw used words associated with "trust", "anticipation", and "fear". Overall, the pattern of these emotions seems to remain very similar over time, indicating the consistency of the media briefings in terms of the type and intensity of the emotions delivered. ### Sentiment Analysis with **sentimentr** -Another package for lexicon-based sentiment analysis is **sentimentr** [@R-sentiment]. Unlike the **tidytext** package, this package takes valence shifters (e.g., negation) into account, which can easily flip the polarity of a sentence with one word. For example, the sentence "I am not unhappy" is actually positive but if we analyze it word by word, the sentence may seem to have a negative sentiment due to the words "not" and "unhappy". Similarly, "I hardly like this book" is a negative sentence but the analysis of individual words, "hardly" and "like", may yield a positive sentiment score. The **sentimentr** package addresses the limitations around sentiment detection with valence shifters (see the package author Tyler Rinker’s Github page for further details on **sentimentr**: <https://github.com/trinker/sentimentr>). +Another package for lexicon-based sentiment analysis is **sentimentr** [@R-sentiment]. Unlike the **tidytext** package, this package takes valence shifters (e.g., negation) into account, which can easily flip the polarity of a sentence with one word. For example, the sentence "I am not unhappy" is actually positive but if we analyze it word by word, the sentence may seem to have a negative sentiment due to the words, "not" and "unhappy". Similarly, "I hardly like this book" is a negative sentence but the analysis of individual words, "hardly" and "like", may yield a positive sentiment score. The **sentimentr** package addresses the limitations around sentiment detection with valence shifters (see the package author Tyler Rinker’s Github page for further details on **sentimentr**: <https://github.com/trinker/sentimentr>). -To benefit from the **sentimentr** package, we need actual sentences in the media briefings rather than the individual tokens. Therefore, I had to create an untokenized version of the dataset, which is available [**here**](https://github.com/okanbulut/blog/raw/master/data_and_codes/wave1_alberta_sentence.RData). We will first import this dataset into R, get individual sentences for each media briefing using the `get_sentences()` function, and then calculate sentiment scores by day and month via `sentiment_by()`. +To benefit from the **sentimentr** package, we need the actual sentences in the media briefings rather than the individual tokens. Therefore, I had to create an untokenized version of the dataset, which is available [**here**](https://github.com/okanbulut/blog/raw/master/data_and_codes/wave1_alberta_sentence.RData). We will first import this dataset into R, get individual sentences for each media briefing using the `get_sentences()` function, and then calculate sentiment scores by day and month via `sentiment_by()`. ```{r ch10, echo=TRUE, eval=FALSE} library("sentimentr") @@ -315,9 +315,9 @@ wave1_sentimentr %>% axis.text.y = element_text(size = 11)) ``` -In the figure above, blue bars indicate highly positive sentiment scores whereas red bars indicate relatively lower sentiment scores.
The patterns of the sentiment scores produced by **sentimentr** are similar to those based on the AFINN lexicon, although this analysis is based on the original media briefings (instead of only tokens) and the valence shifters are also considered in the computation of sentiment scores. +In the figure above, the blue bars represent highly positive sentiment scores, while the red bars depict comparatively lower sentiment scores. The patterns observed in the sentiment scores generated by **sentimentr** closely resemble those derived from the AFINN lexicon. Notably, this analysis is based on the original media briefings rather than solely tokens, with consideration given to valence shifters in the computation of sentiment scores. The convergence between the sentiment patterns identified by **sentimentr** and those from AFINN is not entirely unexpected. Both approaches incorporate similar weighting systems and mechanisms that account for word intensity. This alignment reinforces our confidence in the initial findings obtained through AFINN, validating the consistency and reliability of our analyses with **sentimentr**. ## Concluding Remarks -xxx +In conclusion, lexicon-based sentiment analysis in R offers a powerful tool for uncovering the emotional nuances within textual data. Throughout this post, we have explored the fundamental concepts of lexicon-based sentiment analysis and provided a practical demonstration of its implementation using R. By leveraging packages such as **sentimentr** and **tidytext**, we have illustrated how sentiment analysis can be seamlessly integrated into your data analysis workflow. As you embark on your journey into sentiment analysis, remember that the insights gained from this technique extend far beyond the surface of text. They provide valuable perspectives on public opinion, consumer sentiment, and beyond. I encourage you to delve deeper into lexicon-based sentiment analysis, experiment with the examples presented here, and unlock the rich insights waiting to be discovered within your own data. Happy analyzing! diff --git a/_posts/2024-01-21-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r.html b/_posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r.html similarity index 97% rename from _posts/2024-01-21-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r.html rename to _posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r.html index 5679748..f5de8dc 100644 --- a/_posts/2024-01-21-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r.html +++ b/_posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r.html @@ -111,24 +111,24 @@ Lexicon-Based Sentiment Analysis Using R - + - - + + - + - + @@ -145,7 +145,7 @@ @@ -1555,7 +1555,7 @@ @@ -1575,14 +1575,14 @@

Lexicon-Based Sentiment Analysis Using R

sentiment
-

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Sed vehicula tempor bibendum. Sed augue turpis, efficitur ut ipsum quis, fermentum pulvinar quam. Proin molestie orci erat, et condimentum dui ornare ac. Quisque sit amet lacus id nisl pellentesque sagittis. Vestibulum non urna non eros condimentum commodo. Proin vitae nulla fermentum.

-

(11 min read)

+

In this post, we will uncover the power of lexicon-based sentiment analysis using R. I demonstrate how to harness the capabilities of lexicons like NRC and Bing to decipher the emotional pulse of your text data. With practical examples, you’ll gain the skills to analyze sentiment scores and extract valuable insights from your textual data sets.

+

(12 min read)

@@ -1594,7 +1594,7 @@

Contents

  • Lexicon-Based Sentiment Analysis
  • Example
  • @@ -1607,14 +1607,14 @@

    Contents

    Introduction

    -

    During the COVID-19 pandemic, I decided to learn a new statistical technique to keep my mind occupied instead of watching the news all the time. Among the several techniques I reviewed, those related to natural language processing (NLP) were the most interesting ones. So, I decided to choose one technique from this field and learn more about it. This was sentiment analysis, which is also referred to as “opinion mining” in the literature. This analytical approach enables researchers to extract and interpret emotions expressed towards a particular subject in a written text. With sentiment analysis, one can discern the polarity (positive or negative), nature, and intensity of sentiments expressed across various textual formats such as documents, customer reviews, and social media posts.

    -

    Amidst the COVID-19 pandemic, numerous researchers turned to sentiment analysis to gauge public responses to news and developments surrounding the virus. This included scrutinizing user-generated content on popular social media platforms like Twitter, YouTube, and Instagram. Inspired by this approach, my colleagues and I aimed to expand upon existing research by analyzing the daily briefings delivered by public health authorities. In Alberta, Dr. Deena Hinshaw, the province’s chief medical officer of health, regularly provided updates on the region’s response to the ongoing pandemic, Through our analysis of these public health announcements, we sought to evaluate Alberta’s effectiveness in employing communication strategies during this complex public health crisis (Bulut & Poth, 2022; Poth et al., 2021).

    -

    In this post, my aim is to guide you through the process of conducting sentiment analysis using R. Specifically, I’ll delve into “lexicon-based sentiment analysis,” which I’ll explain more in the next section. You’ll find examples of lexicon-based sentiment analysis that we’ve incorporated into our publications mentioned above. In upcoming posts, I’ll also explore more sophisticated forms of sentiment analysis leveraging cutting-edge pre-trained models available on Hugging Face.

    +

    During the COVID-19 pandemic, I decided to learn a new statistical technique to keep my mind occupied rather than constantly immersing myself in pandemic-related news. After evaluating several options, I found the concepts related to natural language processing (NLP) particularly captivating. So, I opted to delve deeper into this field and explore one specific technique: sentiment analysis, also known as “opinion mining” in academic literature. This analytical method empowers researchers to extract and interpret the emotions conveyed toward a specific subject within written text. Through sentiment analysis, one can discern the polarity (positive or negative), nature, and intensity of sentiments expressed across various textual formats such as documents, customer reviews, and social media posts.

    +

    Amidst the pandemic, I observed a significant trend among researchers who turned to sentiment analysis as a tool to measure public responses to news and developments surrounding the virus. This involved analyzing user-generated content on popular social media platforms such as Twitter, YouTube, and Instagram. Intrigued by this methodology, my colleagues and I endeavored to contribute to the existing body of research by scrutinizing the daily briefings provided by public health authorities. In Alberta, Dr. Deena Hinshaw, who used to be the province’s chief medical officer of health, regularly delivered updates on the region’s response to the ongoing pandemic. Through our analysis of these public health announcements, we aimed to assess Alberta’s effectiveness in implementing communication strategies during this intricate public health crisis. Our investigation, conducted through the lens of sentiment analysis, sought to shed light on the efficacy of these strategies during this challenging period in public health (Bulut & Poth, 2022; Poth et al., 2021).

    +

    In this post, I aim to walk you through the process of performing sentiment analysis using R. Specifically, I’ll focus on “lexicon-based sentiment analysis,” which I’ll discuss in more detail in the next section. I’ll provide examples of lexicon-based sentiment analysis that we’ve integrated into the publications referenced earlier. Additionally, in future posts, I’ll delve into more advanced forms of sentiment analysis, making use of state-of-the-art pre-trained models accessible on Hugging Face.

    Lexicon-Based Sentiment Analysis

    -

    As I began to explore sentiment analysis, I found that the most common method of extracting sentiments was through lexicon-based sentiment analysis. This technique involves the use of a specific lexicon, which is essentially the vocabulary of a language or subject, to determine the direction and strength of sentiments expressed in a given text. Some lexicons, such as the Bing lexicon (Hu & Liu, 2004), categorize words as either positive or negative. Other lexicons offer more descriptive labels for the sentiments, such as the NRC Emotion Lexicon (Mohammad & Turney, 2013), which categorizes words based on both positive and negative sentiments and Plutchik’s (Plutchik, 1980) psych evolutionary theory of basic emotions (i.e., anger, fear, anticipation, trust, surprise, sadness, joy, and disgust).

    -

    Lexicon-based sentiment analysis is a process that involves matching words in a given text with the words present in general-purpose lexicons like NRC and Bing. Each word is assigned a specific sentiment, such as positive or negative. The overall sentiment score of the text is then calculated by adding up the individual sentiment scores of each word. For example, if a text contains 50 positive and 30 negative words based on the Bing lexicon, the total sentiment score would be 20, which indicates that the text expresses mostly positive sentiments. Conversely, a negative value would suggest the opposite sentiment.

    +

    As I learned more about sentiment analysis, I discovered that the predominant method for extracting sentiments is lexicon-based sentiment analysis. This approach entails utilizing a specific lexicon, essentially the vocabulary of a language or subject, to discern the direction and intensity of sentiments conveyed within a given text. Some lexicons, like the Bing lexicon (Hu & Liu, 2004), classify words as either positive or negative. Conversely, other lexicons provide more detailed sentiment labels, such as the NRC Emotion Lexicon (Mohammad & Turney, 2013), which categorizes words based on both positive and negative sentiments, as well as Plutchik’s (Plutchik, 1980) psychoevolutionary theory of basic emotions (e.g., anger, fear, anticipation, trust, surprise, sadness, joy, and disgust).

    +

    Lexicon-based sentiment analysis operates by aligning words within a given text with those found in widely-used lexicons such as NRC and Bing. Each word receives an assigned sentiment, typically categorized as positive or negative. The text’s collective sentiment score is subsequently derived by summing the individual sentiment scores of its constituent words. For instance, in a scenario where a text incorporates 50 positive and 30 negative words according to the Bing lexicon, the resulting sentiment score would be 20. This value indicates a predominance of positive sentiments within the text. Conversely, a negative total would imply a prevalence of negative sentiments.
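    To make that arithmetic concrete, here is a minimal sketch of a Bing-based score (a toy set of tokens rather than our briefing data; it assumes the dplyr, tidyr, and tidytext packages):

```r
library(dplyr)
library(tidyr)
library(tidytext)

# A toy text that has already been tokenized into single words
tokens <- tibble(word = c("good", "great", "happy", "bad", "terrible"))

tokens %>%
  inner_join(get_sentiments("bing"), by = "word") %>%  # match tokens to the lexicon
  count(sentiment) %>%                                 # count positive/negative matches
  pivot_wider(names_from = sentiment, values_from = n, values_fill = 0) %>%
  mutate(sentiment = positive - negative)              # 3 positive - 2 negative = 1
```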

    Performing lexicon-based sentiment analysis using R can be both fun and tricky at the same time. While analyzing public health announcements in terms of sentiments, I found Julia Silge and David Robinson’s book, Text Mining with R, to be very helpful. The book has a chapter dedicated to sentiment analysis, where the authors demonstrate how to conduct sentiment analysis using general-purpose lexicons like Bing and NRC. However, Julia and David also highlight a major limitation of lexicon-based sentiment analysis. The analysis considers only single words (i.e., unigrams) and does not consider qualifiers before a word. For instance, negation words like “not” in “not true” are ignored, and sentiment analysis processes them as two separate words, “not” and “true”. Furthermore, if a particular word (either positive or negative) is repeatedly used throughout the text, this may skew the results depending on the polarity (positive or negative) of this word. Therefore, the results of lexicon-based sentiment analysis should be interpreted carefully.
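    The unigram issue is easy to see in a small sketch (again a toy example, not from our dataset): the negation below is simply dropped because it never matches the lexicon.

```r
library(dplyr)
library(tidytext)

tibble(text = "I am not happy") %>%
  unnest_tokens(word, text) %>%                   # unigrams: "i", "am", "not", "happy"
  inner_join(get_sentiments("bing"), by = "word")
# Only "happy" survives the join (labeled positive), so this clearly
# negative sentence would contribute a positive sentiment score.
```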

    -

    Now, let’s move to our example where we will conduct lexicon-based sentiment analysis using Dr. Deena Hinshaw’s media briefings during the COVID-19 pandemic. My goal is to showcase several R packages capable of running sentiment analysis 📉.

    +

    Now, let’s move to our example where we will conduct lexicon-based sentiment analysis using Dr. Deena Hinshaw’s media briefings during the COVID-19 pandemic. My goal is to showcase two R packages capable of running sentiment analysis 📉.

    Example

    For the sake of simplicity, we will focus on the first wave of the pandemic (March 2020 - June 2020). The transcripts of all media briefings were available on the government of Alberta’s COVID-19 pandemic website (https://www.alberta.ca/covid). After importing these transcripts into R, I turned all the text into lowercase and then applied word tokenization using the tidytext (Silge & Robinson, 2016) and tokenizers (Mullen et al., 2018) packages. Word tokenization split the sentences in the media briefings into individual words for each entry (i.e., day of media briefings). Next, I applied lemmatization to the tokens to resolve each word into its canonical form using the textstem package (Rinker, 2018). Finally, I removed common stopwords, such as “my”, “for”, “that”, and “with”, using the stopwords package (Benoit et al., 2021). The final dataset is available here. Now, let’s import the data into R and then review its content.
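    The preparation code itself is not shown here, but a rough sketch of that pipeline could look like the following (the briefings data frame and its columns are hypothetical stand-ins for the raw transcripts):

```r
library(dplyr)
library(tidytext)

# Hypothetical input: one row per media briefing with the raw transcript
briefings <- tibble(
  month = "March",
  date  = as.Date("2020-03-20"),
  text  = "We continue to test aggressively and protect the health system."
)

briefings %>%
  unnest_tokens(word, text) %>%                       # lowercases and splits into words
  mutate(word = textstem::lemmatize_words(word)) %>%  # resolve tokens to canonical forms
  filter(!word %in% stopwords::stopwords("en"))       # drop common stopwords
```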

    @@ -1637,8 +1637,8 @@

    Example

  • date (the exact date of the media briefing), and
  • word (words or tokens used in media briefing)
  • -

    Descriptve Analysis

    -

    Now, we can calculate some descriptive statistics to better understand the content of our dataset. We can begin by finding the most common 5 words for each month.

    +

    Descriptive Analysis

    +

    Now, we can calculate some descriptive statistics to better understand the content of our dataset. We will begin by finding the top 5 words (based on their frequency) for each month.

    library("dplyr")
    @@ -1671,7 +1671,7 @@ 

    Descriptve Analysis

    19 June 2020 people 57 20 June 2020 community 43
    -

    We see that words such as health, continue, and test were commonly used in the media briefings across this 4-month period. We can also expand our list to the most common 10 words and print it visually:

    +

    The output shows that words such as health, continue, and test were commonly used in the media briefings across this 4-month period. We can also expand our list to the most common 10 words and view the results visually:

    library("tidytext")
    @@ -1704,7 +1704,7 @@ 

    Descriptve Analysis

    -

    Since some words may be common across all four months, the plot above may not necessarily show us the important words that are unique to each month. To find such important words, we can use Term Frequency - Inverse Document Frequency (TF-IDF)–a widely used technique in NLP for measuring how important a term is within a document relative to a collection of documents (for more detailed information about TF-IDF, check out my previous blog post). In our example, we will treat media briefings for each month as a document and calculate TF-IDF for the tokens (i.e., words) within each document. The first part of the R codes below creates a new dataset, wave1_tf_idf, by calculating TF-IDF for all tokens and selecting the tokens with the highest TF-IDF values within each month. Next, we will create a bar plot with the TF-IDF values to view the common words unique to each month.

    +

    Since some words are common across all four months, the plot above may not necessarily show us the important words that are unique to each month. To find such important words, we can use Term Frequency - Inverse Document Frequency (TF-IDF)–a widely used technique in NLP for measuring how important a term is within a document relative to a collection of documents (for more detailed information about TF-IDF, check out my previous blog post). In our example, we will treat media briefings for each month as a document and calculate TF-IDF for the tokens (i.e., words) within each document. The first part of the R codes below creates a new dataset, wave1_tf_idf, by calculating TF-IDF for all tokens and selecting the tokens with the highest TF-IDF values within each month. Next, we use this dataset to create a bar plot with the TF-IDF values to view the common words unique to each month.
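    For reference, the quantity computed in the next chunk is the standard one: for a term $t$ in document $d$ within a collection of $N$ documents,

$$\mathrm{tf\text{-}idf}(t, d) = \mathrm{tf}(t, d) \times \ln\!\left(\frac{N}{n_t}\right),$$

where $\mathrm{tf}(t, d)$ is the relative frequency of $t$ in $d$ and $n_t$ is the number of documents containing $t$; this matches the definition used by tidytext's `bind_tf_idf()`.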

    # Calculate TF-IDF for the words for each month
    @@ -1737,7 +1737,7 @@ 

    Descriptve Analysis

    -

    Before we move to the sentiment analysis, let’s take a look at another descriptive variable: the length of each media briefing. We can see whether the length of the media briefings (i.e., the number of tokens) varied over time.

    +

    These results are more informative because the tokens shown in the figure reflect unique topics discussed each month. For example, in March 2020, the media briefings were mostly about limiting travel, returning from crowded conferences, and COVID-19 cases on cruise ships. In June 2020, the focus of the media briefings shifted towards mask requirements, people protesting pandemic-related restrictions, and so on. Before we move on to the sentiment analysis, let’s take a look at another descriptive variable: the length of each media briefing. This will show us whether the media briefings became longer or shorter over time.

    wave1_alberta %>%
    @@ -1763,9 +1763,9 @@ 

    Descriptve Analysis

    -

    The figure above shows that the length of media briefings varied over time. Especially in March and May, there are larger fluctuations (i.e., very long or short briefings), whereas in June, the daily media briefings are quite similar in terms of length.

    +

    The figure above shows that the length of media briefings varied quite substantially over time. Especially in March and May, there are larger fluctuations (i.e., very long or short briefings), whereas in June, the daily media briefings are quite similar in terms of length.

    Sentiment Analysis with tidytext

    -

    After analyzing the dataset descriptively, we are ready to begin with the sentiment analysis. In the first part, we will use the tidytext package for performing sentiment analysis. We will first import the lexicons into R and then merge them with our dataset. Using the Bing lexicon, we need to count the number of positive and negative words to produce a sentiment score (i.e., sentiment = the number of positive words - the number of negative words).

    +

    After analyzing the dataset descriptively, we are ready to begin with the sentiment analysis. In the first part, we will use the tidytext package for performing sentiment analysis and computing sentiment scores. We will first import the lexicons into R and then merge them with our dataset. Using the Bing lexicon, we need to find the difference between the number of positive and negative words to produce a sentiment score (i.e., sentiment = the number of positive words - the number of negative words).

    # From the three lexicons, Bing is already available in the tidytext package
    @@ -1822,7 +1822,8 @@ 

    Sentiment Analysis with tidyte 10 abandonment fear # ℹ 13,891 more rows

    -
    library("tidyr")
    +
    # We will need the spread function from tidyr
    +library("tidyr")
     
     # Sentiment scores with bing (based on frequency)
     wave1_alberta %>%
    @@ -1850,7 +1851,7 @@ 

    Sentiment Analysis with tidyte

    -

    The figure shows that the sentiments delivered in the media briefings were generally negative, which is not necessarily surprising since the media briefings were all about how many people passed away, hospitalization rates, potential outbreaks, etc. On certain days (e.g., March 24, 2020 and May 4, 2020), the media briefings were particularly more negative in terms of sentiments.

    +

    The figure above shows that the sentiments delivered in the media briefings were generally negative, which is not necessarily surprising since the media briefings were all about how many people passed away, hospitalization rates, potential outbreaks, etc. On certain days (e.g., March 24, 2020 and May 4, 2020), the media briefings were particularly more negative in terms of sentiments.

    Next, we will use the AFINN lexicon. Unlike Bing that labels words as positive or negative, AFINN assigns a numerical weight to each word. The sign of the weight indicates the polarity of sentiments (i.e., positive or negative) while the value indicates the intensity of sentiments. Now, let’s see if these weighted values produce different sentiment scores.
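    Conceptually, the AFINN score replaces Bing's count difference with a weighted sum; a minimal sketch (assuming the same wave1_alberta data frame as before) could look like this:

```r
library(dplyr)
library(tidytext)

# AFINN assigns each word an integer weight ("value") from -5 to +5,
# so a day's score is the sum of the weights of its matched tokens.
wave1_alberta %>%
  inner_join(get_sentiments("afinn"), by = "word") %>%
  group_by(month, date) %>%
  summarize(sentiment = sum(value), .groups = "drop")
```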

    @@ -1879,8 +1880,8 @@

    Sentiment Analysis with tidyte

    -

    The results based on the AFINN lexicon are quite different! It seems that once we take the “weight” of the tokens into account, most media briefings are overall positive (see the green bars), although there are still some day with negative sentiments (see the red bars). The two analyses we have done so far have yielded very different for two reasons. First, as I mentioned above, the Bing lexicon focuses on the polarity of the words but ignore the intensity of the words (dislike and hate are considered negative words with equal intensity). Unlike the Bing lexicon, the AFINN lexicon takes the intensity into account, which impacts the calculation of the sentiment scores. Second, the Bing lexicon (6786 words) is fairly larger than the AFINN lexicon (2477 words). Therefore, it is likely that some tokens in the media briefings are included in the Bing lexicon but not in the AFINN lexicon. Disregarding those tokens may impact the results substantially.

    -

    The final lexicon we are going to try using the tidytext package is NRC. As I mentioned earlier, this lexicon uses uses Plutchik’s (Plutchik, 1980) psych evolutionary theory to label the tokens based on basic emotions such as anger, fear, and anticipation. WE are going to count the number of words or token associated with each emotion and then

    +

    The results based on the AFINN lexicon seem to be quite different! Once we take the “weight” of the tokens into account, most media briefings turn out to be positive (see the green bars), although there are still some days with negative sentiments (see the red bars). The two analyses we have done so far have yielded very different results for two reasons. First, as I mentioned above, the Bing lexicon focuses on the polarity of the words but ignores the intensity of the words (dislike and hate are considered negative words with equal intensity). Unlike the Bing lexicon, the AFINN lexicon takes the intensity into account, which impacts the calculation of the sentiment scores. Second, the Bing lexicon (6786 words) is considerably larger than the AFINN lexicon (2477 words). Therefore, it is likely that some tokens in the media briefings are included in the Bing lexicon, but not in the AFINN lexicon. Disregarding those tokens might have impacted the results.

    +

    The final lexicon we are going to try using the tidytext package is NRC. As I mentioned earlier, this lexicon uses Plutchik’s (Plutchik, 1980) psychoevolutionary theory to label the tokens based on basic emotions such as anger, fear, and anticipation. We are going to count the number of words or tokens associated with each emotion and then visualize the results.

    wave1_alberta %>%
    @@ -1907,10 +1908,10 @@ 

    Sentiment Analysis with tidyte

    -

    The figure shows that the media briefings were mostly positive each month. Dr. Hinshaw also used words associated with “trust”, “anticipation”, and “fear”. Overall, the pattern of these emotions seems to remain very similar over time, indicating the consistency of the media briefings in terms of the type and intensity of the emotions delivered.

    +

    The figure shows that the media briefings were mostly positive each month. Dr. Hinshaw used words associated with “trust”, “anticipation”, and “fear”. Overall, the pattern of these emotions seems to remain very similar over time, indicating the consistency of the media briefings in terms of the type and intensity of the emotions delivered.

    Sentiment Analysis with sentimentr

    -

    Another package for lexicon-based sentiment analysis is sentimentr (Rinker, 2021). Unlike the tidytext package, this package takes valence shifters (e.g., negation) into account, which can easily flip the polarity of a sentence with one word. For example, the sentence “I am not unhappy” is actually positive but if we analyze it word by word, the sentence may seem to have a negative sentiment due to the words “not” and “unhappy”. Similarly, “I hardly like this book” is a negative sentence but the analysis of individual words, “hardly” and “like”, may yield a positive sentiment score. The sentimentr package addresses the limitations around sentiment detection with valence shifters (see the package author Tyler Rinker’s Github page for further details on sentimentr: https://github.com/trinker/sentimentr).

    -

    To benefit from the sentimentr package, we need actual sentences in the media briefings rather than the individual tokens. Therefore, I had to create an untokenized version of the dataset, which is available here. We will first import this dataset into R, get individual sentences for each media briefing using the get_sentences() function, and then calculate sentiment scores by day and month via sentiment_by().

+ Another package for lexicon-based sentiment analysis is sentimentr (Rinker, 2021). Unlike the tidytext package, this package takes valence shifters (e.g., negation) into account, which can easily flip the polarity of a sentence with one word. For example, the sentence "I am not unhappy" is actually positive, but if we analyze it word by word, the sentence may seem to have a negative sentiment due to the words "not" and "unhappy". Similarly, "I hardly like this book" is a negative sentence, but the analysis of the individual words "hardly" and "like" may yield a positive sentiment score. The sentimentr package addresses these limitations around sentiment detection with valence shifters (see the package author Tyler Rinker's GitHub page for further details on sentimentr: https://github.com/trinker/sentimentr).
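To see the valence shifters in action, you can score the two example sentences above directly. A quick sketch (the exact scores will depend on the sentimentr version, so the comments below describe the expected sign rather than a specific value):

```r
library(sentimentr)

examples <- get_sentences(c("I am not unhappy.",
                            "I hardly like this book."))
sentiment(examples)
# "I am not unhappy."        -> positive score: "not" flips "unhappy"
# "I hardly like this book." -> negative score: "hardly" negates "like",
#                               which a word-by-word lookup gets backwards
```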

+ To benefit from the sentimentr package, we need the actual sentences from the media briefings rather than individual tokens. Therefore, I had to create an untokenized version of the dataset, which is available here. We will first import this dataset into R, get individual sentences for each media briefing using the get_sentences() function, and then calculate sentiment scores by day and month via sentiment_by().

    library("sentimentr")
@@ -1959,9 +1960,9 @@ Sentiment Analysis with sentimentr

- In the figure above, blue bars indicate highly positive sentiment scores whereas red bars indicate relatively lower sentiment scores. The patterns of the sentiment scores produced by sentimentr are similar to those based on the AFINN lexicon, although this analysis is based on the original media briefings (instead of only tokens) and the valence shifters are also considered in the computation of sentiment scores.

+ In the figure above, the blue bars represent highly positive sentiment scores, while the red bars depict comparatively lower sentiment scores. The patterns in the sentiment scores generated by sentimentr closely resemble those derived from the AFINN lexicon, even though this analysis is based on the original media briefings rather than individual tokens and also accounts for valence shifters when computing the scores. The convergence between the two sets of results is not entirely unexpected: both approaches use similar weighting systems that account for word intensity. This alignment reinforces our confidence in the initial AFINN findings and supports the consistency and reliability of the sentimentr analysis.
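One way to produce a bar chart like the one described is sketched below, assuming the by_day object from the previous chunk (sentiment_by() returns the grouping column plus word_count, sd, and ave_sentiment); the split at the mean score is an illustrative threshold, not the post's exact coloring rule:

```r
library(dplyr)
library(ggplot2)

by_day %>%
  # color days above the mean score differently from those below it
  mutate(tone = ifelse(ave_sentiment >= mean(ave_sentiment),
                       "Higher", "Lower")) %>%
  ggplot(aes(x = date, y = ave_sentiment, fill = tone)) +
  geom_col() +
  scale_fill_manual(values = c(Higher = "steelblue", Lower = "firebrick")) +
  labs(x = NULL, y = "Average sentiment", fill = NULL) +
  theme_bw()
```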

    Concluding Remarks

- xxx

+ In conclusion, lexicon-based sentiment analysis in R offers a powerful tool for uncovering the emotional nuances within textual data. Throughout this post, we have explored the fundamental concepts of lexicon-based sentiment analysis and demonstrated how to implement it in R. By leveraging packages such as tidytext and sentimentr, we have illustrated how sentiment analysis can be integrated into your data analysis workflow. As you continue with sentiment analysis, remember that the insights gained from this technique extend far beyond the surface of the text: they offer valuable perspectives on public opinion, consumer sentiment, and more. I encourage you to delve deeper into lexicon-based sentiment analysis, experiment with the examples presented here, and unlock the insights waiting to be discovered in your own data. Happy analyzing!

    diff --git a/_posts/2024-01-21-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/anchor-4.2.2/anchor.min.js b/_posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/anchor-4.2.2/anchor.min.js similarity index 100% rename from _posts/2024-01-21-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/anchor-4.2.2/anchor.min.js rename to _posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/anchor-4.2.2/anchor.min.js diff --git a/_posts/2024-01-21-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/bowser-1.9.3/bowser.min.js b/_posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/bowser-1.9.3/bowser.min.js similarity index 100% rename from _posts/2024-01-21-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/bowser-1.9.3/bowser.min.js rename to _posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/bowser-1.9.3/bowser.min.js diff --git a/_posts/2024-01-21-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/distill-2.2.21/template.v2.js b/_posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/distill-2.2.21/template.v2.js similarity index 100% rename from _posts/2024-01-21-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/distill-2.2.21/template.v2.js rename to _posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/distill-2.2.21/template.v2.js diff --git a/_posts/2024-01-21-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/figure-html5/ch12-1.png b/_posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/figure-html5/ch12-1.png similarity index 100% rename from _posts/2024-01-21-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/figure-html5/ch12-1.png rename to _posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/figure-html5/ch12-1.png diff --git a/_posts/2024-01-21-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/figure-html5/ch13-1.png b/_posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/figure-html5/ch13-1.png similarity index 100% rename from _posts/2024-01-21-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/figure-html5/ch13-1.png rename to _posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/figure-html5/ch13-1.png diff --git a/_posts/2024-01-21-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/figure-html5/ch4-1.png b/_posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/figure-html5/ch4-1.png similarity index 100% rename from _posts/2024-01-21-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/figure-html5/ch4-1.png rename to _posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/figure-html5/ch4-1.png diff --git 
a/_posts/2024-01-21-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/figure-html5/ch5-1.png b/_posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/figure-html5/ch5-1.png similarity index 100% rename from _posts/2024-01-21-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/figure-html5/ch5-1.png rename to _posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/figure-html5/ch5-1.png diff --git a/_posts/2024-01-21-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/figure-html5/ch6-1.png b/_posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/figure-html5/ch6-1.png similarity index 100% rename from _posts/2024-01-21-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/figure-html5/ch6-1.png rename to _posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/figure-html5/ch6-1.png diff --git a/_posts/2024-01-21-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/figure-html5/ch7-1.png b/_posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/figure-html5/ch7-1.png similarity index 100% rename from _posts/2024-01-21-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/figure-html5/ch7-1.png rename to _posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/figure-html5/ch7-1.png diff --git a/_posts/2024-01-21-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/figure-html5/ch8-1.png b/_posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/figure-html5/ch8-1.png similarity index 100% rename from _posts/2024-01-21-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/figure-html5/ch8-1.png rename to _posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/figure-html5/ch8-1.png diff --git a/_posts/2024-01-21-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/figure-html5/ch9-1.png b/_posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/figure-html5/ch9-1.png similarity index 100% rename from _posts/2024-01-21-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/figure-html5/ch9-1.png rename to _posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/figure-html5/ch9-1.png diff --git a/_posts/2024-01-21-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/header-attrs-2.22/header-attrs.js b/_posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/header-attrs-2.22/header-attrs.js similarity index 100% rename from _posts/2024-01-21-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/header-attrs-2.22/header-attrs.js rename to _posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/header-attrs-2.22/header-attrs.js diff --git 
a/_posts/2024-01-21-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/header-attrs-2.25/header-attrs.js b/_posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/header-attrs-2.25/header-attrs.js similarity index 100% rename from _posts/2024-01-21-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/header-attrs-2.25/header-attrs.js rename to _posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/header-attrs-2.25/header-attrs.js diff --git a/_posts/2024-01-21-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/jquery-3.6.0/jquery-3.6.0.js b/_posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/jquery-3.6.0/jquery-3.6.0.js similarity index 100% rename from _posts/2024-01-21-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/jquery-3.6.0/jquery-3.6.0.js rename to _posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/jquery-3.6.0/jquery-3.6.0.js diff --git a/_posts/2024-01-21-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/jquery-3.6.0/jquery-3.6.0.min.js b/_posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/jquery-3.6.0/jquery-3.6.0.min.js similarity index 100% rename from _posts/2024-01-21-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/jquery-3.6.0/jquery-3.6.0.min.js rename to _posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/jquery-3.6.0/jquery-3.6.0.min.js diff --git a/_posts/2024-01-21-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/jquery-3.6.0/jquery-3.6.0.min.map b/_posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/jquery-3.6.0/jquery-3.6.0.min.map similarity index 100% rename from _posts/2024-01-21-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/jquery-3.6.0/jquery-3.6.0.min.map rename to _posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/jquery-3.6.0/jquery-3.6.0.min.map diff --git a/_posts/2024-01-21-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/pagedtable-1.1/css/pagedtable.css b/_posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/pagedtable-1.1/css/pagedtable.css similarity index 100% rename from _posts/2024-01-21-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/pagedtable-1.1/css/pagedtable.css rename to _posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/pagedtable-1.1/css/pagedtable.css diff --git a/_posts/2024-01-21-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/pagedtable-1.1/js/pagedtable.js b/_posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/pagedtable-1.1/js/pagedtable.js similarity index 100% rename from _posts/2024-01-21-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/pagedtable-1.1/js/pagedtable.js rename to 
_posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/pagedtable-1.1/js/pagedtable.js diff --git a/_posts/2024-01-21-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/popper-2.6.0/popper.min.js b/_posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/popper-2.6.0/popper.min.js similarity index 100% rename from _posts/2024-01-21-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/popper-2.6.0/popper.min.js rename to _posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/popper-2.6.0/popper.min.js diff --git a/_posts/2024-01-21-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/tippy-6.2.7/tippy-bundle.umd.min.js b/_posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/tippy-6.2.7/tippy-bundle.umd.min.js similarity index 100% rename from _posts/2024-01-21-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/tippy-6.2.7/tippy-bundle.umd.min.js rename to _posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/tippy-6.2.7/tippy-bundle.umd.min.js diff --git a/_posts/2024-01-21-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/tippy-6.2.7/tippy-light-border.css b/_posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/tippy-6.2.7/tippy-light-border.css similarity index 100% rename from _posts/2024-01-21-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/tippy-6.2.7/tippy-light-border.css rename to _posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/tippy-6.2.7/tippy-light-border.css diff --git a/_posts/2024-01-21-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/tippy-6.2.7/tippy.css b/_posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/tippy-6.2.7/tippy.css similarity index 100% rename from _posts/2024-01-21-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/tippy-6.2.7/tippy.css rename to _posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/tippy-6.2.7/tippy.css diff --git a/_posts/2024-01-21-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/tippy-6.2.7/tippy.umd.min.js b/_posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/tippy-6.2.7/tippy.umd.min.js similarity index 100% rename from _posts/2024-01-21-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/tippy-6.2.7/tippy.umd.min.js rename to _posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/tippy-6.2.7/tippy.umd.min.js diff --git a/_posts/2024-01-21-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/webcomponents-2.0.0/webcomponents.js b/_posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/webcomponents-2.0.0/webcomponents.js similarity index 100% rename from 
_posts/2024-01-21-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/webcomponents-2.0.0/webcomponents.js rename to _posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/webcomponents-2.0.0/webcomponents.js diff --git a/_posts/2024-01-21-lexicon-based-sentiment-analysis-using-r/sentiment.bib b/_posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/sentiment.bib similarity index 100% rename from _posts/2024-01-21-lexicon-based-sentiment-analysis-using-r/sentiment.bib rename to _posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/sentiment.bib diff --git a/_posts/2024-01-21-lexicon-based-sentiment-analysis-using-r/wave1_alberta.RData b/_posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/wave1_alberta.RData similarity index 100% rename from _posts/2024-01-21-lexicon-based-sentiment-analysis-using-r/wave1_alberta.RData rename to _posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/wave1_alberta.RData diff --git a/_posts/2024-01-21-lexicon-based-sentiment-analysis-using-r/wave1_alberta_sentence.RData b/_posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/wave1_alberta_sentence.RData similarity index 100% rename from _posts/2024-01-21-lexicon-based-sentiment-analysis-using-r/wave1_alberta_sentence.RData rename to _posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/wave1_alberta_sentence.RData diff --git a/docs/about.html b/docs/about.html index 05b79d6..22118e0 100644 --- a/docs/about.html +++ b/docs/about.html @@ -649,7 +649,7 @@

Om=function(a,b,c){var d;d=void 0===d?"755":d;var e;a:{if(a.publisher&&a.publisher.restrictions){var f=a.publisher.restrictions[b];if(void 0!==f){e=f[void 0===d?"755":d];break a}}e=void 0}var g=e;if(0===g)return!1;var h=c;2===c?(h=0,2===g&&(h=1)):3===c&&(h=1,1===g&&(h=0));var m;if(0===h)if(a.purpose&&a.vendor){var n=Nm(a.vendor.consents,void 0===d?"755":d);m=n&&"1"===b&&a.purposeOneTreatment&&"CH"===a.publisherCC?!0:n&&Nm(a.purpose.consents,b)}else m=!0;else m=1===h?a.purpose&&a.vendor?Nm(a.purpose.legitimateInterests, +b)&&Nm(a.vendor.legitimateInterests,void 0===d?"755":d):!0:!0;return m},Nm=function(a,b){return!(!a||!a[b])},Mm=function(a,b,c,d){c||(c=function(){});if("function"===typeof a.s.__tcfapi){var e=a.s.__tcfapi;e(b,2,c,d)}else if(Km(a)){Pm(a);var f=++a.Pa;a.M[f]=c;if(a.h){var g={};a.h.postMessage((g.__tcfapiCall={command:b,version:2,callId:f,parameter:d},g),"*")}}else c({},!1)},Km=function(a){if(a.h)return a.h;var b;a:{for(var c=a.s,d=0;50>d;++d){var e;try{e=!(!c.frames||!c.frames.__tcfapiLocator)}catch(h){e= +!1}if(e){b=c;break a}var f;b:{try{var g=c.parent;if(g&&g!=c){f=g;break b}}catch(h){}f=null}if(!(c=f))break}b=null}a.h=b;return a.h},Pm=function(a){a.C||(a.C=function(b){try{var c;c=("string"===typeof b.data?JSON.parse(b.data):b.data).__tcfapiReturn;a.M[c.callId](c.returnValue,c.success)}catch(d){}},qm(a.s,"message",a.C))},Qm=function(a){if(!1===a.gdprApplies)return!0;void 0===a.internalErrorState&&(a.internalErrorState=Im(a));return"error"===a.cmpStatus||0!==a.internalErrorState?a.internalBlockOnErrors? +(Gm({e:String(a.internalErrorState)}),!1):!0:"loaded"!==a.cmpStatus||"tcloaded"!==a.eventStatus&&"useractioncomplete"!==a.eventStatus?!1:!0};var Rm={1:0,3:0,4:0,7:3,9:3,10:3};function Sm(){var a=pi.tcf||{};return pi.tcf=a} +var Tm=function(){return new Jm(l,{xn:-1})},Zm=function(){var a=Sm(),b=Tm();Lm(b)&&!Um()&&!Vm()&&M(124);if(!a.active&&Lm(b)){Um()&&(a.active=!0,a.fc={},a.cmpId=0,a.tcfPolicyVersion=0,yj().active=!0,a.tcString="tcunavailable");Zj();try{b.addEventListener(function(c){if(0!==c.internalErrorState)Wm(a),ak([O.g.J,O.g.Ha,O.g.N]),yj().active=!0;else if(a.gdprApplies=c.gdprApplies,a.cmpId=c.cmpId,a.enableAdvertiserConsentMode=c.enableAdvertiserConsentMode,Vm()&&(a.active=!0),!Xm(c)||Um()||Vm()){a.tcfPolicyVersion= +c.tcfPolicyVersion;var d;if(!1===c.gdprApplies){var e={},f;for(f in Rm)Rm.hasOwnProperty(f)&&(e[f]=!0);d=e;b.removeEventListener(c)}else if(Xm(c)){var g={},h;for(h in Rm)if(Rm.hasOwnProperty(h))if("1"===h){var m,n=c,p={Wl:!0};p=void 0===p?{}:p;m=Qm(n)?!1===n.gdprApplies?!0:"tcunavailable"===n.tcString||void 0===n.gdprApplies&&!p.Wl||"string"!==typeof n.tcString||!n.tcString.length?!p.no:Om(n,"1",0):!1;g["1"]=m}else g[h]=Om(c,h,Rm[h]);d=g}if(d){a.tcString=c.tcString||"tcempty";a.fc=d;var q={},r=(q[O.g.J]= +a.fc["1"]?"granted":"denied",q);!0!==a.gdprApplies?(ak([O.g.J,O.g.Ha,O.g.N]),yj().active=!0):(r[O.g.Ha]=a.fc["3"]&&a.fc["4"]?"granted":"denied","number"===typeof a.tcfPolicyVersion&&4<=a.tcfPolicyVersion?r[O.g.N]=a.fc["1"]&&a.fc["7"]?"granted":"denied":ak([O.g.N]),Sj(r,{eventId:0},{gdprApplies:a?a.gdprApplies:void 0,tcString:Ym()||""}))}}else ak([O.g.J,O.g.Ha,O.g.N])})}catch(c){Wm(a),ak([O.g.J,O.g.Ha,O.g.N]),yj().active=!0}}};function Wm(a){a.type="e";a.tcString="tcunavailable"} +function Xm(a){return"tcloaded"===a.eventStatus||"useractioncomplete"===a.eventStatus||"cmpuishown"===a.eventStatus}var Um=function(){return!0===l.gtag_enable_tcf_support};function Vm(){return!0===Sm().enableAdvertiserConsentMode}var Ym=function(){var 
a=Sm();if(a.active)return a.tcString},$m=function(){var a=Sm();if(a.active&&void 0!==a.gdprApplies)return a.gdprApplies?"1":"0"},an=function(a){if(!Rm.hasOwnProperty(String(a)))return!0;var b=Sm();return b.active&&b.fc?!!b.fc[String(a)]:!0};var bn=[O.g.J,O.g.R,O.g.N,O.g.Ha],cn={},dn=(cn[O.g.J]=1,cn[O.g.R]=2,cn);function en(a){if(void 0===a)return 0;switch(V(a,O.g.aa)){case void 0:return 1;case !1:return 3;default:return 2}} +var fn=function(a){var b=en(a);if(3===b)return!1;switch(Fj(O.g.Ha)){case 1:case 3:return!0;case 2:return!1;case 4:return 2===b;case 0:return!0;default:return!1}},gn=function(){return Ij()||!Ej(O.g.J)||!Ej(O.g.R)},hn=function(){var a={},b;for(b in dn)dn.hasOwnProperty(b)&&(a[dn[b]]=Fj(b));return"G1"+Me(a[1]||0)+Me(a[2]||0)},jn={},kn=(jn[O.g.J]=0,jn[O.g.R]=1,jn[O.g.N]=2,jn[O.g.Ha]=3,jn);function ln(a){switch(a){case void 0:return 1;case !0:return 3;case !1:return 2;default:return 0}} +var mn=function(a){for(var b="1",c=0;c=c&&(a="0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-_"[c>>6&63],b="0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-_"[c& +63]);var e="0",f;var g=Sm();f=g.active?g.tcfPolicyVersion:void 0;"number"===typeof f&&0<=f&&63>=f&&(e="0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-_"[f]);var h=0;fj()&&(h|=1);"1"===$m()&&(h|=2);Um()&&(h|=4);var m;var n=Sm();m=void 0!==n.enableAdvertiserConsentMode?n.enableAdvertiserConsentMode?"1":"0":void 0;"1"===m&&(h|=8);yj().waitPeriodTimedOut&&(h|=16);return"1"+a+b+e+"0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-_"[h]};var Bn=function(){var a=!1;return a};var Cn={UA:1,AW:2,DC:3,G:4,GF:5,GT:12,GTM:14,HA:6,MC:7},Dn=function(a,b){var c=Sf.ctid.split("-")[0].toUpperCase(),d={};d.ctid=Sf.ctid;d.kn=oi.oe;d.mn=oi.bh;d.Lm=Ek.ke?2:1;d.wc=Sf.Mf;d.wc!==a&&(d.cg=a);R(67)?d.ik=2:R(68)&&(d.ik=1);vi?(d.Yf=Cn[c],d.Yf||(d.Yf=0)):d.Yf=zi?13:10;xi?d.Jh=1:Bn()?d.Jh=2:d.Jh=3;if(R(96)){var e={};e[6]=Fk;R(97)&&!Fk&&(e[1]=!0);d.Hl=e}var f;var g=d.Yf,h=d.Jh;void 0===g?f="":(h||(h=0),f=""+Gg(1,1)+Le(g<<2|h));var m=d.io,n=4+f+(m?""+Gg(2,1)+Le(m):""),p,q=d.mn;p=q&&Fg.test(q)? 
+""+Gg(3,2)+q:"";var r,t=d.kn;r=t?""+Gg(4,1)+Le(t):"";var u;var v=d.ctid;if(v&&b){var w=v.split("-"),x=w[0].toUpperCase();if("GTM"!==x&&"OPT"!==x)u="";else{var y=w[1];u=""+Gg(5,3)+Le(1+y.length)+(d.Lm||0)+y}}else u="";var A=d.ik,B=d.wc,D=d.cg,G=d.po,E=n+p+r+u+(A?""+Gg(6,1)+Le(A):"")+(B?""+Gg(7,3)+Le(B.length)+B:"")+(D?""+Gg(8,3)+Le(D.length)+D:"")+(G?""+Gg(9,3)+Le(G.length)+G:""),F;var N=d.Hl;N=void 0===N?{}:N;for(var P=[],S=fa(Object.keys(N)),Y=S.next();!Y.done;Y=S.next()){var T=Y.value;P[Number(T)]= +N[T]}if(P.length){var U=Gg(10,3),ja;if(0===P.length)ja=Le(0);else{for(var ia=[],da=0,Ca=!1,na=0;na>21:d;c=String(b^d&2147483647)}else c=String(b);return c},Xn=function(a){return[Wn(a),Math.round(Ua()/1E3)].join(".")},Yn=function(a,b,c,d,e){var f=Tn(b);return Kn(a,f,Un(c),d,e)},Zn=function(a,b,c,d){return[b,Vn(c,d),a].join(".")};function $n(a,b,c,d){var e,f=Number(null!=a.Dc?a.Dc:void 0);0!==f&&(e=new Date((b||Ua())+1E3*(f||7776E3)));return{path:a.path,domain:a.domain,flags:a.flags,encode:!!c,expires:e,Ib:d}};var ao;var fo=function(){var a=bo,b=co,c=eo(),d=function(g){a(g.target||g.srcElement||{})},e=function(g){b(g.target||g.srcElement||{})};if(!c.init){Nc(C,"mousedown",d);Nc(C,"keyup",d);Nc(C,"submit",e);var f=HTMLFormElement.prototype.submit;HTMLFormElement.prototype.submit=function(){b(this);f.call(this)};c.init=!0}},go=function(a,b,c,d,e){var f={callback:a,domains:b,fragment:2===c,placement:c,forms:d,sameHost:e};eo().decorators.push(f)},ho=function(a,b,c){for(var d=eo().decorators,e={},f=0;ff;f++){for(var g=f,h=0;8>h;h++)g=g&1?g>>>1^3988292384:g>>>1;e[f]=g}d=e}ao=d;for(var m=4294967295,n=0;n>>8^ao[(m^c.charCodeAt(n))&255];return((m^-1)>>>0).toString(36)} +function oo(){return function(a){var b=qk(l.location.href),c=b.search.replace("?",""),d=jk(c,"_gl",!1,!0)||"";a.query=po(d)||{};var e=mk(b,"fragment"),f;var g=-1;if(Za(e,"_gl="))g=4;else{var h=e.indexOf("&_gl=");0g)f=void 0;else{var m=e.indexOf("&",g);f=0>m?e.substring(g):e.substring(g,m)}a.fragment=po(f||"")||{}}} +var qo=function(a){var b=oo(),c=eo();c.data||(c.data={query:{},fragment:{}},b(c.data));var d={},e=c.data;e&&(Xa(d,e.query),a&&Xa(d,e.fragment));return d},po=function(a){try{var b=ro(a,3);if(void 0!==b){for(var c={},d=b?b.split("*"):[],e=0;e+1e;++e){var f=io.exec(d);if(f){c=f;break a}d=decodeURIComponent(d)}c=void 0}var g=c;if(g&&"1"===g[1]){var h=g[3],m;a:{for(var n=g[2],p=0;pf.length||!b&&3!==f.length)&&Number(f[1])){d[c[e].fi]||(d[c[e].fi]=[]);var g={version:f[0],timestamp:1E3*Number(f[1]),ia:f[2]};b&&3p){n=!0;break b}n=!1}if(!n){var t=$n(b,m,!0);t.Ib=ap();Qn(g,h,t)}}}}op(mp(c.gclid,c.gclsrc),!1,b)})},qp=function(a,b){var c=$o[a];if(void 0!==c)return b+c},rp=function(a){return 0!==tp(a.split(".")).length?1E3*(Number(a.split(".")[1])||0):0}; +function hp(a){var b=tp(a.split("."));return 0===b.length?null:{version:b[0],ia:b[2],timestamp:1E3*(Number(b[1])||0),labels:b.slice(3)}}function tp(a){return 3>a.length||"GCL"!==a[0]&&"1"!==a[0]||!/^\d+$/.test(a[1])||!Zo.test(a[2])?[]:a} +var up=function(a,b,c,d,e){if(Ha(b)&&"null"!==l.origin){var f=kp(e),g=function(){for(var h={},m=0;mb};var Up,Vp=!1;function Wp(){Vp=!0;Up=Up||{}}var Xp=function(a){Vp||Wp();return Up[a]};var Yp=function(a,b,c){this.target=a;this.eventName=b;this.o=c;this.h={};this.metadata=z(c.eventMetadata||{});this.isAborted=!1};Yp.prototype.copyToHitData=function(a,b,c){var d=V(this.o,a);void 0===d&&(d=b);if(void 0!==d&&void 0!==c&&k(d)&&R(48))try{d=c(d)}catch(e){}void 0!==d&&(this.h[a]=d)};var Zp=function(a){return 
a.metadata.source_canonical_id},$p=function(a,b,c){var d=Xp(a.target.ba);return d&&d.hasOwnProperty(b)?d[b]:c};var aq=function(){pi.dedupe_gclid||(pi.dedupe_gclid=""+Xn());return pi.dedupe_gclid};var bq=/^(www\.)?google(\.com?)?(\.[a-z]{2}t?)?$/,cq=/^www.googleadservices.com$/,eq=function(a){a||(a=dq());return a.Cn?!1:a.om||a.qm||a.sm||a.yh||a.Qf||a.Vl||a.fm?!0:!1},dq=function(){var a={},b=qo(!0);a.Cn=!!b._up;var c=np();a.om=void 0!==c.aw;a.qm=void 0!==c.dc;a.sm=void 0!==c.wbraid;var d=qk(l.location.href),e=mk(d,"query",!1,void 0,"gad");a.yh=void 0!==e;if(!a.yh){var f=d.hash.replace("#",""),g=jk(f,"gad",!1);a.yh=void 0!==g}a.Qf=void 0;if(R(55)){var h=mk(d,"query",!1,void 0,"gad_source");a.Qf= +h;if(void 0===a.Qf){var m=d.hash.replace("#",""),n=jk(m,"gad_source",!1);a.Qf=n}}var p=C.referrer?mk(qk(C.referrer),"host"):"";a.fm=bq.test(p);a.Vl=cq.test(p);return a};var fq=function(){if(Fa(l.__uspapi)){var a="";try{l.__uspapi("getUSPData",1,function(b,c){if(c&&b){var d=b.uspString;d&&RegExp("^[\\da-zA-Z-]{1,20}$").test(d)&&(a=d)}})}catch(b){}return a}};function nq(a){var b=V(a.o,O.g.Ab),c=V(a.o,O.g.Pb);b&&!c?(a.eventName!==O.g.da&&a.eventName!==O.g.Bd&&M(131),a.isAborted=!0):!b&&c&&(M(132),a.isAborted=!0)}function oq(a){var b=Vj(O.g.J)?pi.pscdl:"denied";a.h[O.g.hf]=b};var wq=function(a,b,c,d){var e=Kc(),f;if(1===e)a:{var g=Bi;g=g.toLowerCase();for(var h="https://"+g,m="http://"+g,n=1,p=C.getElementsByTagName("script"),q=0;qq;q++){var r=p[q].src;if(r){r=r.toLowerCase();if(0===r.indexOf(m)){f=3;break a}1===n&&0===r.indexOf(h)&&(n=2)}}f=n}else f=e;return(2===f||d||"http:"!=l.location.protocol?a:b)+c};function Jq(a){return{getDestinationId:function(){return a.target.ba},getEventName:function(){return a.eventName},setEventName:function(b){a.eventName=b},getHitData:function(b){return a.h[b]},setHitData:function(b,c){a.h[b]=c},setHitDataIfNotDefined:function(b,c){void 0===a.h[b]&&(a.h[b]=c)},copyToHitData:function(b,c){a.copyToHitData(b,c)},getMetadata:function(b){return a.metadata[b]},setMetadata:function(b,c){a.metadata[b]=c},isAborted:function(){return a.isAborted},abort:function(){a.isAborted= +!0},getFromEventContext:function(b){return V(a.o,b)},Nj:function(){return a},getHitKeys:function(){return Object.keys(a.h)}}};var Lq=function(a){var b=Kq[a.target.ba];if(!a.isAborted&&b)for(var c=Jq(a),d=0;d=f)return!0;(d=d.parentElement)&&(e=l.getComputedStyle(d,null))}return!1}; +var Tq=function(){var a=C.body,b=C.documentElement||a&&a.parentElement,c,d;if(C.compatMode&&"BackCompat"!==C.compatMode)c=b?b.clientHeight:0,d=b?b.clientWidth:0;else{var e=function(f,g){return f&&g?Math.min(f,g):Math.max(f,g)};c=e(b?b.clientHeight:0,a?a.clientHeight:0);d=e(b?b.clientWidth:0,a?a.clientWidth:0)}return{width:d,height:c}},Uq=function(a){var b=Tq(),c=b.height,d=b.width,e=a.getBoundingClientRect(),f=e.bottom-e.top,g=e.right-e.left;return f&&g?(1-Math.min((Math.max(0-e.left,0)+Math.max(e.right- +d,0))/g,1))*(1-Math.min((Math.max(0-e.top,0)+Math.max(e.bottom-c,0))/f,1)):0};var Vq=[],Wq=!(!l.IntersectionObserver||!l.IntersectionObserverEntry),Xq=function(a,b,c){for(var d=new l.IntersectionObserver(a,{threshold:c}),e=0;ee[h])for(;f[h]=c[f[h]+1];)d(b[h],m),f[h]++;else if(mc[d]&&(c[d]=0);if(Wq){var e=!1;H(function(){e|| +Yq(a,b,c)()});return Xq(function(f){e=!0;for(var g={Je:0};g.Je=a.length)return a;var c=a.filter(b);return 0==c.length?a:c},ar=function(a){var b;if(a===C.body)b="body";else{var c;if(a.id)c="#"+a.id;else{var d;if(a.parentElement){var e;a:{var f=a.parentElement;if(f){for(var g=0;g:nth-child("+e+")"}else 
d="";c=d}b=c}return b},jr=function(a){for(var b=[],c=0;cd;d++){var e=c[d];if(!(0<=kr.indexOf(e.tagName.toUpperCase()))&&e.children instanceof HTMLCollection){for(var f=!1,g=0;gg;g++)if(!(0<=lr.indexOf(e.children[g].tagName.toUpperCase()))){f=!0;break}(!f||R(25)&&-1!==mr.indexOf(e.tagName))&&a.push(e)}}return{elements:a,status:1E4Ua()-c.timestamp)return c.result;var d=nr(),e=d.status, +f=[],g,h,m=[];if(!R(25)){if(a.sb&&a.sb.email){var n=jr(d.elements);f=hr(n,a&&a.ye);g=gr(f);10Ua()-d){Ab("TAGGING",9);return}try{c.parentNode.removeChild(c)}catch(e){}c=void 0}else try{if(50<=E.querySelectorAll('iframe[allow="join-ad-interest-group"][data-tagging-id*="-"]').length){Ab("TAGGING",10);return}}catch(e){}Mc(a,void 0,{allow:"join-ad-interest-group"},{taggingId:b,loadTime:Ua()},c)}function ws(){return"https://td.doubleclick.net"};var xs=RegExp("^UA-\\d+-\\d+%3A[\\w-]+(?:%2C[\\w-]+)*(?:%3BUA-\\d+-\\d+%3A[\\w-]+(?:%2C[\\w-]+)*)*$"),ys=/^~?[\w-]+(?:\.~?[\w-]+)*$/,zs=/^\d+\.fls\.doubleclick\.net$/,As=/;gac=([^;?]+)/,Bs=/;gacgb=([^;?]+)/,Cs=/;gclaw=([^;?]+)/,Ds=/;gclgb=([^;?]+)/; -function Es(a,b){if(zs.test(E.location.host)){var c=E.location.href.match(b);return c&&2==c.length&&c[1].match(xs)?decodeURIComponent(c[1]):""}var d=[],e;for(e in a){for(var f=[],g=a[e],h=0;hc.indexOf(h))if(m&&0c.indexOf(m[t])){O(11);r=!1;break a}}else{r=!1;break a}r=!0}n=r}var u=!1;if(d){var v=0<=e.indexOf(h);if(v)u=v;else{var w=Ma(e,m||[]);w&&O(10);u=w}}var x=!n||u;x||!(0<=m.indexOf("sandboxedScripts"))||c&&-1!==c.indexOf("sandboxedScripts")||(x=Ma(e,Yu));return f[h]=x}},$u=!1; -$u=!0;var Zu=function(){return Vu.test(z.location&&z.location.hostname)},bv=function(){if(rk){var a=function(b){var c=xf(b),d;if(Cf(c)){var e=c[Oe.na];if(!e)throw"Error: No function name given for function call.";var f=qf[e];d=!!f&&!!f.runInSiloedMode}else d=!!Pu(c[Oe.na],4);return d};S(104)?Ru(zk(),function(b){return a(b.entityId)}):Ru(zk(),a)}};var dv=function(a,b,c,d,e){if(!cv()&&!Fk(a)){var f="?id="+encodeURIComponent(a)+"&l="+qi.fa,g=0===a.indexOf("GTM-");g||(f+="&cx=c");S(44)&&(f+=">m="+dn());var h=Tp();h&&(f+="&sign="+qi.zf);var m=c?"/gtag/js":"/gtm.js",n=zi||Bi?Sp(b,m+f):void 0;if(!n){var p=qi.xd+m;h&&Dc&&g&&(p=Dc.replace(/^(?:https?:\/\/)?/i,"").split(/[?#]/)[0]);n=sp("https://","http://",p+f)}var q=a;d.siloed&&(Ik({ctid:q,isDestination:!1}),q=tk(q));var r=q,t=Hk();nk().container[r]={state:1,context:d,parent:t};ok({ctid:r,isDestination:!1}, -e);Kc(n)}},ev=function(a,b,c,d){if(!cv()&&!Gk(a))if(Jk())nk().destination[a]={state:0,transportUrl:b,context:c,parent:Hk()},ok({ctid:a,isDestination:!0},d),O(91);else{var e="/gtag/destination?id="+encodeURIComponent(a)+"&l="+qi.fa+"&cx=c";S(44)&&(e+=">m="+dn());Tp()&&(e+="&sign="+qi.zf);var f=zi||Bi?Sp(b,e):void 0;f||(f=sp("https://","http://",qi.xd+e));var g=a;c.siloed&&(Ik({ctid:g,isDestination:!0}),g=tk(g));nk().destination[g]={state:1,context:c,parent:Hk()};ok({ctid:g,isDestination:!0},d);Kc(f)}}; -function cv(){if(bn()){return!0}return!1};var fv=!1,gv=0,hv=[];function iv(a){if(!fv){var b=E.createEventObject,c="complete"==E.readyState,d="interactive"==E.readyState;if(!a||"readystatechange"!=a.type||c||!b&&d){fv=!0;for(var e=0;egv){gv++;try{E.documentElement.doScroll("left"),iv()}catch(a){z.setTimeout(jv,50)}}}var kv=function(a){fv?a():hv.push(a)};var lv=function(){this.F=0;this.h={}};lv.prototype.addListener=function(a,b,c){var d=++this.F;this.h[a]=this.h[a]||{};this.h[a][String(d)]={listener:b,qb:c};return d};lv.prototype.s=function(a,b){var c=this.h[a],d=String(b);if(!c||!c[d])return!1;delete 
c[d];return!0};lv.prototype.C=function(a,b){var c=[];l(this.h[a],function(d,e){0>c.indexOf(e.listener)&&(void 0===e.qb||0<=b.indexOf(e.qb))&&c.push(e.listener)});return c};var mv=function(a,b,c){return{entityType:a,indexInOriginContainer:b,nameInOriginContainer:c,originContainerId:yk()}};var ov=function(a,b){this.h=!1;this.F=[];this.M={tags:[]};this.T=!1;this.s=this.C=0;nv(this,a,b)},pv=function(a,b,c,d){if(ui.hasOwnProperty(b)||"__zone"===b)return-1;var e={};nb(d)&&(e=B(d,e));e.id=c;e.status="timeout";return a.M.tags.push(e)-1},qv=function(a,b,c,d){var e=a.M.tags[b];e&&(e.status=c,e.executionTime=d)},rv=function(a){if(!a.h){for(var b=a.F,c=0;c=a.C&&rv(a)})},tv=function(a){a.T=!0;a.s>=a.C&&rv(a)};var uv={},wv=function(){return z[vv()]},xv=!1; -function vv(){return z.GoogleAnalyticsObject||"ga"} -var Av=function(a){},Bv=function(a,b){return function(){var c=wv(),d=c&&c.getByName&&c.getByName(a);if(d){var e=d.get("sendHitTask");d.set("sendHitTask",function(f){var g=f.get("hitPayload"),h=f.get("hitCallback"),m=0>g.indexOf("&tid="+b);m&&(f.set("hitPayload",g.replace(/&tid=UA-[0-9]+-[0-9]+/,"&tid="+b),!0),f.set("hitCallback",void 0,!0));e(f);m&&(f.set("hitPayload", -g,!0),f.set("hitCallback",h,!0),f.set("_x_19",void 0,!0),e(f))})}}};var Gv={},Hv={};function Iv(a,b){if(Tk){var c;c=b.match(/^(gtm|gtag)\./)?encodeURIComponent(b):"*";Gv[a]="&e="+c+"&eid="+a;cl(a)}}function Jv(a){var b=a.eventId,c=a.ac;if(!Gv[b])return"";var d=Hv[b]?"":"&es=1";d+=Gv[b];c&&(Hv[b]=!0);return d};var Kv={};function Lv(a,b){Tk&&(Kv[a]=Kv[a]||{},Kv[a][b]=(Kv[a][b]||0)+1)}function Mv(a){var b=a.eventId,c=a.ac,d=Kv[b]||{},e=[],f;for(f in d)d.hasOwnProperty(f)&&e.push(""+f+d[f]);c&&delete Kv[b];return e.length?"&md="+e.join("."):""};var Nv={},Ov={aev:"1",c:"2",jsm:"3",v:"4",j:"5",smm:"6",rmm:"7",input:"8"}; -function Pv(a,b,c){if(Tk){Nv[a]=Nv[a]||[];var d=Ov[b]||"0",e;e=c instanceof z.Element?"1":c instanceof z.Event?"2":c instanceof z.RegExp?"3":c===z?"4":c===E?"5":c instanceof z.Promise?"6":c instanceof z.Storage?"7":c instanceof z.Date?"8":c instanceof z.History?"9":c instanceof z.Performance?"a":c===z.crypto?"b":c instanceof z.Location?"c":c instanceof z.Navigator?"d":"object"!==typeof c||nb(c)?"0":"e";Nv[a].push(""+d+e)}} -function Qv(a){var b=a.eventId,c=Nv[b]||[];if(!c.length)return"";a.ac&&delete Nv[b];return"&pcr="+c.join(".")};var Rv={},Sv={};function Tv(a,b,c){if(Tk&&b){var d=lk(b);Rv[a]=Rv[a]||[];Rv[a].push(c+d);var e=(Cf(b)?"1":"2")+d;Sv[a]=Sv[a]||[];Sv[a].push(e);cl(a)}}function Uv(a){var b=a.eventId,c=a.ac,d="",e=Rv[b]||[];e.length&&(d+="&tr="+e.join("."));var f=Sv[b]||[];f.length&&(d+="&ti="+f.join("."));c&&(delete Rv[b],delete Sv[b]);return d};function Vv(a,b,c,d){var e=of[a],f=Wv(a,b,c,d);if(!f)return null;var g=zf(e[Oe.kj],c,[]);if(g&&g.length){var h=g[0];f=Vv(h.index,{onSuccess:f,onFailure:1===h.Aj?b.terminate:f,terminate:b.terminate},c,d)}return f} -function Wv(a,b,c,d){function e(){if(f[Oe.Tk])h();else{var w=Af(f,c,[]),x=w[Oe.kk];if(null!=x)for(var y=0;ye?1:dh?1:ga.length)&&k(a[1])){var e={};if(2G.indexOf(C)&&G.push(C)}}else{gx(f.id);var I=f.id,J=e[Q.g.Qd]||"default";J=J.toString().split(",");for(var P=0;PU.indexOf(I)&&U.push(I)}}delete e[Q.g.Qd];var aa=b.eventMetadata||{};aa.hasOwnProperty("is_external_event")||(aa.is_external_event=!b.fromContainerExecution);b.eventMetadata=aa;delete e[Q.g.Xc];for(var V=u?[f.id]:wk(),T=0;Ta.length)&&k(c)){var d;if(2b)b=0;var c=ri[qi.fa],d=0,e=!1,f=void 0;f=z.setTimeout(function(){e||(e=!0,a());f=void 0},b);return function(){var 
g=c?c.subscribers:1;++d===g&&(f&&(z.clearTimeout(f),f=void 0),e||(a(),e=!0))}}; -function Zx(a,b){var c=a._clear||b.overwriteModelFields;l(a,function(e,f){"_clear"!==e&&(c&&Vi(e),Vi(e,f))});Gi||(Gi=a["gtm.start"]);var d=a["gtm.uniqueEventId"];if(!a.event)return!1;"number"!==typeof d&&(d=Li(),a["gtm.uniqueEventId"]=d,Vi("gtm.uniqueEventId",d));return cx(a)} -function $x(a){if(null==a||"object"!==typeof a)return!1;if(a.event)return!0;if(Na(a)){var b=a[0];if("config"===b||"event"===b||"js"===b||"get"===b)return!0}return!1} -function ay(){var a;if(Sx.length)a=Sx.shift();else if(Rx.length)a=Rx.shift();else return;var b;var c=a;if(Tx||!$x(c.message))b=c;else{Tx=!0;var d=c.message["gtm.uniqueEventId"];"number"!==typeof d&&(d=c.message["gtm.uniqueEventId"]=Li());var e={},f={message:(e.event="gtm.init_consent",e["gtm.uniqueEventId"]=d-2,e),messageContext:{eventId:d-2}},g={},h={message:(g.event="gtm.init",g["gtm.uniqueEventId"]=d-1,g),messageContext:{eventId:d-1}};Rx.unshift(h,c);if(Tk){var m=Tf.ctid;if(m){var n,p=Ak(Hk()); -n=p&&p.context;var q,r=Mn(z.location.href);q=r.hostname+r.pathname;var t=n&&n.fromContainerExecution,u=n&&n.source,v=Tf.Ef,w=qk.je;Tk&&(Vp||(Vp=q),Wp.push(m+";"+v+";"+(t?1:0)+";"+(u||0)+";"+(w?1:0)))}}b=f}return b} -function by(){for(var a=!1,b;!Ux&&(b=ay());){Ux=!0;delete Pi.eventModel;Ri();var c=b,d=c.message,e=c.messageContext;if(null==d)Ux=!1;else{e.fromContainerExecution&&Wi();try{if(Ga(d))try{d.call(Ti)}catch(x){}else if(Ia(d)){var f=d;if(k(f[0])){var g=f[0].split("."),h=g.pop(),m=f.slice(1),n=Si(g.join("."),2);if(null!=n)try{n[h].apply(n,m)}catch(x){}}}else{var p=void 0,q=!1;if(Na(d)){a:{if(d.length&&k(d[0])){var r=qx[d[0]];if(r&&(!e.fromContainerExecution||!rx[d[0]])){p=r(d,e);break a}}p=void 0}(q=p&& -"set"===d[0]&&!!p.event)&&O(101)}else p=d;if(p){var t=Zx(p,e);a=t||a;q&&t&&O(113)}}}finally{e.fromContainerExecution&&Ri(!0);var u=d["gtm.uniqueEventId"];if("number"===typeof u){for(var v=Qx[String(u)]||[],w=0;wPx&&(Px=u)}Ux=!1}}}return!a} -function dy(){if(S(17)){var a=ey();}var b=by();if(S(17)){}try{sx(yk())}catch(c){}return b} -function hw(a){if(Pxn)for(O(4),c.pruned=!0;this.length>n;)this.shift();var p="boolean"!==typeof m||m;return by()&&p};var e=b.slice(0).map(function(f){return a(f)});Rx.push.apply(Rx,e);if(ey()){if(S(17)){}H(dy)}},ey=function(){var a=!0;return a};function gy(a){if(null==a||0===a.length)return!1;var b=Number(a),c=Ua();return bc-9E5}function hy(a){return a&&0===a.indexOf("pending:")?gy(a.substr(8)):!1}; - -var Cy=function(){};var Dy=function(){};Dy.prototype.toString=function(){return"undefined"};var Ey=new Dy; -var Ly=function(a,b,c){var d={event:b,"gtm.element":a,"gtm.elementClasses":Vc(a,"className"),"gtm.elementId":a["for"]||Qc(a,"id")||"","gtm.elementTarget":a.formTarget||Vc(a,"target")||""};c&&(d["gtm.triggers"]=c.join(","));d["gtm.elementUrl"]=(a.attributes&&a.attributes.formaction?a.formAction:"")||a.action||Vc(a,"href")||a.src||a.code||a.codebase||"";return d},My=function(a){ri.hasOwnProperty("autoEventsSettings")||(ri.autoEventsSettings={});var b=ri.autoEventsSettings;b.hasOwnProperty(a)||(b[a]= -{});return b[a]},Ny=function(a,b,c){My(a)[b]=c},Oy=function(a,b,c,d){var e=My(a),f=Va(e,b,d);e[b]=c(f)},Py=function(a,b,c){var d=My(a);return Va(d,b,c)},Qy=function(a,b){Py(a,"init",!1)||(Ny(a,"init",!0),b())},Ry=function(a){return"string"===typeof a?a:String(Li())};var Xy=!!z.MutationObserver,Yy=void 0,Zy=function(a){if(!Yy){var b=function(){var c=E.body;if(c)if(Xy)(new MutationObserver(function(){for(var 
e=0;e=p?0:Math.round(q/p*100),t=E.hidden?!1:.5<=Zq(c);d();var u=void 0;void 0!==b&&(u=[b]);var v=Ly(c,"gtm.video",u);v["gtm.videoProvider"]="youtube";v["gtm.videoStatus"]=g;v["gtm.videoUrl"]=n.url;v["gtm.videoTitle"]=n.title;v["gtm.videoDuration"]= -Math.round(p);v["gtm.videoCurrentTime"]=Math.round(q);v["gtm.videoElapsedTime"]=Math.round(f);v["gtm.videoPercent"]=r;v["gtm.videoVisible"]=t;return v},Tj:function(){e=Ua()},hd:function(){d()}}};var lz=z.clearTimeout,mz=z.setTimeout,nz=function(a,b,c,d){if(bn()){b&&H(b)}else return Kc(a,b,c,d)},oz=function(){return new Date},pz=function(){return z.location.href},qz=function(a){return Kn(Mn(a),"fragment")},rz=function(a){return Ln(Mn(a))},sz=function(a,b){return Si(a,b||2)},tz=function(a,b,c){return b?Xx(a,b,c):Wx(a)},uz=function(a,b){z[a]=b},X=function(a,b,c){b&&(void 0===z[a]||c&&!z[a])&&(z[a]=b);return z[a]}, -vz=function(a,b,c){return Jm(a,b,void 0===c?!0:!!c)},wz=function(a,b,c){return 0===Sm(a,b,c)},xz=function(a,b){if(bn()){b&&H(b)}else Mc(a,b)},yz=function(a){return!!Py(a,"init",!1)},zz=function(a){Ny(a,"init",!0)},Az=function(a,b,c){pb(a)||Pv(c,b,a)}; - -function Xz(a,b){function c(g){var h=Mn(g),m=Kn(h,"protocol"),n=Kn(h,"host",!0),p=Kn(h,"port"),q=Kn(h,"path").toLowerCase().replace(/\/$/,"");if(void 0===m||"http"===m&&"80"===p||"https"===m&&"443"===p)m="web",p="default";return[m,n,p,q]}for(var d=c(String(a)),e=c(String(b)),f=0;f=a.Cc)a.Ac&&z.clearInterval(a.Ac);else{a.Ec++;var b=Ua();Wx({event:a.eventName,"gtm.timerId":a.Ac,"gtm.timerEventNumber":a.Ec,"gtm.timerInterval":a.interval,"gtm.timerLimit":a.Cc,"gtm.timerStartTime":a.Te,"gtm.timerCurrentTime":b,"gtm.timerElapsedTime":b-a.Te,"gtm.triggers":a.Xh})}}} -function FC(a,b){ -return b}FC.D="internal.enableAutoEventOnTimer";var xc=fa(["data-gtm-yt-inspected-"]),GC=["www.youtube.com","www.youtube-nocookie.com"],HC,IC=!1; -var JC=function(a,b,c){var d=a.map(function(g){return{Ca:g,Se:g,Qe:void 0}});if(!b.length)return d;var e=b.map(function(g){return{Ca:g*c,Se:void 0,Qe:g}});if(!d.length)return e;var f=d.concat(e);f.sort(function(g,h){return g.Ca-h.Ca});return f},KC=function(a){a=void 0===a?[]:a;for(var b=[],c=0;ca[c]||b.push(a[c]);b.sort(function(d,e){return d-e});return b},LC=function(a){a=void 0===a?[]:a;for(var b=[],c=0;ca[c]||(b[c]=a[c]/100);b.sort(function(d,e){return d- -e});return b},MC=function(a,b){var c,d;function e(){t=kz(function(){return{url:w,title:x,ih:v,wj:a.getCurrentTime(),playbackRate:y}},b.qb,a.getIframe());v=0;x=w="";y=1;return f}function f(F){switch(F){case 1:v=Math.round(a.getDuration());w=a.getVideoUrl();if(a.getVideoData){var G=a.getVideoData();x=G?G.title:""}y=a.getPlaybackRate();b.ah?Wx(t.createEvent("start")):t.hd();u=JC(b.Oh,b.Nh,a.getDuration());return g(F);default:return f}}function g(){A=a.getCurrentTime();C=Sa().getTime();t.Tj();r();return h} -function h(F){var G;switch(F){case 0:return n(F);case 2:G="pause";case 3:var I=a.getCurrentTime()-A;G=1a.getDuration())return;F=(G.Ca-a.getCurrentTime())/y;if(0>F&&(u.shift(),0===u.length))return}while(0>F);c=function(){d=0;c=p;0d.Le+60*f))return a;var g=lF(b);if(!g)return a;g.Ic=d.Ic+1;var h;return null!=(h=mF(g.sessionId,g.Ic,g.pd,g.Le,g.Ah,g.Dc,g.ue))?h:b},qF=function(a,b){var c=b.metadata.cookie_options,d=oF(b,c),e=$m(a,pF[0],c.domain,c.path),f={Gb:Q.g.R,domain:c.domain,path:c.path,expires:c.Xb?new Date(Ua()+1E3*c.Xb):void 0,flags:c.flags};Sm(d, -void 0,f);return 1!==Sm(d,e,f)},rF=function(a){var b=a.metadata.cookie_options,c=oF(a,b),d=Zm(c,b.domain,b.path,pF,Q.g.R);if(!d)return d;var e=Jm(c,void 0,void 
0,Q.g.R);if(d&&1m.length)){var n=Number(m[5]);n&&(!g||n>g)&&(g=n,f=e[h])}}f&&f.substring(f.length-d.length,f.length)!==d&&(O(115),d=f.split(".").slice(2).join("."))}return d},mF=function(a,b,c,d,e,f,g){if(a&&b){var h=[a,b,Oa(c),d,e];h.push(f?"1":"0"); -h.push(g||"0");return h.join(".")}},pF=["GS1"],oF=function(a,b){return b.prefix+"_ga_"+a.target.P[0]},lF=function(a){if(a){var b=a.split(".");if(!(5>b.length||7b.length&&O(67);var c=Number(b[1]),d=Number(b[3]),e=Number(b[4]||0);c||O(118);d||O(119);isNaN(e)&&O(120);if(c&&d&&!isNaN(e))return{sessionId:b[0],Ic:c,pd:!!Number(b[2]),Le:d,Ah:e,Dc:"1"===b[5],ue:"0"!==b[6]?b[6]:void 0}}}},sF=function(a){return mF(a.h[Q.g.Cb],a.h[Q.g.Vd],a.h[Q.g.Ud],Math.floor(a.metadata.event_start_timestamp_ms/ -1E3),a.metadata.join_timer_sec||0,!!a.metadata[Q.g.ef],a.h[Q.g.Hd])}; -var tF=function(a){var b=W(a.o,Q.g.Ma),c=a.o.s[Q.g.Ma];if(c===b)return c;var d=B(b);c&&c[Q.g.X]&&(d[Q.g.X]=(d[Q.g.X]||[]).concat(c[Q.g.X]));return d},uF=function(a,b){var c=fo(!0);return"1"!==c._up?{}:{clientId:c[a],Sf:c[b]}},vF=function(a,b,c){var d=fo(!0),e=d[b];e&&(gF(a,e,2),jF(e,a));var f=d[c];f&&qF(f,a);return{clientId:e,Sf:f}},wF=!1,xF=function(a){var b=tF(a)||{},c=a.metadata.cookie_options,d=c.prefix+"_ga",e=oF(a,c),f={};po(b[Q.g.qc],!!b[Q.g.X])&&(f=vF(a,d,e),f.clientId&&f.Sf&&(wF=!0));b[Q.g.X]&& -mo(function(){var g={},h=hF(a);h&&(g[d]=h);var m=rF(a);m&&(g[e]=m);var n=Jm("FPLC",void 0,void 0,Q.g.R);n.length&&(g._fplc=n[0]);return g},b[Q.g.X],b[Q.g.Nb],!!b[Q.g.zb]);return f},zF=function(a){if(!W(a.o,Q.g.Db))return{};var b=a.metadata.cookie_options,c=b.prefix+"_ga",d=oF(a,b);no(function(){var e;if(fk("analytics_storage"))e={};else{var f={};e=(f._up="1",f[c]=a.h[Q.g.ub],f[d]=sF(a),f)}return e},1);return!fk("analytics_storage")&&yF()?uF(c,d):{}},yF=function(){var a=Jn(z.location,"host"),b=Jn(Mn(E.referrer), -"host");return a&&b?a===b||0<=a.indexOf("."+b)||0<=b.indexOf("."+a)?!0:!1:!1};var AF=function(){var a=Ua(),b=a+864E5,c=20,d=5E3;return function(){var e=Ua();e>=b&&(b=e+864E5,d=5E3);if(1>d)return!1;c=Math.min(c+(e-a)/1E3*5,20);a=e;if(1>c)return!1;d--;c--;return!0}}; -var BF=function(a,b){um()&&(a.gcs=vm(),b.metadata.is_consent_update&&(a.gcu="1"));S(28)&&(a.gcd=zm(b.o));tm(b.o)&&(S(53)||zD())?S(32)&&(a.npa="0"):a.npa="1"},EF=function(a){if(a.metadata.is_merchant_center)return"https://www.merchant-center-analytics.goog/mc/collect";var b=Sp(Up(a.o),"/g/collect");if(b)return b;var c=UE(a),d=W(a.o,Q.g.ib);return c&&!tj()&&!1!==d&&yD()&&fk(Q.g.J)&&fk(Q.g.R)?CF():DF()},FF=!1;FF=!0; -var GF={};GF[Q.g.ub]="cid";GF[Q.g.ff]="_fid";GF[Q.g.wg]="_geo";GF[Q.g.xb]="gdid";GF[Q.g.Yc]="ir";GF[Q.g.La]="ul";GF[Q.g.Sd]="_rdi";GF[Q.g.Bb]="sr";GF[Q.g.Ri]="tid";GF[Q.g.rf]="tt";GF[Q.g.fe]="ec_mode";GF[Q.g.aj]="gtm_up";GF[Q.g.Xd]="uaa";GF[Q.g.Yd]="uab";GF[Q.g.Zd]="uafvl";GF[Q.g.ae]="uamb";GF[Q.g.be]="uam";GF[Q.g.ce]="uap";GF[Q.g.de]="uapv";GF[Q.g.ee]="uaw"; -GF[Q.g.Kb]="are";GF[Q.g.Si]="ur";GF[Q.g.jf]="lps";GF[Q.g.Hg]="pae";var HF={};HF[Q.g.Kc]="cc";HF[Q.g.Lc]="ci";HF[Q.g.Mc]="cm";HF[Q.g.Nc]="cn";HF[Q.g.Pc]="cs";HF[Q.g.Qc]="ck";HF[Q.g.xa]="cu";HF[Q.g.Aa]="dl";HF[Q.g.Na]="dr";HF[Q.g.Ab]="dt";HF[Q.g.Ud]="seg";HF[Q.g.Cb]="sid";HF[Q.g.Vd]="sct";HF[Q.g.Ta]="uid";S(21)&&(HF[Q.g.ad]="dp");var IF={};IF[Q.g.Gd]="_et";IF[Q.g.vb]="edid"; -var JF={};JF[Q.g.Kc]="cc";JF[Q.g.Lc]="ci";JF[Q.g.Mc]="cm";JF[Q.g.Nc]="cn";JF[Q.g.Pc]="cs";JF[Q.g.Qc]="ck";var KF={},LF=Object.freeze((KF[Q.g.Ba]=1,KF)),DF=function(){var a="www";FF&&uj()&&(a=uj());return"https://"+a+".google-analytics.com/g/collect"},CF=function(){var 
a;FF&&""!==uj()&&(a=uj());return"https://"+(a?a+".":"")+"analytics.google.com/g/collect"},MF=function(a,b,c){var d={},e={},f={};d.v="2";d.tid=a.target.da;Kp(a,"google_ono",!1)&&!tj()&&(d._ono=1);d.gtm=dn(Jp(a));d._p=S(80)?Gi:eF();c&& -(d.em=c);a.metadata.create_google_join&&(d._gaz=1);BF(d,a);S(30)&&(Dm()&&(d.dma_cps=Am()),d.dma=Cm());S(53)&&Vl(dm())&&(d.tcfd=Em());Ni()&&(d.exp=Ni());var g=a.h[Q.g.xb];g&&(d.gdid=g);e.en=String(a.eventName);a.metadata.is_first_visit&&(e._fv=a.metadata.is_first_visit_conversion?2:1);a.metadata.is_new_to_site&&(e._nsi=1);a.metadata.is_session_start&&(e._ss=a.metadata.is_session_start_conversion?2:1);a.metadata.is_conversion&&(e._c=1);a.metadata.is_external_event&&(e._ee=1);if(a.metadata.is_ecommerce){var h= -a.h[Q.g.Z]||W(a.o,Q.g.Z);if(Ia(h))for(var m=0;mm;m++)e["pr"+(m+1)]=fg(h[m])}var n=a.h[Q.g.vb];n&&(e.edid=n);var p=function(t,u){if("object"!==typeof u||!LF[t]){var v="ep."+t,w="epn."+t;t=Ha(u)?w:v;var x=Ha(u)?v:w;e.hasOwnProperty(x)&&delete e[x];e[t]=String(u)}},q=S(68)&&VE(a);l(a.h,function(t,u){if(void 0!==u&&!bi.hasOwnProperty(t)){null===u&&(u="");var v;t!==Q.g.Hd?v=!1:a.metadata.euid_mode_enabled||q?(d.ecid=u,v=!0):v=void 0;if(!v&&t!==Q.g.ef){var w=u;!0===u&&(w="1");!1===u&&(w= -"0");w=String(w);var x;if(GF[t])x=GF[t],d[x]=w;else if(HF[t])x=HF[t],f[x]=w;else if(IF[t])x=IF[t],e[x]=w;else if("_"===t.charAt(0))d[t]=w;else{var y;JF[t]?y=!0:t!==Q.g.Oc?y=!1:("object"!==typeof u&&p(t,u),y=!0);y||p(t,u)}}}});(function(t){VE(a)&&"object"===typeof t&&l(t||{},function(u,v){"object"!==typeof v&&(d["sst."+u]=String(v))})})(a.h[Q.g.oe]);var r=a.h[Q.g.Ya]||{};S(8)&&!1===W(a.o,Q.g.ib)&&(d.ngs="1");l(r,function(t,u){void 0!==u&&((null===u&&(u=""),t!==Q.g.Ta||f.uid)?b[t]!==u&&(e[(Ha(u)?"upn.": -"up.")+String(t)]=String(u),b[t]=u):f.uid=String(u))});return hg.call(this,{la:d,Jc:f,kh:e},EF(a),VE(a))||this};ta(MF,hg);var NF=function(a){this.s=a;this.C="";this.h=this.s},OF=function(a,b){a.h=b;return a};function PF(a){var b=a.search;return a.protocol+"//"+a.hostname+a.pathname+(b?b+"&richsstsse":"?richsstsse")}function QF(a,b,c){if(a){var d=a||[],e=nb(b)?b:{};if(Array.isArray(d))for(var f=0;fthis.s){var f=z.setTimeout,g;VE(a)?$F?($F=!1,g=aG):g=bG:g=5E3;this.s=f.call(z,function(){return c.flush()},g)}}else{var h=kg(d,this.F++);XF(d.s,h.params,h.body,d.F);var m=a.metadata.create_dc_join,n=a.metadata.create_google_join,p=!1!==W(a.o,Q.g.Ga),q=tm(a.o),r={eventId:a.o.eventId,priorityId:a.o.priorityId},t=!1;S(95)&&(t=a.h[Q.g.Hg]);var u={bn:m,dn:n,Sl:wj(),rl:p,ql:q,im:tj(),hm:a.metadata.euid_mode_enabled,Vn:r,Rm:t,o:a.o};YF(d,u)}iu(a.o.eventId,a.eventName)}; -ZF.prototype.add=function(a){a.metadata.euid_mode_enabled&&!ZE?this.T(a):this.C(a)};ZF.prototype.flush=function(){if(this.h.events.length){var a=lg(this.h,this.F++);XF(this.h.s,a.params,a.body,this.h.C);this.h=new ig;0<=this.s&&(z.clearTimeout(this.s),this.s=-1)}};ZF.prototype.T=function(a){var b=this,c=WE(a);c?Zh(c,function(d){b.C(a,1===d.split("~").length?void 0:d)}):this.C(a)};var WF=function(a,b,c){var d=a+"?"+b;if(c)try{Cc.sendBeacon&&Cc.sendBeacon(d,c)}catch(e){Ab("TAGGING",15)}else Uc(d)}, -aG=xl('',500),bG=xl('',5E3),$F=!0;var cG=function(a,b,c){void 0===c&&(c={});if("object"===typeof b)for(var d in b)cG(a+"."+d,b[d],c);else c[a]=b;return c},dG=function(a){if(VE(a)){if(S(68)){var b=Kp(a,"ccd_add_1p_data",!1)?1:0;YE(a,"ude",b)}var c=function(e){var f=cG(Q.g.Ba,e);l(f,function(g,h){a.h[g]=h})},d=W(a.o,Q.g.Ba);void 0!==d?(c(d),S(63)&&(a.h[Q.g.fe]="c")):c(a.metadata.user_data);a.metadata.user_data=void 0}};var 
eG=window,fG=document,gG=function(a){var b=eG._gaUserPrefs;if(b&&b.ioo&&b.ioo()||fG.documentElement.hasAttribute("data-google-analytics-opt-out")||a&&!0===eG["ga-disable-"+a])return!0;try{var c=eG.external;if(c&&c._gaUserPrefs&&"oo"==c._gaUserPrefs)return!0}catch(f){}for(var d=Gm("AMP_TOKEN",String(fG.cookie),!0),e=0;epa.Le+60*ca&&(Ea=!0,pa.sessionId=String(ja),pa.Ic++,pa.pd=!1,pa.ue=void 0);if(Ea)a.metadata.is_session_start=!0,I.Yl(a);else if(I.Pl()>Da||a.eventName==Q.g.jc)pa.pd=!0;a.metadata.euid_mode_enabled?W(a.o,Q.g.Ta)?pa.Dc=!0:(pa.Dc&&(pa.ue=void 0),pa.Dc=!1):pa.Dc=!1;var Ta=pa.ue,mb=S(68)&&VE(a);if(a.metadata.euid_mode_enabled||mb){var Ib=W(a.o,Q.g.Hd),Jc=Ib?1:8;Ib||(Ib=Ta,Jc=4);Ib||(Ib=Xm(),Jc=7);var ae=Ib.toString(),gh=Jc,jj=a.metadata.enhanced_client_id_source;if(void 0=== -jj||gh<=jj)a.h[Q.g.Hd]=ae,a.metadata.enhanced_client_id_source=gh}J?(a.copyToHitData(Q.g.Cb,pa.sessionId),a.copyToHitData(Q.g.Vd,pa.Ic),a.copyToHitData(Q.g.Ud,pa.pd?1:0)):(a.h[Q.g.Cb]=pa.sessionId,a.h[Q.g.Vd]=pa.Ic,a.h[Q.g.Ud]=pa.pd?1:0);a.metadata[Q.g.ef]=pa.Dc?1:0;rG(a);if(!W(a.o,Q.g.Mb)||!W(a.o,Q.g.yb)){var hh="",ih=E.location;if(ih){var kj=ih.pathname||"";"/"!=kj.charAt(0)&&(kj="/"+kj);hh=ih.protocol+"//"+ih.hostname+kj+ih.search}a.copyToHitData(Q.g.Aa,hh,kG);var yI=a.copyToHitData,zI=Q.g.Na, -lj;a:{var jw=Jm("_opt_expid",void 0,void 0,Q.g.R)[0];if(jw){var kw=decodeURIComponent(jw).split("$");if(3===kw.length){lj=kw[2];break a}}if(void 0!==ri.ga4_referrer_override)lj=ri.ga4_referrer_override;else{var lw=Si("gtm.gtagReferrer."+a.target.da),AI=E.referrer;lj=lw?""+lw:AI}}yI.call(a,zI,lj||void 0,kG);a.copyToHitData(Q.g.Ab,E.title);a.copyToHitData(Q.g.La,(Cc.language||"").toLowerCase());var mw=Wq();a.copyToHitData(Q.g.Bb,mw.width+"x"+mw.height);S(21)&&a.copyToHitData(Q.g.ad,void 0,kG);S(56)&& -Vq()&&a.copyToHitData(Q.g.jf,"1")}a.metadata.create_dc_join=!1;a.metadata.create_google_join=!1;if(!(S(39)&&VE(a)||a.metadata.is_merchant_center||!1===W(a.o,Q.g.ib))&&yD()&&fk(Q.g.J)){var nw=UE(a);(a.metadata.is_session_start||W(a.o,Q.g.vg))&&(a.metadata.create_dc_join=!!nw);var ow;ow=a.metadata.join_timer_sec;nw&&0===(ow||0)&&(a.metadata.join_timer_sec=60,a.metadata.create_google_join=!0)}sG(a);fi.hasOwnProperty(a.eventName)&&(a.metadata.is_ecommerce=!0,a.copyToHitData(Q.g.Z),a.copyToHitData(Q.g.xa)); -a.copyToHitData(Q.g.rf);for(var pw=W(a.o,Q.g.hf)||[],jn=0;jne?b[c++]=e:(2048>e?b[c++]=e>>6|192:(55296==(e&64512)&&d+1>18|240,b[c++]=e>>12&63|128):b[c++]=e>>12|224,b[c++]=e>>6&63|128),b[c++]=e&63|128)}return b};bc();sm()||Zb("iPod");Zb("iPad");!Zb("Android")||cc()||bc()||ac()||Zb("Silk");cc();!Zb("Safari")||cc()||($b()?0:Zb("Coast"))||ac()||($b()?0:Zb("Edge"))||($b()?Yb("Microsoft Edge"):Zb("Edg/"))||($b()?Yb("Opera"):Zb("OPR"))||bc()||Zb("Silk")||Zb("Android")||tm();var vs={},ws=null,xs=function(a){for(var b=[],c=0,d=0;d>=8);b[c++]=e}var f=4;void 0===f&&(f=0);if(!ws){ws={};for(var g="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789".split(""),h=["+/=","+/","-_=","-_.","-_"],m=0;5>m;m++){var n=g.concat(h[m].split(""));vs[m]=n;for(var p=0;p>2],D=r[(x&3)<<4|y>>4],G=r[(y&15)<<2|A>>6],E=r[A&63];t[w++]=""+B+D+G+E}var F=0,N=u;switch(b.length-v){case 2:F=b[v+1],N=r[(F&15)<<2]||u;case 1:var P=b[v];t[w]=""+r[P>>2]+r[(P&3)<<4|F>>4]+N+u}return t.join("")};Object.freeze(new function(){});Object.freeze(new function(){});var ys="platform platformVersion architecture model uaFullVersion bitness fullVersionList wow64".split(" ");function zs(a){var b;return null!=(b=a.google_tag_data)?b:a.google_tag_data={}}function As(){var 
a=l.google_tag_data,b;if(null!=a&&a.uach){var c=a.uach,d=Object.assign({},c);c.fullVersionList&&(d.fullVersionList=c.fullVersionList.slice(0));b=d}else b=null;return b}function Bs(){var a,b;return null!=(b=null==(a=l.google_tag_data)?void 0:a.uach_promise)?b:null} +function Cs(a){var b,c;return"function"===typeof(null==(b=a.navigator)?void 0:null==(c=b.userAgentData)?void 0:c.getHighEntropyValues)}function Ds(){var a=l;if(!Cs(a))return null;var b=zs(a);if(b.uach_promise)return b.uach_promise;var c=a.navigator.userAgentData.getHighEntropyValues(ys).then(function(d){null!=b.uach||(b.uach=d);return d});return b.uach_promise=c}; +var Es,Fs=function(){if(Cs(l)&&(Es=Ua(),!Bs())){var a=Ds();a&&(a.then(function(){M(95);}),a.catch(function(){M(96)}))}},Hs=function(a){var b=Gs.An,c=function(g,h){try{a(g,h)}catch(m){}},d=As();if(d)c(d);else{var e=Bs();if(e){b= +Math.min(Math.max(isFinite(b)?b:0,0),1E3);var f=l.setTimeout(function(){c.Le||(c.Le=!0,M(106),c(null,Error("Timeout")))},b);e.then(function(g){c.Le||(c.Le=!0,M(104),l.clearTimeout(f),c(g))}).catch(function(g){c.Le||(c.Le=!0,M(105),l.clearTimeout(f),c(null,g))})}else c(null)}},Is=function(a,b){a&&(b.h[O.g.Yd]=a.architecture,b.h[O.g.Zd]=a.bitness,a.fullVersionList&&(b.h[O.g.ae]=a.fullVersionList.map(function(c){return encodeURIComponent(c.brand||"")+";"+encodeURIComponent(c.version||"")}).join("|")), +b.h[O.g.be]=a.mobile?"1":"0",b.h[O.g.ce]=a.model,b.h[O.g.de]=a.platform,b.h[O.g.ee]=a.platformVersion,b.h[O.g.fe]=a.wow64?"1":"0")};function Js(){return"attribution-reporting"}function Ks(a){var b;b=void 0===b?document:b;var c;return!(null==(c=b.featurePolicy)||!c.allowedFeatures().includes(a))};var Ls=!1;function Ms(){if(Ks("join-ad-interest-group")&&Fa(Cc.joinAdInterestGroup))return!0;Ls||(Bm('AymqwRC7u88Y4JPvfIF2F37QKylC04248hLCdJAsh8xgOfe/dVJPV3XS3wLFca1ZMVOtnBfVjaCMTVudWM//5g4AAAB7eyJvcmlnaW4iOiJodHRwczovL3d3dy5nb29nbGV0YWdtYW5hZ2VyLmNvbTo0NDMiLCJmZWF0dXJlIjoiUHJpdmFjeVNhbmRib3hBZHNBUElzIiwiZXhwaXJ5IjoxNjk1MTY3OTk5LCJpc1RoaXJkUGFydHkiOnRydWV9'),Ls=!0);return Ks("join-ad-interest-group")&&Fa(Cc.joinAdInterestGroup)} +function Ns(a,b){var c=void 0;try{c=C.querySelector('iframe[data-tagging-id="'+b+'"]')}catch(e){}if(c){var d=Number(c.dataset.loadTime);if(d&&6E4>Ua()-d){Ab("TAGGING",9);return}try{c.parentNode.removeChild(c)}catch(e){}c=void 0}else try{if(50<=C.querySelectorAll('iframe[allow="join-ad-interest-group"][data-tagging-id*="-"]').length){Ab("TAGGING",10);return}}catch(e){}Lc(a,void 0,{allow:"join-ad-interest-group"},{taggingId:b,loadTime:Ua()},c)}function Os(){return"https://td.doubleclick.net"};var Ps=RegExp("^UA-\\d+-\\d+%3A[\\w-]+(?:%2C[\\w-]+)*(?:%3BUA-\\d+-\\d+%3A[\\w-]+(?:%2C[\\w-]+)*)*$"),Qs=/^~?[\w-]+(?:\.~?[\w-]+)*$/,Rs=/^\d+\.fls\.doubleclick\.net$/,Ss=/;gac=([^;?]+)/,Ts=/;gacgb=([^;?]+)/,Us=/;gclaw=([^;?]+)/,Vs=/;gclgb=([^;?]+)/; +function Ws(a,b){if(Rs.test(C.location.host)){var c=C.location.href.match(b);return c&&2==c.length&&c[1].match(Ps)?decodeURIComponent(c[1]):""}var d=[],e;for(e in a){for(var f=[],g=a[e],h=0;hc.indexOf(h))if(m&&0c.indexOf(m[t])){M(11);r=!1;break a}}else{r=!1;break a}r=!0}n=r}var u=!1;if(d){var v=0<=e.indexOf(h);if(v)u=v;else{var w=La(e,m||[]);w&&M(10);u=w}}var x=!n||u;x||!(0<=m.indexOf("sandboxedScripts"))||c&&-1!==c.indexOf("sandboxedScripts")||(x=La(e,sv));return f[h]=x}},uv=!1; +uv=!0;var tv=function(){return pv.test(l.location&&l.location.hostname)},wv=function(){if(Fk){var a=function(b){var c=wf(b),d;if(Bf(c)){var e=c[Ne.ma];if(!e)throw"Error: No function name given for function call.";var 
f=pf[e];d=!!f&&!!f.runInSiloedMode}else d=!!gv(c[Ne.ma],4);return d};R(81)?jv(Nk(),function(b){return a(b.entityId)}):jv(Nk(),a)}};var yv=function(a,b,c,d,e){if(!xv()){var f=d.siloed?Hk(a):a;if(!Uk(f)){var g="?id="+encodeURIComponent(a)+"&l="+oi.ja,h=0===a.indexOf("GTM-");h||(g+="&cx=c");R(36)&&(g+=">m="+Dn());var m=wk();m&&(g+="&sign="+oi.Hf);var n=c?"/gtag/js":"/gtm.js",p=vk()?uk(b,n+g):void 0;if(!p){var q=oi.Ad+n;m&&Dc&&h?(q=Dc.replace(/^(?:https?:\/\/)?/i,"").split(/[?#]/)[0],p=wq("https://","http://",q+g)):p=Ii.s?Ji()+n+g:wq("https://","http://",q+g)}d.siloed&&Xk({ctid:f,isDestination:!1});var r=Wk();Bk().container[f]= +{state:1,context:d,parent:r};Ck({ctid:f,isDestination:!1},e);Jc(p)}}},zv=function(a,b,c,d){if(!xv()){var e=c.siloed?Hk(a):a;if(!Vk(e))if(Yk())Bk().destination[e]={state:0,transportUrl:b,context:c,parent:Wk()},Ck({ctid:e,isDestination:!0},d),M(91);else{var f="/gtag/destination?id="+encodeURIComponent(a)+"&l="+oi.ja+"&cx=c";R(36)&&(f+=">m="+Dn());wk()&&(f+="&sign="+oi.Hf);var g=vk()?uk(b,f):void 0;g||(g=Ii.s?Ji()+f:wq("https://","http://",oi.Ad+f));c.siloed&&Xk({ctid:e,isDestination:!0});Bk().destination[e]= +{state:1,context:c,parent:Wk()};Ck({ctid:e,isDestination:!0},d);Jc(g)}}};function xv(){if(Bn()){return!0}return!1};var Av=!1,Bv=0,Cv=[];function Dv(a){if(!Av){var b=C.createEventObject,c="complete"==C.readyState,d="interactive"==C.readyState;if(!a||"readystatechange"!=a.type||c||!b&&d){Av=!0;for(var e=0;eBv){Bv++;try{C.documentElement.doScroll("left"),Dv()}catch(a){l.setTimeout(Ev,50)}}}var Fv=function(a){Av?a():Cv.push(a)};var Gv=function(){this.F=0;this.h={}};Gv.prototype.addListener=function(a,b,c){var d=++this.F;this.h[a]=this.h[a]||{};this.h[a][String(d)]={listener:b,ub:c};return d};Gv.prototype.s=function(a,b){var c=this.h[a],d=String(b);if(!c||!c[d])return!1;delete c[d];return!0};Gv.prototype.C=function(a,b){var c=[];Na(this.h[a],function(d,e){0>c.indexOf(e.listener)&&(void 0===e.ub||0<=b.indexOf(e.ub))&&c.push(e.listener)});return c};var Hv=function(a,b,c){return{entityType:a,indexInOriginContainer:b,nameInOriginContainer:c,originContainerId:Mk()}};var Jv=function(a,b){this.h=!1;this.F=[];this.M={tags:[]};this.W=!1;this.s=this.C=0;Iv(this,a,b)},Kv=function(a,b,c,d){if(si.hasOwnProperty(b)||"__zone"===b)return-1;var e={};nb(d)&&(e=z(d,e));e.id=c;e.status="timeout";return a.M.tags.push(e)-1},Lv=function(a,b,c,d){var e=a.M.tags[b];e&&(e.status=c,e.executionTime=d)},Mv=function(a){if(!a.h){for(var b=a.F,c=0;c=a.C&&Mv(a)})},Ov=function(a){a.W=!0;a.s>=a.C&&Mv(a)};var Pv={},Rv=function(){return l[Qv()]},Sv=!1; +function Qv(){return l.GoogleAnalyticsObject||"ga"} +var Vv=function(a){},Wv=function(a,b){return function(){var c=Rv(),d=c&&c.getByName&&c.getByName(a);if(d){var e=d.get("sendHitTask");d.set("sendHitTask",function(f){var g=f.get("hitPayload"),h=f.get("hitCallback"),m=0>g.indexOf("&tid="+b);m&&(f.set("hitPayload",g.replace(/&tid=UA-[0-9]+-[0-9]+/,"&tid="+b),!0),f.set("hitCallback",void 0,!0));e(f);m&&(f.set("hitPayload", +g,!0),f.set("hitCallback",h,!0),f.set("_x_19",void 0,!0),e(f))})}}};var aw=["es","1"],bw={},cw={};function dw(a,b){if(hl){var c;c=b.match(/^(gtm|gtag)\./)?encodeURIComponent(b):"*";bw[a]=[["e",c],["eid",a]];sl(a)}}function ew(a){var b=a.eventId,c=a.Lc;if(!bw[b])return[];var d=[];cw[b]||d.push(aw);d.push.apply(d,ka(bw[b]));c&&(cw[b]=!0);return d};var fw={};function gw(a,b){hl&&(fw[a]=fw[a]||{},fw[a][b]=(fw[a][b]||0)+1)}function hw(a){var b=a.eventId,c=a.Lc,d=fw[b]||{},e=[],f;for(f in d)d.hasOwnProperty(f)&&e.push(""+f+d[f]);c&&delete 
Okan Bulut
    -

    Welcome to the Psychometrics and Data Science - with R and Python blog!

    +

    Welcome to my blog, Psychometrics and Data + Science with R and Python!

    I am an Associate Professor in the Measurement, Evaluation, and Data Science program at the University of Alberta. I teach graduate courses and workshops on - psychometrics, educational measurement, and statistical - modeling using R. For more information, you can visit my - personal website: www.okanbulut.com.

    -

    As a passionate R user, I always conduct statistical and + psychometrics, machine learning, and statistical modeling + using R. For more information, you can visit my personal + website: www.okanbulut.com.

    +

    As a passionate R user, I conduct statistical and psychometric analysis on educational and psychological data using R. I often write my own functions but I also benefit from the existing R packages available on CRAN, GitHub, and other platforms. @@ -1613,11 +1606,12 @@

    Okan Bulut

    files for each chapter of our book are available on GitHub: https://github.com/cddesja/hemp.

    In this blog, I hope to continue sharing psychometric - applications using R. In addition, I plan to present new - examples focusing on the state-of-the-art methods in data - science and educational data mining using Python. Questions, - comments, and suggestions from all readers are welcomed! - Lastly, if my blog has helped you, you can buy me + applications using R. In addition, I also plan to present + new examples focusing on the state-of-the-art methods in + psychometrics, data science, and educational data mining + using Python. Questions, comments, and suggestions from all + readers are welcomed! Lastly, if my blog has helped you, you + can buy me coffee :-)

    @@ -1663,16 +1657,16 @@

    Okan Bulut

    -

    Welcome to the Psychometrics and Data Science - with R and Python blog!

    +

    Welcome to my blog, Psychometrics and Data + Science with R and Python!

    I am an Associate Professor in the Measurement, Evaluation, and Data Science program at the University of Alberta. I teach graduate courses and workshops on - psychometrics, educational measurement, and statistical - modeling using R. For more information, you can visit my - personal website: www.okanbulut.com.

    -

    As a passionate R user, I always conduct statistical - and psychometric analysis on educational and psychological + psychometrics, machine learning, and statistical modeling + using R. For more information, you can visit my personal + website: www.okanbulut.com.

    +

    As a passionate R user, I conduct statistical and + psychometric analysis on educational and psychological data using R. I often write my own functions but I also benefit from the existing R packages available on CRAN, GitHub, and other platforms. In 2018, my colleague Chris Desjardins and I @@ -1686,11 +1680,12 @@

    Okan Bulut

    files for each chapter of our book are available on GitHub: https://github.com/cddesja/hemp.

    In this blog, I hope to continue sharing psychometric - applications using R. In addition, I plan to present new - examples focusing on the state-of-the-art methods in data - science and educational data mining using Python. - Questions, comments, and suggestions from all readers are - welcomed! Lastly, if my blog has helped you, you can buy me + applications using R. In addition, I also plan to present + new examples focusing on the state-of-the-art methods in + psychometrics, data science, and educational data mining + using Python. Questions, comments, and suggestions from + all readers are welcomed! Lastly, if my blog has helped + you, you can buy me coffee :-)

    @@ -1720,7 +1715,7 @@

    Okan Bulut

    diff --git a/docs/data_and_codes/Wave1_Alberta.zip b/docs/data_and_codes/Wave1_Alberta.zip new file mode 100644 index 0000000..ea4488a Binary files /dev/null and b/docs/data_and_codes/Wave1_Alberta.zip differ diff --git a/docs/data_and_codes/wave1_alberta.RData b/docs/data_and_codes/wave1_alberta.RData new file mode 100644 index 0000000..bc66d10 Binary files /dev/null and b/docs/data_and_codes/wave1_alberta.RData differ diff --git a/docs/data_and_codes/wave1_alberta_sentence.RData b/docs/data_and_codes/wave1_alberta_sentence.RData new file mode 100644 index 0000000..db31565 Binary files /dev/null and b/docs/data_and_codes/wave1_alberta_sentence.RData differ diff --git a/docs/index.html b/docs/index.html index 8f863dc..7331d28 100644 --- a/docs/index.html +++ b/docs/index.html @@ -2615,6 +2615,30 @@


    - +

    Visualizing Machine Learning Models

    @@ -2935,7 +2959,7 @@

    Categories

    • Articles -(12) +(13)
    • CAT @@ -2951,7 +2975,7 @@

      Categories

    • data science -(3) +(4)
    • data visualization @@ -2983,7 +3007,7 @@

      Categories

    • natural language processing -(3) +(4)
    • network @@ -3010,6 +3034,10 @@

      Categories

      (1)
    • +sentiment +(1) +
    • +
    • survey (1)
    • @@ -3019,7 +3047,7 @@

      Categories

    • text mining -(3) +(4)
    • text vectorization @@ -3063,7 +3091,7 @@

      Reuse

      diff --git a/docs/index.xml b/docs/index.xml index 92c1f4e..a524bad 100644 --- a/docs/index.xml +++ b/docs/index.xml @@ -13,7 +13,22 @@ psychological data https://okan.cloud/ Distill - Thu, 04 Jan 2024 00:00:00 +0000 + Fri, 09 Feb 2024 00:00:00 +0000 + + Lexicon-Based Sentiment Analysis Using R + Okan Bulut + https://okan.cloud/posts/2024-02-09-lexicon-based-sentiment-analysis-using-r + In this post, we will uncover the power of lexicon-based sentiment analysis using R. I demonstrate how to harness the capabilities of lexicons like NRC and Bing to decipher the emotional pulse of your text data. With practical examples, you'll gain the skills to analyze sentiment scores and extract valuable insights from your textual data sets. + +(12 min read) + data science + natural language processing + text mining + sentiment + https://okan.cloud/posts/2024-02-09-lexicon-based-sentiment-analysis-using-r + Fri, 09 Feb 2024 00:00:00 +0000 + + Introduction to Psychometric Network Analysis Okan Bulut diff --git a/docs/posts/2020-12-14-explanatory-irt-models-in-r/index.html b/docs/posts/2020-12-14-explanatory-irt-models-in-r/index.html index 0cfd58a..3d68975 100644 --- a/docs/posts/2020-12-14-explanatory-irt-models-in-r/index.html +++ b/docs/posts/2020-12-14-explanatory-irt-models-in-r/index.html @@ -2770,7 +2770,7 @@

      Citation

      diff --git a/docs/posts/2020-12-21-testing-for-measurement-invariance-in-r/index.html b/docs/posts/2020-12-21-testing-for-measurement-invariance-in-r/index.html index bdf2d71..e617b96 100644 --- a/docs/posts/2020-12-21-testing-for-measurement-invariance-in-r/index.html +++ b/docs/posts/2020-12-21-testing-for-measurement-invariance-in-r/index.html @@ -3076,7 +3076,7 @@

      Citation

      diff --git a/docs/posts/2021-01-04-how-to-shorten-a-measurement-instrument-automatically-part-i/index.html b/docs/posts/2021-01-04-how-to-shorten-a-measurement-instrument-automatically-part-i/index.html index d4e767e..fe02af2 100644 --- a/docs/posts/2021-01-04-how-to-shorten-a-measurement-instrument-automatically-part-i/index.html +++ b/docs/posts/2021-01-04-how-to-shorten-a-measurement-instrument-automatically-part-i/index.html @@ -3013,7 +3013,7 @@

      Citation

      diff --git a/docs/posts/2021-01-19-how-to-shorten-a-measurement-instrument-automatically-part-ii/index.html b/docs/posts/2021-01-19-how-to-shorten-a-measurement-instrument-automatically-part-ii/index.html index 8daaf69..b333ec2 100644 --- a/docs/posts/2021-01-19-how-to-shorten-a-measurement-instrument-automatically-part-ii/index.html +++ b/docs/posts/2021-01-19-how-to-shorten-a-measurement-instrument-automatically-part-ii/index.html @@ -3081,7 +3081,7 @@

      Citation

      diff --git a/docs/posts/2021-02-12-a-polytomous-scoring-approach-based-on-item-response-time/index.html b/docs/posts/2021-02-12-a-polytomous-scoring-approach-based-on-item-response-time/index.html index 59ffd84..a5f8ffa 100644 --- a/docs/posts/2021-02-12-a-polytomous-scoring-approach-based-on-item-response-time/index.html +++ b/docs/posts/2021-02-12-a-polytomous-scoring-approach-based-on-item-response-time/index.html @@ -3020,7 +3020,7 @@

      Citation

      diff --git a/docs/posts/2021-02-20-building-a-computerized-adaptive-version-of-psychological-scales/index.html b/docs/posts/2021-02-20-building-a-computerized-adaptive-version-of-psychological-scales/index.html index 4dd4b55..983f3fb 100644 --- a/docs/posts/2021-02-20-building-a-computerized-adaptive-version-of-psychological-scales/index.html +++ b/docs/posts/2021-02-20-building-a-computerized-adaptive-version-of-psychological-scales/index.html @@ -3100,7 +3100,7 @@

      Citation

      diff --git a/docs/posts/2021-03-04-5-ways-to-effectively-visualize-survey-data/index.html b/docs/posts/2021-03-04-5-ways-to-effectively-visualize-survey-data/index.html index 9f7d790..773772d 100644 --- a/docs/posts/2021-03-04-5-ways-to-effectively-visualize-survey-data/index.html +++ b/docs/posts/2021-03-04-5-ways-to-effectively-visualize-survey-data/index.html @@ -3085,7 +3085,7 @@

      Citation

      diff --git a/docs/posts/2021-03-23-visualizing-machine-learning-models/index.html b/docs/posts/2021-03-23-visualizing-machine-learning-models/index.html index a8cacc3..00e845a 100644 --- a/docs/posts/2021-03-23-visualizing-machine-learning-models/index.html +++ b/docs/posts/2021-03-23-visualizing-machine-learning-models/index.html @@ -3097,7 +3097,7 @@

      Citation

      diff --git a/docs/posts/2021-04-08-text-vectorization-using-python-term-document-matrix/index.html b/docs/posts/2021-04-08-text-vectorization-using-python-term-document-matrix/index.html index c7738a2..5f1cd68 100644 --- a/docs/posts/2021-04-08-text-vectorization-using-python-term-document-matrix/index.html +++ b/docs/posts/2021-04-08-text-vectorization-using-python-term-document-matrix/index.html @@ -2877,7 +2877,7 @@

      Citation

      diff --git a/docs/posts/2022-01-16-text-vectorization-using-python-tf-idf/index.html b/docs/posts/2022-01-16-text-vectorization-using-python-tf-idf/index.html index b2ae3ac..17c00ce 100644 --- a/docs/posts/2022-01-16-text-vectorization-using-python-tf-idf/index.html +++ b/docs/posts/2022-01-16-text-vectorization-using-python-tf-idf/index.html @@ -2787,7 +2787,7 @@

      Citation

      diff --git a/docs/posts/2022-05-02-text-vectorization-using-python-word2vec/index.html b/docs/posts/2022-05-02-text-vectorization-using-python-word2vec/index.html index f7d9f63..517a88e 100644 --- a/docs/posts/2022-05-02-text-vectorization-using-python-word2vec/index.html +++ b/docs/posts/2022-05-02-text-vectorization-using-python-word2vec/index.html @@ -2796,7 +2796,7 @@

      Citation

      diff --git a/docs/posts/2024-01-04-introduction-to-psychometric-network-analysis/index.html b/docs/posts/2024-01-04-introduction-to-psychometric-network-analysis/index.html index 3ddabe6..e4e270e 100644 --- a/docs/posts/2024-01-04-introduction-to-psychometric-network-analysis/index.html +++ b/docs/posts/2024-01-04-introduction-to-psychometric-network-analysis/index.html @@ -3120,7 +3120,7 @@

      Citation

      diff --git a/docs/posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/Materials/Rapid assessment of communication consistency_sentiment analysis of public health briefings.pdf b/docs/posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/Materials/Rapid assessment of communication consistency_sentiment analysis of public health briefings.pdf new file mode 100644 index 0000000..6f2540a Binary files /dev/null and b/docs/posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/Materials/Rapid assessment of communication consistency_sentiment analysis of public health briefings.pdf differ diff --git a/docs/posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/Materials/Using Data Mining for Rapid Complex Case Study.pdf b/docs/posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/Materials/Using Data Mining for Rapid Complex Case Study.pdf new file mode 100644 index 0000000..fa8f705 Binary files /dev/null and b/docs/posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/Materials/Using Data Mining for Rapid Complex Case Study.pdf differ diff --git a/docs/posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/apa.csl b/docs/posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/apa.csl new file mode 100644 index 0000000..081857d --- /dev/null +++ b/docs/posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/apa.csl @@ -0,0 +1,1916 @@ + + diff --git a/docs/posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/emoji.jpg b/docs/posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/emoji.jpg new file mode 100644 index 0000000..d15743d Binary files /dev/null and b/docs/posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/emoji.jpg differ diff --git a/docs/posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/index.html b/docs/posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/index.html new file mode 100644 index 0000000..2fe992b --- /dev/null +++ b/docs/posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/index.html @@ -0,0 +1,3061 @@ + + + + + + + + + + + + + + + + + + + + + +Okan Bulut: Lexicon-Based Sentiment Analysis Using R + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
      +

      Lexicon-Based Sentiment Analysis Using R

      + + + + +

      In this post, we will uncover the power of lexicon-based sentiment analysis using R. I demonstrate how to harness the capabilities of lexicons like NRC and Bing to decipher the emotional pulse of your text data. With practical examples, you’ll gain the skills to analyze sentiment scores and extract valuable insights from your textual data sets.

      +

      (12 min read)

      +
      + + + +
      + +
      +Photo by Roman Odintsov on Pexels + +
      +

      Introduction

      +

      During the COVID-19 pandemic, I decided to learn a new statistical technique to keep my mind occupied rather than constantly immersing myself in pandemic-related news. After evaluating several options, I found the concepts related to natural language processing (NLP) particularly captivating. So, I opted to delve deeper into this field and explore one specific technique: sentiment analysis, also known as “opinion mining” in academic literature. This analytical method empowers researchers to extract and interpret the emotions conveyed toward a specific subject within written text. Through sentiment analysis, one can discern the polarity (positive or negative), nature, and intensity of sentiments expressed across various textual formats such as documents, customer reviews, and social media posts.

      +

Amidst the pandemic, I observed a significant trend among researchers who turned to sentiment analysis as a tool to measure public responses to news and developments surrounding the virus. This involved analyzing user-generated content on popular social media platforms such as Twitter, YouTube, and Instagram. Intrigued by this methodology, my colleagues and I sought to contribute to this body of research by scrutinizing the daily briefings provided by public health authorities. In Alberta, Dr. Deena Hinshaw, then the province’s chief medical officer of health, regularly delivered updates on the region’s response to the ongoing pandemic. Through a sentiment analysis of these public health announcements, we aimed to assess the effectiveness of Alberta’s communication strategies during this complex public health crisis (Bulut & Poth, 2022; Poth et al., 2021).

      +

      In this post, I aim to walk you through the process of performing sentiment analysis using R. Specifically, I’ll focus on “lexicon-based sentiment analysis,” which I’ll discuss in more detail in the next section. I’ll provide examples of lexicon-based sentiment analysis that we’ve integrated into the publications referenced earlier. Additionally, in future posts, I’ll delve into more advanced forms of sentiment analysis, making use of state-of-the-art pre-trained models accessible on Hugging Face.

      +

      Lexicon-Based Sentiment Analysis

      +

As I learned more about sentiment analysis, I discovered that the predominant method for extracting sentiments is lexicon-based sentiment analysis. This approach entails utilizing a specific lexicon, essentially the vocabulary of a language or subject, to discern the direction and intensity of sentiments conveyed within a given text. Some lexicons, like the Bing lexicon (Hu & Liu, 2004), classify words as either positive or negative. Other lexicons provide more detailed sentiment labels, such as the NRC Emotion Lexicon (Mohammad & Turney, 2013), which categorizes words both by positive and negative sentiment and by Plutchik’s (1980) psychoevolutionary theory of basic emotions (i.e., anger, fear, anticipation, trust, surprise, sadness, joy, and disgust).

      +

Lexicon-based sentiment analysis operates by aligning words within a given text with those found in widely used lexicons such as NRC and Bing. Each word is assigned a sentiment, typically positive or negative. The text’s collective sentiment score is subsequently derived by summing the individual sentiment scores of its constituent words. For instance, in a scenario where a text incorporates 50 positive and 30 negative words according to the Bing lexicon, the resulting sentiment score would be 20. This value indicates a predominance of positive sentiments within the text. Conversely, a negative total would imply a prevalence of negative sentiments.
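To make this scoring rule concrete, here is a minimal sketch in R. The five tokens are hypothetical examples (not from the briefing data), and the matching relies on tidytext’s get_sentiments() function, which we will also use later in this post:

library("tidytext")
library("dplyr")

# Hypothetical tokens for illustration only
tokens <- tibble(word = c("good", "great", "happy", "bad", "terrible"))

# Keep only the tokens that appear in the Bing lexicon
matched <- tokens %>%
  inner_join(get_sentiments("bing"), by = "word")

# Sentiment score = number of positive words - number of negative words
sum(matched$sentiment == "positive") - sum(matched$sentiment == "negative")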

      +

Performing lexicon-based sentiment analysis using R can be both fun and tricky. While analyzing the sentiments of public health announcements, I found Julia Silge and David Robinson’s book, Text Mining with R, to be very helpful. The book has a chapter dedicated to sentiment analysis, where the authors demonstrate how to conduct sentiment analysis using general-purpose lexicons like Bing and NRC. However, Julia and David also highlight a major limitation of lexicon-based sentiment analysis: it considers only single words (i.e., unigrams) and ignores qualifiers before a word. For instance, the negation in “not true” is lost, because sentiment analysis processes it as two separate words, “not” and “true”. Furthermore, if a particular word (either positive or negative) is repeated throughout the text, this may skew the results depending on the polarity (positive or negative) of that word. Therefore, the results of lexicon-based sentiment analysis should be interpreted carefully.

      +

      Now, let’s move to our example where we will conduct lexicon-based sentiment analysis using Dr. Deena Hinshaw’s media briefings during the COVID-19 pandemic. My goal is to showcase two R packages capable of running sentiment analysis 📉.

      +

      Example

      +

For the sake of simplicity, we will focus on the first wave of the pandemic (March 2020 - June 2020). The transcripts of all media briefings were available on the Government of Alberta’s COVID-19 website (https://www.alberta.ca/covid). After importing these transcripts into R, I turned all the text into lowercase and then applied word tokenization using the tidytext (Silge & Robinson, 2016) and tokenizers (Mullen et al., 2018) packages. Word tokenization split the sentences in the media briefings into individual words for each entry (i.e., day of media briefings). Next, I applied lemmatization to the tokens to resolve each word into its canonical form using the textstem package (Rinker, 2018). Finally, I removed common stopwords, such as “my”, “for”, “that”, and “with”, using the stopwords package (Benoit et al., 2021). A sketch of these preprocessing steps is shown below. The final dataset is available here; let’s import it into R and review its content.
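For transparency, the pipeline described above looks roughly like the sketch below. Here, briefings stands in for a hypothetical data frame of raw transcripts (it is not included in the shared files), and unnest_tokens() calls the tokenizers package under the hood:

library("dplyr")
library("tidytext")
library("textstem")

# `briefings` is a hypothetical data frame: one row per briefing,
# with columns month, date, and text (the raw transcript)
wave1_alberta <- briefings %>%
  mutate(text = tolower(text)) %>%
  # word tokenization (via the tokenizers package)
  unnest_tokens(word, text) %>%
  # lemmatization: resolve each token to its canonical form
  mutate(word = lemmatize_words(word)) %>%
  # remove common English stopwords
  filter(!word %in% stopwords::stopwords("en"))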

      +
      +
      +
      load("wave1_alberta.RData")
      +
      +head(wave1_alberta, 10)
      +
      +
      +
      +
      + +
      +
      +

      The dataset has three columns:

      +
        +
      • month (the month of the media briefing)
      • +
      • date (the exact date of the media briefing), and
      • +
      • word (words or tokens used in media briefing)
      • +
      +

      Descriptive Analysis

      +

      Now, we can calculate some descriptive statistics to better understand the content of our dataset. We will begin by finding the top 5 words (based on their frequency) for each month.

      +
      +
      +
      library("dplyr")
      +
      +wave1_alberta %>%
      +  group_by(month) %>%
      +  count(word, sort = TRUE) %>%
      +  slice_head(n = 5) %>%
      +  as.data.frame()
      +
      +
              month      word   n
      +1  March 2020    health 199
      +2  March 2020      care 102
      +3  March 2020  continue 102
      +4  March 2020    spread  87
      +5  March 2020      test  86
      +6  April 2020      test 156
      +7  April 2020    health 146
      +8  April 2020      care 145
      +9  April 2020  continue 135
      +10 April 2020    spread 129
      +11   May 2020    health 135
      +12   May 2020  continue 118
      +13   May 2020      test 102
      +14   May 2020    people  78
      +15   May 2020    public  78
      +16  June 2020      test 126
      +17  June 2020    health  93
      +18  June 2020  continue  69
      +19  June 2020    people  57
      +20  June 2020 community  43
      +
      +

      The output shows that words such as health, continue, and test were commonly used in the media briefings across this 4-month period. We can also expand our list to the most common 10 words and view the results visually:

      +
      +
      +
      library("tidytext")
      +library("ggplot2")
      +
      +wave1_alberta %>%
      +  group_by(month) %>%
      +  count(word, sort = TRUE) %>%
      +  # Find the top 10 words
      +  slice_head(n = 10) %>%
      +  ungroup() %>%
      +  # Order the words by their frequency within each month
      +  mutate(word = reorder_within(word, n, month)) %>%
      +  # Create a bar graph
      +  ggplot(aes(x = n, y = word, fill = month)) +
      +  geom_col() +
      +  scale_y_reordered() +
      +  facet_wrap(~ month, scales = "free_y") +
      +  labs(x = "Frequency", y = NULL) +
      +  theme(legend.position = "none",
      +        axis.text.x = element_text(size = 11),
      +        axis.text.y = element_text(size = 11),
      +        strip.background = element_blank(),
      +        strip.text = element_text(colour = "black", face = "bold", size = 13))
      +
      +
      +Most common words based on frequency +

      +Figure 1: Most common words based on frequency +

      +
      +
      +

Since some words are common across all four months, the plot above may not necessarily show us the important words that are unique to each month. To find such important words, we can use Term Frequency - Inverse Document Frequency (TF-IDF), a widely used technique in NLP for measuring how important a term is within a document relative to a collection of documents (for more detailed information about TF-IDF, check out my previous blog post). In our example, we will treat the media briefings for each month as a document and calculate TF-IDF for the tokens (i.e., words) within each document. The first part of the R code below creates a new dataset, wave1_tf_idf, by calculating TF-IDF for all tokens and selecting the tokens with the highest TF-IDF values within each month. Next, we use this dataset to create a bar plot with the TF-IDF values to view the common words unique to each month.
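For reference, the quantity that bind_tf_idf() computes below follows the usual definition: for a term \(t\) in document \(d\) (here, a month of briefings) out of \(N\) documents,

$$
\mathrm{tf\text{-}idf}(t, d) = \frac{n_{t,d}}{\sum_{t'} n_{t',d}} \times \ln\frac{N}{\mathrm{df}(t)},
$$

where \(n_{t,d}\) is the number of times \(t\) occurs in \(d\) and \(\mathrm{df}(t)\) is the number of documents containing \(t\). A term that appears often in one month but rarely in the other months receives a high TF-IDF score.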

      +
      +
      +
      # Calculate TF-IDF for the words for each month
      +wave1_tf_idf <- wave1_alberta %>%
      +  count(month, word, sort = TRUE) %>%
      +  bind_tf_idf(word, month, n) %>%
      +  arrange(month, -tf_idf) %>%
      +  group_by(month) %>%
      +  top_n(10) %>%
      +  ungroup
      +
      +# Visualize the results
      +wave1_tf_idf %>%
      +  mutate(word = reorder_within(word, tf_idf, month)) %>%
      +  ggplot(aes(word, tf_idf, fill = month)) +
      +  geom_col(show.legend = FALSE) + 
      +  facet_wrap(~ month, scales = "free", ncol = 2) +
      +  scale_x_reordered() +
      +  coord_flip() +
      +  theme(strip.background = element_blank(),
      +        strip.text = element_text(colour = "black", face = "bold", size = 13),
      +        axis.text.x = element_text(size = 11),
      +        axis.text.y = element_text(size = 11)) +
      +  labs(x = NULL, y = "TF-IDF")
      +
      +
+Most common words based on TF-IDF +

+Figure 2: Most common words based on TF-IDF +

      +
      +
      +

These results are more informative because the tokens shown in the figure reflect unique topics discussed each month. For example, in March 2020, the media briefings were mostly about limiting travel, travelers returning from crowded conferences, and COVID-19 cases on cruise ships. In June 2020, the focus of the media briefings shifted towards mask requirements, people protesting pandemic-related restrictions, and so on. Before we move on to the sentiment analysis, let’s take a look at another descriptive variable: the length of each media briefing. This will show us whether the media briefings became longer or shorter over time.

      +
      +
      +
      wave1_alberta %>%
      +  mutate(day = substr(date, 9, 10)) %>%
      +  group_by(month, day) %>%
      +  summarize(n = n()) %>%
      +  ggplot(aes(day, n, color = month, shape = month, group = month)) +
      +  geom_point(size = 2) + 
      +  geom_line() + 
      +  labs(x = "Days", y = "Number of Words") +
      +  theme(legend.position = "none", 
      +        axis.text.x = element_text(angle = 90, size = 11),
      +        strip.background = element_blank(),
      +        strip.text = element_text(colour = "black", face = "bold", size = 11),
      +        axis.text.y = element_text(size = 11)) +
      +  ylim(0, 800) +
      +  facet_wrap(~ month, scales = "free_x")
      +
      +
      +Number of words by days +

      +Figure 3: Number of words by days +

      +
      +
      +

      The figure above shows that the length of media briefings varied quite substantially over time. Especially in March and May, there are larger fluctuations (i.e., very long or short briefings), whereas in June, the daily media briefings are quite similar in terms of length.

      +

      Sentiment Analysis with tidytext

      +

After analyzing the dataset descriptively, we are ready to begin the sentiment analysis. In the first part, we will use the tidytext package to perform sentiment analysis and compute sentiment scores. We will first import the lexicons into R and then merge them with our dataset. With the Bing lexicon, the sentiment score is the difference between the number of positive and negative words (i.e., sentiment = the number of positive words - the number of negative words).

      +
      +
      +
# Of the three lexicons, Bing is already available in the tidytext package;
+# for AFINN and NRC, install the textdata package by uncommenting the next line
      +# install.packages("textdata")
      +get_sentiments("bing") 
      +
      +
      # A tibble: 6,786 × 2
      +   word        sentiment
      +   <chr>       <chr>    
      + 1 2-faces     negative 
      + 2 abnormal    negative 
      + 3 abolish     negative 
      + 4 abominable  negative 
      + 5 abominably  negative 
      + 6 abominate   negative 
      + 7 abomination negative 
      + 8 abort       negative 
      + 9 aborted     negative 
      +10 aborts      negative 
      +# ℹ 6,776 more rows
      +
      +
      get_sentiments("afinn") 
      +
      +
      # A tibble: 2,477 × 2
      +   word       value
      +   <chr>      <dbl>
      + 1 abandon       -2
      + 2 abandoned     -2
      + 3 abandons      -2
      + 4 abducted      -2
      + 5 abduction     -2
      + 6 abductions    -2
      + 7 abhor         -3
      + 8 abhorred      -3
      + 9 abhorrent     -3
      +10 abhors        -3
      +# ℹ 2,467 more rows
      +
      +
      get_sentiments("nrc")
      +
      +
      # A tibble: 13,901 × 2
      +   word        sentiment
      +   <chr>       <chr>    
      + 1 abacus      trust    
      + 2 abandon     fear     
      + 3 abandon     negative 
      + 4 abandon     sadness  
      + 5 abandoned   anger    
      + 6 abandoned   fear     
      + 7 abandoned   negative 
      + 8 abandoned   sadness  
      + 9 abandonment anger    
      +10 abandonment fear     
      +# ℹ 13,891 more rows
      +
      +
      # We will need the spread function from tidyr
      +library("tidyr")
      +
      +# Sentiment scores with bing (based on frequency)
      +wave1_alberta %>%
      +  mutate(day = substr(date, 9, 10)) %>%
      +  group_by(month, day) %>%
      +  inner_join(get_sentiments("bing")) %>%
      +  count(month, day, sentiment) %>%
      +  spread(sentiment, n) %>%
      +  mutate(sentiment = positive - negative) %>%
      +  ggplot(aes(day, sentiment, fill = month)) +
      +  geom_col(show.legend = FALSE) +
      +  labs(x = "Days", y = "Sentiment Score") +
      +  ylim(-50, 50) + 
      +  theme(legend.position = "none", axis.text.x = element_text(angle = 90)) +
      +  facet_wrap(~ month, ncol = 2, scales = "free_x") +
      +  theme(strip.background = element_blank(),
      +        strip.text = element_text(colour = "black", face = "bold", size = 11),
      +        axis.text.x = element_text(size = 11),
      +        axis.text.y = element_text(size = 11)) 
      +
      +
      +Sentiment scores based on the Bing lexicon +

      +Figure 4: Sentiment scores based on the Bing lexicon +

      +
      +
      +

      The figure above shows that the sentiments delivered in the media briefings were generally negative, which is not necessarily surprising since the media briefings were all about how many people passed away, hospitalization rates, potential outbreaks, etc. On certain days (e.g., March 24, 2020 and May 4, 2020), the media briefings were particularly more negative in terms of sentiments.
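If you want to dig into what makes a given day so negative, one option is to filter that day and count its Bing-matched words. This is a sketch: it assumes the date column stores dates as "YYYY-MM-DD" strings, which the substr(date, 9, 10) calls above suggest:

# Top Bing-matched words on a particularly negative day (sketch)
wave1_alberta %>%
  filter(date == "2020-03-24") %>%
  inner_join(get_sentiments("bing"), by = "word") %>%
  count(sentiment, word, sort = TRUE) %>%
  slice_head(n = 10)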

      +

Next, we will use the AFINN lexicon. Unlike Bing, which labels words as positive or negative, AFINN assigns a numerical weight to each word. The sign of the weight indicates the polarity of the sentiment (i.e., positive or negative), while its magnitude indicates the intensity. Now, let’s see if these weighted values produce different sentiment scores.

      +
      +
      +
      wave1_alberta %>%
      +  mutate(day = substr(date, 9, 10)) %>%
      +  group_by(month, day) %>%
      +  inner_join(get_sentiments("afinn")) %>%
      +  group_by(month, day) %>%
      +  summarize(sentiment = sum(value),
      +            type = ifelse(sentiment >= 0, "positive", "negative")) %>%
      +  ggplot(aes(day, sentiment, fill = type)) +
      +  geom_col(show.legend = FALSE) +
      +  labs(x = "Days", y = "Sentiment Score") +
      +  ylim(-100, 100) + 
      +  facet_wrap(~ month, ncol = 2, scales = "free_x") +
      +  theme(legend.position = "none", 
      +        strip.background = element_blank(),
      +        strip.text = element_text(colour = "black", face = "bold", size = 11),
      +        axis.text.x = element_text(size = 11, angle = 90),
      +        axis.text.y = element_text(size = 11))
      +
      +
      +Sentiment scores based on the AFINN lexicon +

      +Figure 5: Sentiment scores based on the AFINN lexicon +

      +
      +
      +

The results based on the AFINN lexicon seem to be quite different! Once we take the “weight” of the tokens into account, most media briefings turn out to be positive (see the green bars), although there are still some days with negative sentiments (see the red bars). The two analyses we have done so far have yielded very different results for two reasons. First, as I mentioned above, the Bing lexicon focuses on the polarity of words but ignores their intensity (“dislike” and “hate” are considered negative words of equal strength). Unlike the Bing lexicon, the AFINN lexicon takes intensity into account, which affects the calculation of the sentiment scores. Second, the Bing lexicon (6,786 words) is considerably larger than the AFINN lexicon (2,477 words). Therefore, some tokens in the media briefings are likely included in the Bing lexicon but not in the AFINN lexicon; disregarding those tokens may have influenced the results.
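A quick, informal way to check the second point is to compare the two vocabularies directly:

# How much do the two lexicons overlap? (a rough check)
bing_words  <- get_sentiments("bing")$word
afinn_words <- get_sentiments("afinn")$word

length(intersect(bing_words, afinn_words))  # terms in both lexicons
length(setdiff(bing_words, afinn_words))    # terms only in Bing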

      +

The final lexicon we are going to try with the tidytext package is NRC. As I mentioned earlier, this lexicon uses Plutchik’s (1980) psychoevolutionary theory to label tokens based on basic emotions such as anger, fear, and anticipation. We are going to count the number of tokens associated with each emotion and then visualize the results.

      +
      +
      +
      wave1_alberta %>%
      +  mutate(day = substr(date, 9, 10)) %>%
      +  group_by(month, day) %>%
      +  inner_join(get_sentiments("nrc")) %>%
      +  count(month, day, sentiment) %>%
      +  group_by(month, sentiment) %>%
      +  summarize(n_total = sum(n)) %>%
      +  ggplot(aes(n_total, sentiment, fill = sentiment)) +
      +  geom_col(show.legend = FALSE) +
      +  labs(x = "Frequency", y = "") +
      +  xlim(0, 2000) + 
      +  facet_wrap(~ month, ncol = 2, scales = "free_x") +
      +  theme(strip.background = element_blank(),
      +        strip.text = element_text(colour = "black", face = "bold", size = 11),
      +        axis.text.x = element_text(size = 11),
      +        axis.text.y = element_text(size = 11))
      +
      +
      +Sentiment scores based on the NRC lexicon +

      +Figure 6: Sentiment scores based on the NRC lexicon +

      +
      +
      +

The figure shows that the media briefings were mostly positive each month, with Dr. Hinshaw frequently using words associated with “trust”, “anticipation”, and “fear”. Overall, the pattern of these emotions remains very similar over time, indicating that the media briefings were consistent in the type and intensity of the emotions they delivered.

      +

      Sentiment Analysis with sentimentr

      +

Another package for lexicon-based sentiment analysis is sentimentr (Rinker, 2021). Unlike the tidytext package, this package takes valence shifters (e.g., negation) into account, since a single word can easily flip the polarity of a sentence. For example, the sentence “I am not unhappy” is actually positive, but if we analyze it word by word, it may appear negative due to the words “not” and “unhappy”. Similarly, “I hardly like this book” is a negative sentence, but an analysis of the individual words “hardly” and “like” may yield a positive sentiment score. The sentimentr package addresses these limitations by detecting valence shifters (see the package author Tyler Rinker’s GitHub page for further details: https://github.com/trinker/sentimentr).
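As a quick sanity check of this behavior, we can score the two example sentences directly with the sentiment() function. The exact values depend on the package version, but the signs should illustrate the point:

library("sentimentr")

sentiment("I am not unhappy.")        # negation flips "unhappy" -> positive
sentiment("I hardly like this book.") # "hardly" dampens "like" -> negative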

      +

      To benefit from the sentimentr package, we need the actual sentences in the media briefings rather than the individual tokens. Therefore, I had to create an untokenized version of the dataset, which is available here. We will first import this dataset into R, get individual sentences for each media briefing using the get_sentences() function, and then calculate sentiment scores by day and month via sentiment_by().

      +
      +
      +
      library("sentimentr")
      +library("magrittr")
      +
      +load("wave1_alberta_sentence.RData")
      +
      +# Calculate sentiment scores by day and month
      +wave1_sentimentr <- wave1_alberta_sentence %>%
      +  mutate(day = substr(date, 9, 10)) %>%
      +  get_sentences() %$%
      +  sentiment_by(text, list(month, day))
      +
      +# View the dataset
      +head(wave1_sentimentr, 10)
      +
      +
      +
      +
      + +
      +
      +

      In the dataset we created, “ave_sentiment” is the average sentiment score for each day in March, April, May, and June (i.e., days where a media briefing was made). Using this dataset, we can visualize the sentiment scores.

      +
      +
      +
      wave1_sentimentr %>%
      +  group_by(month, day) %>%
      +  ggplot(aes(day, ave_sentiment, fill = ave_sentiment)) +
      +  scale_fill_gradient(low="red", high="blue") + 
      +  geom_col(show.legend = FALSE) +
      +  labs(x = "Days", y = "Sentiment Score") +
      +  ylim(-0.1, 0.3) +
      +  facet_wrap(~ month, ncol = 2, scales = "free_x") +
      +  theme(legend.position = "none", 
      +        strip.background = element_blank(),
      +        strip.text = element_text(colour = "black", face = "bold", size = 11),
      +        axis.text.x = element_text(size = 11, angle = 90),
      +        axis.text.y = element_text(size = 11))
      +
      +
+Sentiment scores based on sentimentr +

      +Figure 7: Sentiment scores based on sentimentr +

      +
      +
      +

In the figure above, the blue bars represent highly positive sentiment scores, while the red bars depict comparatively lower sentiment scores. The patterns in the sentiment scores generated by sentimentr closely resemble those derived from the AFINN lexicon. Notably, this analysis is based on the original sentences of the media briefings rather than individual tokens, and valence shifters are taken into account when the sentiment scores are computed. The convergence between the sentiment patterns identified by sentimentr and those from AFINN is not entirely unexpected, since both approaches use similar weighting systems that account for word intensity. This alignment reinforces our confidence in the initial findings obtained through AFINN and in the consistency of our analyses with sentimentr.
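If you would like to quantify this convergence instead of eyeballing the bar plots, one option is to correlate the two sets of daily scores. The sketch below reuses the objects created earlier in this post and assumes their grouping columns (month and day) line up:

# Correlate daily AFINN totals with sentimentr's average scores (sketch)
afinn_daily <- wave1_alberta %>%
  mutate(day = substr(date, 9, 10)) %>%
  inner_join(get_sentiments("afinn"), by = "word") %>%
  group_by(month, day) %>%
  summarize(afinn = sum(value), .groups = "drop")

afinn_daily %>%
  inner_join(wave1_sentimentr, by = c("month", "day")) %>%
  summarize(r = cor(afinn, ave_sentiment))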

      +

      Concluding Remarks

      +

      In conclusion, lexicon-based sentiment analysis in R offers a powerful tool for uncovering the emotional nuances within textual data. Throughout this post, we have explored the fundamental concepts of lexicon-based sentiment analysis and provided a practical demonstration of its implementation using R. By leveraging packages such as sentimentr and tidytext, we have illustrated how sentiment analysis can be seamlessly integrated into your data analysis workflow. As you embark on your journey into sentiment analysis, remember that the insights gained from this technique extend far beyond the surface of text. They provide valuable perspectives on public opinion, consumer sentiment, and beyond. I encourage you to delve deeper into lexicon-based sentiment analysis, experiment with the examples presented here, and unlock the rich insights waiting to be discovered within your own data. Happy analyzing!

      +
      +
      +
      +Benoit, K., Muhr, D., & Watanabe, K. (2021). Stopwords: Multilingual stopword lists. https://CRAN.R-project.org/package=stopwords +
      +
      +Bulut, O., & Poth, C. N. (2022). Rapid assessment of communication consistency: Sentiment analysis of public health briefings during the COVID-19 pandemic. AIMS Public Health, 9(2), 293–306. https://doi.org/10.3934/publichealth.2022020 +
      +
      +Hu, M., & Liu, B. (2004). Mining and summarizing customer reviews. Proceedings of the Tenth ACM SIGKDD International Conference on Knowledge Discovery and Data Mining, 168–177. +
      +
      +Mohammad, S. M., & Turney, P. D. (2013). Crowdsourcing a word–emotion association lexicon. Computational Intelligence, 29(3), 436–465. +
      +
      +Mullen, L. A., Benoit, K., Keyes, O., Selivanov, D., & Arnold, J. (2018). Fast, consistent tokenization of natural language text. Journal of Open Source Software, 3, 655. https://doi.org/10.21105/joss.00655 +
      +
      +Plutchik, R. (1980). A general psychoevolutionary theory of emotion. In Theories of emotion (pp. 3–33). Elsevier. +
      +
      +Poth, C. N., Bulut, O., Aquilina, A. M., & Otto, S. J. G. (2021). Using data mining for rapid complex case study descriptions: Example of public health briefings during the onset of the COVID-19 pandemic. Journal of Mixed Methods Research, 15(3), 348–373. https://doi.org/10.1177/15586898211013925 +
      +
      +Rinker, T. W. (2018). textstem: Tools for stemming and lemmatizing text. http://github.com/trinker/textstem +
      +
      +Rinker, T. W. (2021). sentimentr: Calculate text polarity sentiment. https://github.com/trinker/sentimentr +
      +
+Silge, J., & Robinson, D. (2016). tidytext: Text mining and analysis using tidy data principles in R. JOSS, 1(3). https://doi.org/10.21105/joss.00037 +
      +
      + + + +
      + +
      +
      + + + + + +
      +

      References

      +
      +

      Reuse

      +

      Text and figures are licensed under Creative Commons Attribution CC BY 4.0. The figures that have been reused from other sources don't fall under this license and can be recognized by a note in their caption: "Figure from ...".

      +

      Citation

      +

      For attribution, please cite this work as

      +
      Bulut (2024, Feb. 9). Okan Bulut: Lexicon-Based Sentiment Analysis Using R. Retrieved from https://okan.cloud/posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/
      +

      BibTeX citation

      +
      @misc{bulut2024lexicon-based,
      +  author = {Bulut, Okan},
      +  title = {Okan Bulut: Lexicon-Based Sentiment Analysis Using R},
      +  url = {https://okan.cloud/posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/},
      +  year = {2024}
      +}
      +
      + + + + + + + + + + + diff --git a/docs/posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/figure-html5/ch12-1.png b/docs/posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/figure-html5/ch12-1.png new file mode 100644 index 0000000..00965b9 Binary files /dev/null and b/docs/posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/figure-html5/ch12-1.png differ diff --git a/docs/posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/figure-html5/ch13-1.png b/docs/posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/figure-html5/ch13-1.png new file mode 100644 index 0000000..00a64c9 Binary files /dev/null and b/docs/posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/figure-html5/ch13-1.png differ diff --git a/docs/posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/figure-html5/ch4-1.png b/docs/posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/figure-html5/ch4-1.png new file mode 100644 index 0000000..7de1d26 Binary files /dev/null and b/docs/posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/figure-html5/ch4-1.png differ diff --git a/docs/posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/figure-html5/ch5-1.png b/docs/posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/figure-html5/ch5-1.png new file mode 100644 index 0000000..d317b32 Binary files /dev/null and b/docs/posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/figure-html5/ch5-1.png differ diff --git a/docs/posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/figure-html5/ch6-1.png b/docs/posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/figure-html5/ch6-1.png new file mode 100644 index 0000000..01a6f64 Binary files /dev/null and b/docs/posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/figure-html5/ch6-1.png differ diff --git a/docs/posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/figure-html5/ch7-1.png b/docs/posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/figure-html5/ch7-1.png new file mode 100644 index 0000000..10f2c60 Binary files /dev/null and b/docs/posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/figure-html5/ch7-1.png differ diff --git a/docs/posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/figure-html5/ch8-1.png b/docs/posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/figure-html5/ch8-1.png new file mode 100644 index 0000000..4180c35 Binary files /dev/null and b/docs/posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/figure-html5/ch8-1.png differ diff --git 
a/docs/posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/figure-html5/ch9-1.png b/docs/posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/figure-html5/ch9-1.png new file mode 100644 index 0000000..1576893 Binary files /dev/null and b/docs/posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/figure-html5/ch9-1.png differ diff --git a/docs/posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/header-attrs-2.22/header-attrs.js b/docs/posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/header-attrs-2.22/header-attrs.js new file mode 100644 index 0000000..dd57d92 --- /dev/null +++ b/docs/posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/lexicon-based-sentiment-analysis-using-r_files/header-attrs-2.22/header-attrs.js @@ -0,0 +1,12 @@ +// Pandoc 2.9 adds attributes on both header and div. We remove the former (to +// be compatible with the behavior of Pandoc < 2.8). +document.addEventListener('DOMContentLoaded', function(e) { + var hs = document.querySelectorAll("div.section[class*='level'] > :first-child"); + var i, h, a; + for (i = 0; i < hs.length; i++) { + h = hs[i]; + if (!/^h[1-6]$/i.test(h.tagName)) continue; // it should be a header h1-h6 + a = h.attributes; + while (a.length > 0) h.removeAttribute(a[0].name); + } +}); diff --git a/docs/posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/sentiment.bib b/docs/posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/sentiment.bib new file mode 100644 index 0000000..dee6894 --- /dev/null +++ b/docs/posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/sentiment.bib @@ -0,0 +1,103 @@ +@Article{sentimentbulut, +title = {Rapid assessment of communication consistency: sentiment analysis of public health briefings during the COVID-19 pandemic}, +journal = {AIMS Public Health}, +volume = {9}, +number = {2}, +pages = {293-306}, +year = {2022}, +issn = {2327-8994}, +doi = {10.3934/publichealth.2022020}, +url = {https://www.aimspress.com/article/doi/10.3934/publichealth.2022020}, +author = {Okan Bulut and Cheryl N. Poth}, +keywords = {COVID-19, public health informatics, data mining, sentiment analysis, communication}, +} + +@article{sentimentpoth, +author = {Cheryl N. Poth and Okan Bulut and Alexandra M. Aquilina and Simon J. G. 
Otto}, +title ={Using Data Mining for Rapid Complex Case Study Descriptions: Example of Public Health Briefings During the Onset of the COVID-19 Pandemic}, +journal = {Journal of Mixed Methods Research}, +volume = {15}, +number = {3}, +pages = {348-373}, +year = {2021}, +doi = {10.1177/15586898211013925}, +URL = {https://doi.org/10.1177/15586898211013925} +} + +@inproceedings{hu2004mining, + title={Mining and summarizing customer reviews}, + author={Hu, Minqing and Liu, Bing}, + booktitle={Proceedings of the tenth ACM SIGKDD international conference on Knowledge discovery and data mining}, + pages={168--177}, + year={2004} +} + +@article{mohammad2013crowdsourcing, + title={Crowdsourcing a word--emotion association lexicon}, + author={Mohammad, Saif M and Turney, Peter D}, + journal={Computational intelligence}, + volume={29}, + number={3}, + pages={436--465}, + year={2013}, + publisher={Wiley Online Library} +} + +@incollection{plutchik1980general, + title={A general psychoevolutionary theory of emotion}, + author={Plutchik, Robert}, + booktitle={Theories of emotion}, + pages={3--33}, + year={1980}, + publisher={Elsevier} +} + +@Article{R-tidytext, + title = {tidytext: Text Mining and Analysis Using Tidy Data Principles in R}, + author = {Julia Silge and David Robinson}, + doi = {10.21105/joss.00037}, + url = {http://dx.doi.org/10.21105/joss.00037}, + year = {2016}, + publisher = {The Open Journal}, + volume = {1}, + number = {3}, + journal = {JOSS}, + } + +@Article{R-tokenizers, + title = {Fast, Consistent Tokenization of Natural Language Text}, + author = {Lincoln A. Mullen and Kenneth Benoit and Os Keyes and Dmitry Selivanov and Jeffrey Arnold}, + journal = {Journal of Open Source Software}, + year = {2018}, + volume = {3}, + issue = {23}, + pages = {655}, + url = {https://doi.org/10.21105/joss.00655}, + doi = {10.21105/joss.00655}, + } + +@Manual{R-stopwords, + title = {stopwords: Multilingual Stopword Lists}, + author = {Kenneth Benoit and David Muhr and Kohei Watanabe}, + year = {2021}, + note = {R package version 2.3}, + url = {https://CRAN.R-project.org/package=stopwords}, + } + +@Manual{R-textstem, + title = {{textstem}: Tools for stemming and lemmatizing text}, + author = {Tyler W. Rinker}, + address = {Buffalo, New York}, + note = {version 0.1.4}, + year = {2018}, + url = {http://github.com/trinker/textstem}, + } + +@Manual{R-sentiment, + title = {{sentimentr}: Calculate Text Polarity Sentiment}, + author = {Tyler W. Rinker}, + address = {Buffalo, New York}, + note = {version 2.9.0}, + year = {2021}, + url = {https://github.com/trinker/sentimentr}, +} \ No newline at end of file diff --git a/docs/posts/posts.json b/docs/posts/posts.json index a595a2f..52413d9 100644 --- a/docs/posts/posts.json +++ b/docs/posts/posts.json @@ -1,4 +1,26 @@ [ + { + "path": "posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/", + "title": "Lexicon-Based Sentiment Analysis Using R", + "description": "In this post, we will uncover the power of lexicon-based sentiment analysis using R. I demonstrate how to harness the capabilities of lexicons like NRC and Bing to decipher the emotional pulse of your text data. 
With practical examples, you'll gain the skills to analyze sentiment scores and extract valuable insights from your textual data sets.\n\n(12 min read)", + "author": [ + { + "name": "Okan Bulut", + "url": "http://www.okanbulut.com/" + } + ], + "date": "2024-02-09", + "categories": [ + "data science", + "natural language processing", + "text mining", + "sentiment" + ], + "contents": "\r\n\r\nContents\r\nIntroduction\r\nLexicon-Based Sentiment Analysis\r\nExample\r\nDescriptive Analysis\r\nSentiment Analysis with tidytext\r\nSentiment Analysis with sentimentr\r\n\r\nConcluding Remarks\r\n\r\nPhoto by Roman Odintsov on PexelsIntroduction\r\nDuring the COVID-19 pandemic, I decided to learn a new statistical technique to keep my mind occupied rather than constantly immersing myself in pandemic-related news. After evaluating several options, I found the concepts related to natural language processing (NLP) particularly captivating. So, I opted to delve deeper into this field and explore one specific technique: sentiment analysis, also known as “opinion mining” in academic literature. This analytical method empowers researchers to extract and interpret the emotions conveyed toward a specific subject within written text. Through sentiment analysis, one can discern the polarity (positive or negative), nature, and intensity of sentiments expressed across various textual formats such as documents, customer reviews, and social media posts.\r\nAmidst the pandemic, I observed a significant trend among researchers who turned to sentiment analysis as a tool to measure public responses to news and developments surrounding the virus. This involved analyzing user-generated content on popular social media platforms such as Twitter, YouTube, and Instagram. Intrigued by this methodology, my colleagues and I endeavored to contribute to the existing body of research by scrutinizing the daily briefings provided by public health authorities. In Alberta, Dr. Deena Hinshaw, who used to be the province’s chief medical officer of health, regularly delivered updates on the region’s response to the ongoing pandemic. Through our analysis of these public health announcements, we aimed to assess Alberta’s effectiveness in implementing communication strategies during this intricate public health crisis. Our investigation, conducted through the lenses of sentiment analysis, sought to shed light on the efficacy of communication strategies employed during this challenging period in public health (Bulut & Poth, 2022; Poth et al., 2021).\r\nIn this post, I aim to walk you through the process of performing sentiment analysis using R. Specifically, I’ll focus on “lexicon-based sentiment analysis,” which I’ll discuss in more detail in the next section. I’ll provide examples of lexicon-based sentiment analysis that we’ve integrated into the publications referenced earlier. Additionally, in future posts, I’ll delve into more advanced forms of sentiment analysis, making use of state-of-the-art pre-trained models accessible on Hugging Face.\r\nLexicon-Based Sentiment Analysis\r\nAs I learned more about sentiment analysis, I discovered that the predominant method for extracting sentiments is lexicon-based sentiment analysis. This approach entails utilizing a specific lexicon, essentially the vocabulary of a language or subject, to discern the direction and intensity of sentiments conveyed within a given text. Some lexicons, like the Bing lexicon (Hu & Liu, 2004), classify words as either positive or negative. 
Conversely, other lexicons provide more detailed sentiment labels, such as the NRC Emotion Lexicon (Mohammad & Turney, 2013), which categorizes words based on both positive and negative sentiments, as well as Plutchik’s (Plutchik, 1980) psychoevolutionary theory of basic emotions (e.g., anger, fear, anticipation, trust, surprise, sadness, joy, and disgust).\r\nLexicon-based sentiment analysis operates by aligning words within a given text with those found in widely used lexicons such as NRC and Bing. Each word receives an assigned sentiment, typically categorized as positive or negative. The text’s collective sentiment score is subsequently derived by summing the individual sentiment scores of its constituent words. For instance, in a scenario where a text incorporates 50 positive and 30 negative words according to the Bing lexicon, the resulting sentiment score would be 20. This value indicates a predominance of positive sentiments within the text. Conversely, a negative total would imply a prevalence of negative sentiments.\r\nPerforming lexicon-based sentiment analysis using R can be both fun and tricky. While analyzing public health announcements in terms of sentiments, I found Julia Silge and David Robinson’s book, Text Mining with R, to be very helpful. The book has a chapter dedicated to sentiment analysis, where the authors demonstrate how to conduct sentiment analysis using general-purpose lexicons like Bing and NRC. However, Julia and David also highlight a major limitation of lexicon-based sentiment analysis: the analysis considers only single words (i.e., unigrams) and does not consider qualifiers before a word. For instance, negation words like “not” in “not true” are ignored, and sentiment analysis processes them as two separate words, “not” and “true”. Furthermore, if a particular word (either positive or negative) is repeated throughout the text, this may skew the results depending on the polarity (positive or negative) of this word. Therefore, the results of lexicon-based sentiment analysis should be interpreted carefully.\r\nNow, let’s move to our example where we will conduct lexicon-based sentiment analysis using Dr. Deena Hinshaw’s media briefings during the COVID-19 pandemic. My goal is to showcase two R packages capable of running sentiment analysis 📉.\r\nExample\r\nFor the sake of simplicity, we will focus on the first wave of the pandemic (March 2020 - June 2020). The transcripts of all media briefings were available on the government of Alberta’s COVID-19 pandemic website (https://www.alberta.ca/covid). After importing these transcripts into R, I turned all the text into lowercase and then applied word tokenization using the tidytext (Silge & Robinson, 2016) and tokenizers (Mullen et al., 2018) packages. Word tokenization split the sentences in the media briefings into individual words for each entry (i.e., day of media briefings). Next, I applied lemmatization to the tokens to resolve each word into its canonical form using the textstem package (Rinker, 2018). Finally, I removed common stopwords, such as “my”, “for”, “that”, and “with”, using the stopwords package (Benoit et al., 2021). The final dataset is available here. 
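Before importing the data, here is a minimal sketch of the counting logic described above (illustrative values only, not the briefing data):

# Bing-style scoring: sentiment = number of positive words - number of negative words
n_positive <- 50
n_negative <- 30
n_positive - n_negative
#> [1] 20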
Now, let’s import the data into R and then review its content.\r\n\r\n\r\nload(\"wave1_alberta.RData\")\r\n\r\nhead(wave1_alberta, 10)\r\n\r\n\r\n\r\n\r\n\r\n\r\nThe dataset has three columns:\r\nmonth (the month of the media briefing)\r\ndate (the exact date of the media briefing), and\r\nword (words or tokens used in the media briefing)\r\nDescriptive Analysis\r\nNow, we can calculate some descriptive statistics to better understand the content of our dataset. We will begin by finding the top 5 words (based on their frequency) for each month.\r\n\r\n\r\nlibrary(\"dplyr\")\r\n\r\nwave1_alberta %>%\r\n group_by(month) %>%\r\n count(word, sort = TRUE) %>%\r\n slice_head(n = 5) %>%\r\n as.data.frame()\r\n\r\n month word n\r\n1 March 2020 health 199\r\n2 March 2020 care 102\r\n3 March 2020 continue 102\r\n4 March 2020 spread 87\r\n5 March 2020 test 86\r\n6 April 2020 test 156\r\n7 April 2020 health 146\r\n8 April 2020 care 145\r\n9 April 2020 continue 135\r\n10 April 2020 spread 129\r\n11 May 2020 health 135\r\n12 May 2020 continue 118\r\n13 May 2020 test 102\r\n14 May 2020 people 78\r\n15 May 2020 public 78\r\n16 June 2020 test 126\r\n17 June 2020 health 93\r\n18 June 2020 continue 69\r\n19 June 2020 people 57\r\n20 June 2020 community 43\r\n\r\nThe output shows that words such as health, continue, and test were commonly used in the media briefings across this 4-month period. We can also expand our list to the most common 10 words and view the results visually:\r\n\r\n\r\nlibrary(\"tidytext\")\r\nlibrary(\"ggplot2\")\r\n\r\nwave1_alberta %>%\r\n group_by(month) %>%\r\n count(word, sort = TRUE) %>%\r\n # Find the top 10 words\r\n slice_head(n = 10) %>%\r\n ungroup() %>%\r\n # Order the words by their frequency within each month\r\n mutate(word = reorder_within(word, n, month)) %>%\r\n # Create a bar graph\r\n ggplot(aes(x = n, y = word, fill = month)) +\r\n geom_col() +\r\n scale_y_reordered() +\r\n facet_wrap(~ month, scales = \"free_y\") +\r\n labs(x = \"Frequency\", y = NULL) +\r\n theme(legend.position = \"none\",\r\n axis.text.x = element_text(size = 11),\r\n axis.text.y = element_text(size = 11),\r\n strip.background = element_blank(),\r\n strip.text = element_text(colour = \"black\", face = \"bold\", size = 13))\r\n\r\n\r\n\r\nFigure 1: Most common words based on frequency\r\n\r\n\r\n\r\nSince some words are common across all four months, the plot above may not necessarily show us the important words that are unique to each month. To find such important words, we can use Term Frequency - Inverse Document Frequency (TF-IDF), a widely used technique in NLP for measuring how important a term is within a document relative to a collection of documents (for more detailed information about TF-IDF, check out my previous blog post). In our example, we will treat media briefings for each month as a document and calculate TF-IDF for the tokens (i.e., words) within each document. The first part of the R code below creates a new dataset, wave1_tf_idf, by calculating TF-IDF for all tokens and selecting the tokens with the highest TF-IDF values within each month. 
Next, we use this dataset to create a bar plot with the TF-IDF values to view the common words unique to each month.\r\n\r\n\r\n# Calculate TF-IDF for the words for each month\r\nwave1_tf_idf <- wave1_alberta %>%\r\n count(month, word, sort = TRUE) %>%\r\n bind_tf_idf(word, month, n) %>%\r\n arrange(month, -tf_idf) %>%\r\n group_by(month) %>%\r\n top_n(10) %>%\r\n ungroup\r\n\r\n# Visualize the results\r\nwave1_tf_idf %>%\r\n mutate(word = reorder_within(word, tf_idf, month)) %>%\r\n ggplot(aes(word, tf_idf, fill = month)) +\r\n geom_col(show.legend = FALSE) + \r\n facet_wrap(~ month, scales = \"free\", ncol = 2) +\r\n scale_x_reordered() +\r\n coord_flip() +\r\n theme(strip.background = element_blank(),\r\n strip.text = element_text(colour = \"black\", face = \"bold\", size = 13),\r\n axis.text.x = element_text(size = 11),\r\n axis.text.y = element_text(size = 11)) +\r\n labs(x = NULL, y = \"TF-IDF\")\r\n\r\n\r\n\r\nFigure 2: Most common words based on TF-IDF\r\n\r\n\r\n\r\nThese results are more informative because the tokens shown in the figure reflect unique topics discussed each month. For example, in March 2020, the media briefings were mostly about limiting travel, returning from crowded conferences, and COVID-19 cases on cruise ships. In June 2020, the focus of the media briefings shifted towards mask requirements, people protesting pandemic-related restrictions, and so on. Before we move on to the sentiment analysis, let’s take a look at another descriptive variable: the length of each media briefing. This will show us whether the media briefings became longer or shorter over time.\r\n\r\n\r\nwave1_alberta %>%\r\n mutate(day = substr(date, 9, 10)) %>%\r\n group_by(month, day) %>%\r\n summarize(n = n()) %>%\r\n ggplot(aes(day, n, color = month, shape = month, group = month)) +\r\n geom_point(size = 2) + \r\n geom_line() + \r\n labs(x = \"Days\", y = \"Number of Words\") +\r\n theme(legend.position = \"none\", \r\n axis.text.x = element_text(angle = 90, size = 11),\r\n strip.background = element_blank(),\r\n strip.text = element_text(colour = \"black\", face = \"bold\", size = 11),\r\n axis.text.y = element_text(size = 11)) +\r\n ylim(0, 800) +\r\n facet_wrap(~ month, scales = \"free_x\")\r\n\r\n\r\n\r\nFigure 3: Number of words by day\r\n\r\n\r\n\r\nThe figure above shows that the length of media briefings varied quite substantially over time. Especially in March and May, there are larger fluctuations (i.e., very long or short briefings), whereas in June, the daily media briefings are quite similar in terms of length.\r\nSentiment Analysis with tidytext\r\nAfter analyzing the dataset descriptively, we are ready to begin the sentiment analysis. In the first part, we will use the tidytext package for performing sentiment analysis and computing sentiment scores. We will first import the lexicons into R and then merge them with our dataset. 
Using the Bing lexicon, we need to find the difference between the number of positive and negative words to produce a sentiment score (i.e., sentiment = the number of positive words - the number of negative words).\r\n\r\n\r\n# From the three lexicons, Bing is already available in the tidytext package\r\n# for AFINN and NRC, install the textdata package by uncommenting the next line\r\n# install.packages(\"textdata\")\r\nget_sentiments(\"bing\") \r\n\r\n# A tibble: 6,786 × 2\r\n word sentiment\r\n <chr> <chr> \r\n 1 2-faces negative \r\n 2 abnormal negative \r\n 3 abolish negative \r\n 4 abominable negative \r\n 5 abominably negative \r\n 6 abominate negative \r\n 7 abomination negative \r\n 8 abort negative \r\n 9 aborted negative \r\n10 aborts negative \r\n# ℹ 6,776 more rows\r\n\r\nget_sentiments(\"afinn\") \r\n\r\n# A tibble: 2,477 × 2\r\n word value\r\n <chr> <dbl>\r\n 1 abandon -2\r\n 2 abandoned -2\r\n 3 abandons -2\r\n 4 abducted -2\r\n 5 abduction -2\r\n 6 abductions -2\r\n 7 abhor -3\r\n 8 abhorred -3\r\n 9 abhorrent -3\r\n10 abhors -3\r\n# ℹ 2,467 more rows\r\n\r\nget_sentiments(\"nrc\")\r\n\r\n# A tibble: 13,901 × 2\r\n word sentiment\r\n <chr> <chr> \r\n 1 abacus trust \r\n 2 abandon fear \r\n 3 abandon negative \r\n 4 abandon sadness \r\n 5 abandoned anger \r\n 6 abandoned fear \r\n 7 abandoned negative \r\n 8 abandoned sadness \r\n 9 abandonment anger \r\n10 abandonment fear \r\n# ℹ 13,891 more rows\r\n\r\n# We will need the spread function from tidyr\r\nlibrary(\"tidyr\")\r\n\r\n# Sentiment scores with bing (based on frequency)\r\nwave1_alberta %>%\r\n mutate(day = substr(date, 9, 10)) %>%\r\n group_by(month, day) %>%\r\n inner_join(get_sentiments(\"bing\")) %>%\r\n count(month, day, sentiment) %>%\r\n spread(sentiment, n) %>%\r\n mutate(sentiment = positive - negative) %>%\r\n ggplot(aes(day, sentiment, fill = month)) +\r\n geom_col(show.legend = FALSE) +\r\n labs(x = \"Days\", y = \"Sentiment Score\") +\r\n ylim(-50, 50) + \r\n theme(legend.position = \"none\", axis.text.x = element_text(angle = 90)) +\r\n facet_wrap(~ month, ncol = 2, scales = \"free_x\") +\r\n theme(strip.background = element_blank(),\r\n strip.text = element_text(colour = \"black\", face = \"bold\", size = 11),\r\n axis.text.x = element_text(size = 11),\r\n axis.text.y = element_text(size = 11)) \r\n\r\n\r\n\r\nFigure 4: Sentiment scores based on the Bing lexicon\r\n\r\n\r\n\r\nThe figure above shows that the sentiments delivered in the media briefings were generally negative, which is not necessarily surprising since the media briefings were all about how many people passed away, hospitalization rates, potential outbreaks, etc. On certain days (e.g., March 24, 2020 and May 4, 2020), the media briefings were particularly negative in terms of sentiment.\r\nNext, we will use the AFINN lexicon. Unlike Bing, which labels words as positive or negative, AFINN assigns a numerical weight to each word. The sign of the weight indicates the polarity of sentiments (i.e., positive or negative) while the value indicates the intensity of sentiments. 
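To make the difference concrete before we apply AFINN to the briefings, here is a minimal sketch of the weighted scoring (toy words drawn from the AFINN output shown above, not the briefing data):

library("dplyr")
library("tidytext")

# Each word's AFINN weight is summed into a single score, so intensity matters:
# abandon (-2) + abhor (-3) + abhorrent (-3) = -8
toy <- tibble::tibble(word = c("abandon", "abhor", "abhorrent"))
toy %>%
  inner_join(get_sentiments("afinn"), by = "word") %>%
  summarize(sentiment = sum(value))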
Now, let’s see if these weighted values produce different sentiment scores.\r\n\r\n\r\nwave1_alberta %>%\r\n mutate(day = substr(date, 9, 10)) %>%\r\n group_by(month, day) %>%\r\n inner_join(get_sentiments(\"afinn\")) %>%\r\n group_by(month, day) %>%\r\n summarize(sentiment = sum(value),\r\n type = ifelse(sentiment >= 0, \"positive\", \"negative\")) %>%\r\n ggplot(aes(day, sentiment, fill = type)) +\r\n geom_col(show.legend = FALSE) +\r\n labs(x = \"Days\", y = \"Sentiment Score\") +\r\n ylim(-100, 100) + \r\n facet_wrap(~ month, ncol = 2, scales = \"free_x\") +\r\n theme(legend.position = \"none\", \r\n strip.background = element_blank(),\r\n strip.text = element_text(colour = \"black\", face = \"bold\", size = 11),\r\n axis.text.x = element_text(size = 11, angle = 90),\r\n axis.text.y = element_text(size = 11))\r\n\r\n\r\n\r\nFigure 5: Sentiment scores based on the AFINN lexicon\r\n\r\n\r\n\r\nThe results based on the AFINN lexicon seem to be quite different! Once we take the “weight” of the tokens into account, most media briefings turn out to be positive (see the green bars), although there are still some days with negative sentiments (see the red bars). The two analyses we have done so far have yielded very different results for two reasons. First, as I mentioned above, the Bing lexicon focuses on the polarity of the words but ignores the intensity of the words (dislike and hate are considered negative words with equal intensity). Unlike the Bing lexicon, the AFINN lexicon takes the intensity into account, which impacts the calculation of the sentiment scores. Second, the Bing lexicon (6,786 words) is considerably larger than the AFINN lexicon (2,477 words). Therefore, it is likely that some tokens in the media briefings are included in the Bing lexicon, but not in the AFINN lexicon. Disregarding those tokens might have impacted the results.\r\nThe final lexicon we will use with the tidytext package is NRC. As I mentioned earlier, this lexicon uses Plutchik’s (Plutchik, 1980) psychoevolutionary theory to label the tokens based on basic emotions such as anger, fear, and anticipation. We are going to count the number of words (i.e., tokens) associated with each emotion and then visualize the results.\r\n\r\n\r\nwave1_alberta %>%\r\n mutate(day = substr(date, 9, 10)) %>%\r\n group_by(month, day) %>%\r\n inner_join(get_sentiments(\"nrc\")) %>%\r\n count(month, day, sentiment) %>%\r\n group_by(month, sentiment) %>%\r\n summarize(n_total = sum(n)) %>%\r\n ggplot(aes(n_total, sentiment, fill = sentiment)) +\r\n geom_col(show.legend = FALSE) +\r\n labs(x = \"Frequency\", y = \"\") +\r\n xlim(0, 2000) + \r\n facet_wrap(~ month, ncol = 2, scales = \"free_x\") +\r\n theme(strip.background = element_blank(),\r\n strip.text = element_text(colour = \"black\", face = \"bold\", size = 11),\r\n axis.text.x = element_text(size = 11),\r\n axis.text.y = element_text(size = 11))\r\n\r\n\r\n\r\nFigure 6: Sentiment scores based on the NRC lexicon\r\n\r\n\r\n\r\nThe figure shows that the media briefings are mostly positive each month. Dr. Hinshaw used words associated with “trust”, “anticipation”, and “fear”. Overall, the pattern of these emotions seems to remain very similar over time, indicating the consistency of the media briefings in terms of the type and intensity of the emotions delivered.\r\nSentiment Analysis with sentimentr\r\nAnother package for lexicon-based sentiment analysis is sentimentr (Rinker, 2021). 
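As a quick preview of the valence-shifter handling discussed next, here is a minimal sketch with toy sentences (assuming the sentimentr package is installed):

library("sentimentr")

# sentiment() scores each sentence while accounting for valence shifters,
# so the negation in the first sentence flips the polarity of "unhappy"
# and "hardly" dampens the positive "like" in the second.
sentiment("I am not unhappy.")
sentiment("I hardly like this book.")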
Unlike the tidytext package, this package takes valence shifters (e.g., negation) into account, which can easily flip the polarity of a sentence with one word. For example, the sentence “I am not unhappy” is actually positive but if we analyze it word by word, the sentence may seem to have a negative sentiment due to the words, “not” and “unhappy”. Similarly, “I hardly like this book” is a negative sentence but the analysis of individual words, “hardly” and “like”, may yield a positive sentiment score. The sentimentr package addresses the limitations around sentiment detection with valence shifters (see the package author Tyler Rinker’s Github page for further details on sentimentr: https://github.com/trinker/sentimentr).\r\nTo benefit from the sentimentr package, we need the actual sentences in the media briefings rather than the individual tokens. Therefore, I had to create an untokenized version of the dataset, which is available here. We will first import this dataset into R, get individual sentences for each media briefing using the get_sentences() function, and then calculate sentiment scores by day and month via sentiment_by().\r\n\r\n\r\nlibrary(\"sentimentr\")\r\nlibrary(\"magrittr\")\r\n\r\nload(\"wave1_alberta_sentence.RData\")\r\n\r\n# Calculate sentiment scores by day and month\r\nwave1_sentimentr <- wave1_alberta_sentence %>%\r\n mutate(day = substr(date, 9, 10)) %>%\r\n get_sentences() %$%\r\n sentiment_by(text, list(month, day))\r\n\r\n# View the dataset\r\nhead(wave1_sentimentr, 10)\r\n\r\n\r\n\r\n\r\n\r\n\r\nIn the dataset we created, “ave_sentiment” is the average sentiment score for each day in March, April, May, and June (i.e., days where a media briefing was made). Using this dataset, we can visualize the sentiment scores.\r\n\r\n\r\nwave1_sentimentr %>%\r\n group_by(month, day) %>%\r\n ggplot(aes(day, ave_sentiment, fill = ave_sentiment)) +\r\n scale_fill_gradient(low=\"red\", high=\"blue\") + \r\n geom_col(show.legend = FALSE) +\r\n labs(x = \"Days\", y = \"Sentiment Score\") +\r\n ylim(-0.1, 0.3) +\r\n facet_wrap(~ month, ncol = 2, scales = \"free_x\") +\r\n theme(legend.position = \"none\", \r\n strip.background = element_blank(),\r\n strip.text = element_text(colour = \"black\", face = \"bold\", size = 11),\r\n axis.text.x = element_text(size = 11, angle = 90),\r\n axis.text.y = element_text(size = 11))\r\n\r\n\r\n\r\nFigure 7: Sentiment scores based on sentimentr\r\n\r\n\r\n\r\nIn the figure above, the blue bars represent highly positive sentiment scores, while the red bars depict comparatively lower sentiment scores. The patterns observed in the sentiment scores generated by sentimentr closely resemble those derived from the AFINN lexicon. Notably, this analysis is based on the original media briefings rather than solely tokens, with consideration given to valence shifters in the computation of sentiment scores. The convergence between the sentiment patterns identified by sentimentr and those from AFINN is not entirely unexpected. Both approaches incorporate similar weighting systems and mechanisms that account for word intensity. This alignment reinforces our confidence in the initial findings obtained through AFINN, validating the consistency and reliability of our analyses with sentimentr.\r\nConcluding Remarks\r\nIn conclusion, lexicon-based sentiment analysis in R offers a powerful tool for uncovering the emotional nuances within textual data. 
Throughout this post, we have explored the fundamental concepts of lexicon-based sentiment analysis and provided a practical demonstration of its implementation using R. By leveraging packages such as sentimentr and tidytext, we have illustrated how sentiment analysis can be seamlessly integrated into your data analysis workflow. As you embark on your journey into sentiment analysis, remember that the insights gained from this technique extend far beyond the surface of text. They provide valuable perspectives on public opinion, consumer sentiment, and beyond. I encourage you to delve deeper into lexicon-based sentiment analysis, experiment with the examples presented here, and unlock the rich insights waiting to be discovered within your own data. Happy analyzing!\r\n\r\n\r\n\r\nBenoit, K., Muhr, D., & Watanabe, K. (2021). stopwords: Multilingual stopword lists. https://CRAN.R-project.org/package=stopwords\r\n\r\n\r\nBulut, O., & Poth, C. N. (2022). Rapid assessment of communication consistency: Sentiment analysis of public health briefings during the COVID-19 pandemic. AIMS Public Health, 9(2), 293–306. https://doi.org/10.3934/publichealth.2022020\r\n\r\n\r\nHu, M., & Liu, B. (2004). Mining and summarizing customer reviews. Proceedings of the Tenth ACM SIGKDD International Conference on Knowledge Discovery and Data Mining, 168–177.\r\n\r\n\r\nMohammad, S. M., & Turney, P. D. (2013). Crowdsourcing a word–emotion association lexicon. Computational Intelligence, 29(3), 436–465.\r\n\r\n\r\nMullen, L. A., Benoit, K., Keyes, O., Selivanov, D., & Arnold, J. (2018). Fast, consistent tokenization of natural language text. Journal of Open Source Software, 3(23), 655. https://doi.org/10.21105/joss.00655\r\n\r\n\r\nPlutchik, R. (1980). A general psychoevolutionary theory of emotion. In Theories of emotion (pp. 3–33). Elsevier.\r\n\r\n\r\nPoth, C. N., Bulut, O., Aquilina, A. M., & Otto, S. J. G. (2021). Using data mining for rapid complex case study descriptions: Example of public health briefings during the onset of the COVID-19 pandemic. Journal of Mixed Methods Research, 15(3), 348–373. https://doi.org/10.1177/15586898211013925\r\n\r\n\r\nRinker, T. W. (2018). textstem: Tools for stemming and lemmatizing text. http://github.com/trinker/textstem\r\n\r\n\r\nRinker, T. W. (2021). sentimentr: Calculate text polarity sentiment. https://github.com/trinker/sentimentr\r\n\r\n\r\nSilge, J., & Robinson, D. (2016). tidytext: Text mining and analysis using tidy data principles in R. JOSS, 1(3). 
https://doi.org/10.21105/joss.00037\r\n\r\n\r\n\r\n\r\n", + "preview": "posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/emoji.jpg", + "last_modified": "2024-02-09T10:11:54-07:00", + "input_file": {} + }, { "path": "posts/2024-01-04-introduction-to-psychometric-network-analysis/", "title": "Introduction to Psychometric Network Analysis", diff --git a/docs/search.json b/docs/search.json index a45ad46..548fe1c 100644 --- a/docs/search.json +++ b/docs/search.json @@ -4,15 +4,15 @@ "path": "about.html", "title": "Okan Bulut", "author": [], - "contents": "\r\n\r\n \r\n \r\n \r\n \r\n Okan Bulut\r\n \r\n \r\n Home\r\n About\r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n ☰\r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n Okan Bulut\r\n \r\n \r\n \r\n \r\n \r\n \r\n LinkedIn\r\n \r\n \r\n \r\n \r\n \r\n \r\n Twitter\r\n \r\n \r\n \r\n \r\n \r\n \r\n GitHub\r\n \r\n \r\n \r\n \r\n \r\n \r\n Email\r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n Welcome to the Psychometrics and Data Science\r\n with R and Python blog!\r\n I am an Associate Professor in the Measurement,\r\n Evaluation, and Data Science program at the University of\r\n Alberta. I teach graduate courses and workshops on\r\n psychometrics, educational measurement, and statistical\r\n modeling using R. For more information, you can visit my\r\n personal website: www.okanbulut.com.\r\n As a passionate R user, I always conduct statistical and\r\n psychometric analysis on educational and psychological data\r\n using R. I often write my own functions but I also benefit\r\n from the existing R packages available on CRAN, GitHub, and other platforms.\r\n In 2018, my colleague Chris Desjardins and I co-authored the\r\n Handbook\r\n of Educational Measurement and Psychometrics Using R. We\r\n wanted to present important topics in measurement and\r\n psychometrics as well as their applications in R. Our book\r\n comes with a complimentary package, hemp,\r\n that helps our readers reproduce the content presented in\r\n the book. The hemp package and R script\r\n files for each chapter of our book are available on GitHub:\r\n https://github.com/cddesja/hemp.\r\n In this blog, I hope to continue sharing psychometric\r\n applications using R. In addition, I plan to present new\r\n examples focusing on the state-of-the-art methods in data\r\n science and educational data mining using Python. Questions,\r\n comments, and suggestions from all readers are welcomed!\r\n Lastly, if my blog has helped you, you can buy me\r\n coffee :-)\r\n \r\n \r\n \r\n \r\n\r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n Okan Bulut\r\n \r\n \r\n \r\n \r\n \r\n \r\n LinkedIn\r\n \r\n \r\n \r\n \r\n Twitter\r\n \r\n \r\n \r\n \r\n GitHub\r\n \r\n \r\n \r\n \r\n Email\r\n \r\n \r\n \r\n \r\n \r\n \r\n Welcome to the Psychometrics and Data Science\r\n with R and Python blog!\r\n I am an Associate Professor in the Measurement,\r\n Evaluation, and Data Science program at the University of\r\n Alberta. I teach graduate courses and workshops on\r\n psychometrics, educational measurement, and statistical\r\n modeling using R. For more information, you can visit my\r\n personal website: www.okanbulut.com.\r\n As a passionate R user, I always conduct statistical\r\n and psychometric analysis on educational and psychological\r\n data using R. I often write my own functions but I also\r\n benefit from the existing R packages available on CRAN, GitHub, and other\r\n platforms. 
In 2018, my colleague Chris Desjardins and I\r\n co-authored the Handbook\r\n of Educational Measurement and Psychometrics Using R.\r\n We wanted to present important topics in measurement and\r\n psychometrics as well as their applications in R. Our book\r\n comes with a complimentary package, hemp,\r\n that helps our readers reproduce the content presented in\r\n the book. The hemp package and R script\r\n files for each chapter of our book are available on\r\n GitHub: https://github.com/cddesja/hemp.\r\n In this blog, I hope to continue sharing psychometric\r\n applications using R. In addition, I plan to present new\r\n examples focusing on the state-of-the-art methods in data\r\n science and educational data mining using Python.\r\n Questions, comments, and suggestions from all readers are\r\n welcomed! Lastly, if my blog has helped you, you can buy me\r\n coffee :-)\r\n \r\n \r\n \r\n \r\n\r\n \r\n \r\n \r\n © Copyright Okan Bulut 2021. Made with the R distill package.\r\n \r\n \r\n\r\n \r\n ", - "last_modified": "2024-01-04T14:26:08-07:00" + "contents": "\r\n\r\n \r\n \r\n \r\n \r\n Okan Bulut\r\n \r\n \r\n Home\r\n About\r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n ☰\r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n Okan Bulut\r\n \r\n \r\n \r\n \r\n \r\n \r\n LinkedIn\r\n \r\n \r\n \r\n \r\n \r\n \r\n Twitter\r\n \r\n \r\n \r\n \r\n \r\n \r\n GitHub\r\n \r\n \r\n \r\n \r\n \r\n \r\n Email\r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n Welcome to my blog, Psychometrics and Data\r\n Science with R and Python!\r\n I am an Associate Professor in the Measurement,\r\n Evaluation, and Data Science program at the University of\r\n Alberta. I teach graduate courses and workshops on\r\n psychometrics, machine learning, and statistical modeling\r\n using R. For more information, you can visit my personal\r\n website: www.okanbulut.com.\r\n As a passionate R user, I conduct statistical and\r\n psychometric analysis on educational and psychological data\r\n using R. I often write my own functions but I also benefit\r\n from the existing R packages available on CRAN, GitHub, and other platforms.\r\n In 2018, my colleague Chris Desjardins and I co-authored the\r\n Handbook\r\n of Educational Measurement and Psychometrics Using R. We\r\n wanted to present important topics in measurement and\r\n psychometrics as well as their applications in R. Our book\r\n comes with a complimentary package, hemp,\r\n that helps our readers reproduce the content presented in\r\n the book. The hemp package and R script\r\n files for each chapter of our book are available on GitHub:\r\n https://github.com/cddesja/hemp.\r\n In this blog, I hope to continue sharing psychometric\r\n applications using R. In addition, I also plan to present\r\n new examples focusing on the state-of-the-art methods in\r\n psychometrics, data science, and educational data mining\r\n using Python. Questions, comments, and suggestions from all\r\n readers are welcomed! Lastly, if my blog has helped you, you\r\n can buy me\r\n coffee :-)\r\n \r\n \r\n \r\n \r\n\r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n Okan Bulut\r\n \r\n \r\n \r\n \r\n \r\n \r\n LinkedIn\r\n \r\n \r\n \r\n \r\n Twitter\r\n \r\n \r\n \r\n \r\n GitHub\r\n \r\n \r\n \r\n \r\n Email\r\n \r\n \r\n \r\n \r\n \r\n \r\n Welcome to my blog, Psychometrics and Data\r\n Science with R and Python!\r\n I am an Associate Professor in the Measurement,\r\n Evaluation, and Data Science program at the University of\r\n Alberta. 
I teach graduate courses and workshops on\r\n psychometrics, machine learning, and statistical modeling\r\n using R. For more information, you can visit my personal\r\n website: www.okanbulut.com.\r\n As a passionate R user, I conduct statistical and\r\n psychometric analysis on educational and psychological\r\n data using R. I often write my own functions but I also\r\n benefit from the existing R packages available on CRAN, GitHub, and other\r\n platforms. In 2018, my colleague Chris Desjardins and I\r\n co-authored the Handbook\r\n of Educational Measurement and Psychometrics Using R.\r\n We wanted to present important topics in measurement and\r\n psychometrics as well as their applications in R. Our book\r\n comes with a complimentary package, hemp,\r\n that helps our readers reproduce the content presented in\r\n the book. The hemp package and R script\r\n files for each chapter of our book are available on\r\n GitHub: https://github.com/cddesja/hemp.\r\n In this blog, I hope to continue sharing psychometric\r\n applications using R. In addition, I also plan to present\r\n new examples focusing on the state-of-the-art methods in\r\n psychometrics, data science, and educational data mining\r\n using Python. Questions, comments, and suggestions from\r\n all readers are welcomed! Lastly, if my blog has helped\r\n you, you can buy me\r\n coffee :-)\r\n \r\n \r\n \r\n \r\n\r\n \r\n \r\n \r\n © Copyright Okan Bulut 2024. Made with the R distill package.\r\n \r\n \r\n\r\n \r\n ", + "last_modified": "2024-02-09T10:12:39-07:00" }, { "path": "index.html", "title": "Psychometrics and Data Science with R and Python", "author": [], "contents": "\r\n\r\n\r\n\r\n", - "last_modified": "2024-01-04T14:26:10-07:00" + "last_modified": "2024-02-09T10:12:42-07:00" } ], "collections": ["posts/posts.json"] diff --git a/docs/sitemap.xml b/docs/sitemap.xml index 7b8f714..fea15cc 100644 --- a/docs/sitemap.xml +++ b/docs/sitemap.xml @@ -2,12 +2,16 @@ https://okan.cloud/about.html - 2021-12-09T16:37:49-07:00 + 2024-01-05T15:53:35-07:00 https://okan.cloud/ 2021-12-09T16:36:24-07:00 + + https://okan.cloud/posts/2024-02-09-lexicon-based-sentiment-analysis-using-r/ + 2024-02-09T10:11:54-07:00 + https://okan.cloud/posts/2024-01-04-introduction-to-psychometric-network-analysis/ 2024-01-04T14:25:23-07:00