diff --git a/2013/04/14/what-is-markdown/index.html b/2013/04/14/what-is-markdown/index.html index b3adfd9a..54aaaa8b 100644 --- a/2013/04/14/what-is-markdown/index.html +++ b/2013/04/14/what-is-markdown/index.html @@ -800,18 +800,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -821,18 +821,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -842,18 +842,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -863,18 +863,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -884,18 +884,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2013/05/11/markdonw-syntax-for-block-elements/index.html b/2013/05/11/markdonw-syntax-for-block-elements/index.html index 9fc213ab..43c1d835 100644 --- a/2013/05/11/markdonw-syntax-for-block-elements/index.html +++ b/2013/05/11/markdonw-syntax-for-block-elements/index.html @@ -849,18 +849,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -870,18 +870,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -891,18 +891,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -912,18 +912,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -933,18 +933,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2013/05/21/what-editor-tools-used-for-markdown/index.html b/2013/05/21/what-editor-tools-used-for-markdown/index.html index c85348e8..74f0ff9f 100644 --- a/2013/05/21/what-editor-tools-used-for-markdown/index.html +++ b/2013/05/21/what-editor-tools-used-for-markdown/index.html @@ -844,18 +844,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -865,18 +865,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -886,18 +886,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -907,18 +907,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -928,18 +928,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2013/06/24/markdown-syntax-for-span-elements/index.html b/2013/06/24/markdown-syntax-for-span-elements/index.html index 5fd25b01..8be9752e 100644 --- a/2013/06/24/markdown-syntax-for-span-elements/index.html +++ b/2013/06/24/markdown-syntax-for-span-elements/index.html @@ -881,18 +881,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -902,18 +902,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -923,18 +923,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -944,18 +944,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -965,18 +965,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2013/07/03/backslash-escapes-in-markdown/index.html b/2013/07/03/backslash-escapes-in-markdown/index.html index e86c14ed..74b311c8 100644 --- a/2013/07/03/backslash-escapes-in-markdown/index.html +++ b/2013/07/03/backslash-escapes-in-markdown/index.html @@ -824,18 +824,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -845,18 +845,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -866,18 +866,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -887,18 +887,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -908,18 +908,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2013/10/03/what-is-andriod/index.html b/2013/10/03/what-is-andriod/index.html index 8ea11502..9b3bd2b8 100644 --- a/2013/10/03/what-is-andriod/index.html +++ b/2013/10/03/what-is-andriod/index.html @@ -813,18 +813,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -834,18 +834,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -855,18 +855,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -876,18 +876,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -897,18 +897,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2014/01/03/what-is-git/index.html b/2014/01/03/what-is-git/index.html index 291da18d..f287202f 100644 --- a/2014/01/03/what-is-git/index.html +++ b/2014/01/03/what-is-git/index.html @@ -816,18 +816,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -837,18 +837,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -858,18 +858,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -879,18 +879,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -900,18 +900,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2014/01/16/git-commands-1/index.html b/2014/01/16/git-commands-1/index.html index 8f01a9b5..4efb31f4 100644 --- a/2014/01/16/git-commands-1/index.html +++ b/2014/01/16/git-commands-1/index.html @@ -850,18 +850,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -871,18 +871,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -892,18 +892,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -913,18 +913,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -934,18 +934,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2014/02/09/git-commands-2/index.html b/2014/02/09/git-commands-2/index.html index 3136b592..3d2a2ea0 100644 --- a/2014/02/09/git-commands-2/index.html +++ b/2014/02/09/git-commands-2/index.html @@ -835,18 +835,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -856,18 +856,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -877,18 +877,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -898,18 +898,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -919,18 +919,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2014/02/21/git-commands-3/index.html b/2014/02/21/git-commands-3/index.html index da37f647..60a008ff 100644 --- a/2014/02/21/git-commands-3/index.html +++ b/2014/02/21/git-commands-3/index.html @@ -840,18 +840,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -861,18 +861,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -882,18 +882,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -903,18 +903,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -924,18 +924,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2014/03/03/visual-git-guide-basic-usage/index.html b/2014/03/03/visual-git-guide-basic-usage/index.html index 32bbf3b6..e67914e0 100644 --- a/2014/03/03/visual-git-guide-basic-usage/index.html +++ b/2014/03/03/visual-git-guide-basic-usage/index.html @@ -834,18 +834,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -855,18 +855,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -876,18 +876,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -897,18 +897,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -918,18 +918,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2014/04/13/visual-git-guide-conventions/index.html b/2014/04/13/visual-git-guide-conventions/index.html index 98d61631..8c459f8e 100644 --- a/2014/04/13/visual-git-guide-conventions/index.html +++ b/2014/04/13/visual-git-guide-conventions/index.html @@ -811,18 +811,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -832,18 +832,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -853,18 +853,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -874,18 +874,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -895,18 +895,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2014/04/21/visual-git-guide-checkout-command/index.html b/2014/04/21/visual-git-guide-checkout-command/index.html index 3fc39e7e..cdbab970 100644 --- a/2014/04/21/visual-git-guide-checkout-command/index.html +++ b/2014/04/21/visual-git-guide-checkout-command/index.html @@ -822,18 +822,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -843,18 +843,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -864,18 +864,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -885,18 +885,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -906,18 +906,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2014/05/01/visual-git-guide-cherry-pick-command/index.html b/2014/05/01/visual-git-guide-cherry-pick-command/index.html index a1a95f7f..f5f020f6 100644 --- a/2014/05/01/visual-git-guide-cherry-pick-command/index.html +++ b/2014/05/01/visual-git-guide-cherry-pick-command/index.html @@ -808,18 +808,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -829,18 +829,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -850,18 +850,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -871,18 +871,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -892,18 +892,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2014/05/03/visual-git-guide-commit-command/index.html b/2014/05/03/visual-git-guide-commit-command/index.html index f75bb437..71182a82 100644 --- a/2014/05/03/visual-git-guide-commit-command/index.html +++ b/2014/05/03/visual-git-guide-commit-command/index.html @@ -816,18 +816,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -837,18 +837,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -858,18 +858,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -879,18 +879,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -900,18 +900,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2014/05/20/visual-git-guide-diff-command/index.html b/2014/05/20/visual-git-guide-diff-command/index.html index 67ce3561..37d35bee 100644 --- a/2014/05/20/visual-git-guide-diff-command/index.html +++ b/2014/05/20/visual-git-guide-diff-command/index.html @@ -810,18 +810,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -831,18 +831,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -852,18 +852,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -873,18 +873,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -894,18 +894,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2014/06/11/visual-git-guide-merge-command/index.html b/2014/06/11/visual-git-guide-merge-command/index.html index c22d750f..0a5253d1 100644 --- a/2014/06/11/visual-git-guide-merge-command/index.html +++ b/2014/06/11/visual-git-guide-merge-command/index.html @@ -812,18 +812,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -833,18 +833,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -854,18 +854,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -875,18 +875,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -896,18 +896,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2014/06/13/visual-git-guide-rebase-command/index.html b/2014/06/13/visual-git-guide-rebase-command/index.html index f078552a..72e21844 100644 --- a/2014/06/13/visual-git-guide-rebase-command/index.html +++ b/2014/06/13/visual-git-guide-rebase-command/index.html @@ -814,18 +814,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -835,18 +835,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -856,18 +856,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -877,18 +877,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -898,18 +898,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2014/07/07/visual-git-guide-reset-command/index.html b/2014/07/07/visual-git-guide-reset-command/index.html index 13f71b7b..77a35025 100644 --- a/2014/07/07/visual-git-guide-reset-command/index.html +++ b/2014/07/07/visual-git-guide-reset-command/index.html @@ -815,18 +815,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -836,18 +836,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -857,18 +857,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -878,18 +878,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -899,18 +899,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2014/08/07/visual-git-guide-technical-notes/index.html b/2014/08/07/visual-git-guide-technical-notes/index.html index abec0507..feeb42a6 100644 --- a/2014/08/07/visual-git-guide-technical-notes/index.html +++ b/2014/08/07/visual-git-guide-technical-notes/index.html @@ -811,18 +811,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -832,18 +832,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -853,18 +853,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -874,18 +874,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -895,18 +895,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2014/11/13/building-blocks-of-android-application/index.html b/2014/11/13/building-blocks-of-android-application/index.html index 96baf011..410ded0a 100644 --- a/2014/11/13/building-blocks-of-android-application/index.html +++ b/2014/11/13/building-blocks-of-android-application/index.html @@ -832,18 +832,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -853,18 +853,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -874,18 +874,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -895,18 +895,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -916,18 +916,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2014/11/17/the-lifecycle-of-android-activity/index.html b/2014/11/17/the-lifecycle-of-android-activity/index.html index dba705af..30f5629e 100644 --- a/2014/11/17/the-lifecycle-of-android-activity/index.html +++ b/2014/11/17/the-lifecycle-of-android-activity/index.html @@ -806,18 +806,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -827,18 +827,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -848,18 +848,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -869,18 +869,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -890,18 +890,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2014/11/21/guava-optional/index.html b/2014/11/21/guava-optional/index.html index 6daa9cb3..95056af1 100644 --- a/2014/11/21/guava-optional/index.html +++ b/2014/11/21/guava-optional/index.html @@ -833,18 +833,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -854,18 +854,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -875,18 +875,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -896,18 +896,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -917,18 +917,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2014/11/26/guava-preconditions/index.html b/2014/11/26/guava-preconditions/index.html index 6b360481..354015e2 100644 --- a/2014/11/26/guava-preconditions/index.html +++ b/2014/11/26/guava-preconditions/index.html @@ -816,18 +816,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -837,18 +837,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -858,18 +858,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -879,18 +879,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -900,18 +900,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2014/11/27/guava-common-object-method/index.html b/2014/11/27/guava-common-object-method/index.html index 22b8578f..8296b062 100644 --- a/2014/11/27/guava-common-object-method/index.html +++ b/2014/11/27/guava-common-object-method/index.html @@ -817,18 +817,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -838,18 +838,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -859,18 +859,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -880,18 +880,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -901,18 +901,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2014/12/01/jsp-custom-tag/index.html b/2014/12/01/jsp-custom-tag/index.html index e4323163..eb22b3c1 100644 --- a/2014/12/01/jsp-custom-tag/index.html +++ b/2014/12/01/jsp-custom-tag/index.html @@ -824,18 +824,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -845,18 +845,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -866,18 +866,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -887,18 +887,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -908,18 +908,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2014/12/12/guava-ordering/index.html b/2014/12/12/guava-ordering/index.html index ed7f3620..d5819eb1 100644 --- a/2014/12/12/guava-ordering/index.html +++ b/2014/12/12/guava-ordering/index.html @@ -862,18 +862,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -883,18 +883,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -904,18 +904,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -925,18 +925,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -946,18 +946,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2014/12/18/how-to-learn-any-language-in-six-months/index.html b/2014/12/18/how-to-learn-any-language-in-six-months/index.html index caf3ee58..3a1e5bbf 100644 --- a/2014/12/18/how-to-learn-any-language-in-six-months/index.html +++ b/2014/12/18/how-to-learn-any-language-in-six-months/index.html @@ -863,18 +863,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -884,18 +884,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -905,18 +905,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -926,18 +926,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -947,18 +947,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2014/12/19/what-is-ractive-dot-js/index.html b/2014/12/19/what-is-ractive-dot-js/index.html index e822d4ca..dce76c7f 100644 --- a/2014/12/19/what-is-ractive-dot-js/index.html +++ b/2014/12/19/what-is-ractive-dot-js/index.html @@ -818,18 +818,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -839,18 +839,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -860,18 +860,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -881,18 +881,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -902,18 +902,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2014/12/22/what-is-cucumber/index.html b/2014/12/22/what-is-cucumber/index.html index da84319e..3e71b288 100644 --- a/2014/12/22/what-is-cucumber/index.html +++ b/2014/12/22/what-is-cucumber/index.html @@ -825,18 +825,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -846,18 +846,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -867,18 +867,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -888,18 +888,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -909,18 +909,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2014/12/26/how-to-install-sublime-text-plugin/index.html b/2014/12/26/how-to-install-sublime-text-plugin/index.html index 7f15feb3..37895170 100644 --- a/2014/12/26/how-to-install-sublime-text-plugin/index.html +++ b/2014/12/26/how-to-install-sublime-text-plugin/index.html @@ -815,18 +815,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -836,18 +836,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -857,18 +857,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -878,18 +878,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -899,18 +899,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2015/01/07/how-to-run-multiple-ie-version-on-same-cotim/index.html b/2015/01/07/how-to-run-multiple-ie-version-on-same-cotim/index.html index ecd24fe3..a7fe0428 100644 --- a/2015/01/07/how-to-run-multiple-ie-version-on-same-cotim/index.html +++ b/2015/01/07/how-to-run-multiple-ie-version-on-same-cotim/index.html @@ -820,18 +820,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -841,18 +841,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -862,18 +862,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -883,18 +883,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -904,18 +904,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2015/01/13/a-new-way-for-developing-web-application-front-end/index.html b/2015/01/13/a-new-way-for-developing-web-application-front-end/index.html index a5a48f5c..0b769943 100644 --- a/2015/01/13/a-new-way-for-developing-web-application-front-end/index.html +++ b/2015/01/13/a-new-way-for-developing-web-application-front-end/index.html @@ -842,18 +842,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -863,18 +863,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -884,18 +884,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -905,18 +905,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -926,18 +926,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2015/01/28/conditinal-stylesheets-for-ie-version/index.html b/2015/01/28/conditinal-stylesheets-for-ie-version/index.html index e657db0c..17f44b62 100644 --- a/2015/01/28/conditinal-stylesheets-for-ie-version/index.html +++ b/2015/01/28/conditinal-stylesheets-for-ie-version/index.html @@ -846,18 +846,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -867,18 +867,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -888,18 +888,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -909,18 +909,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -930,18 +930,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2015/01/28/css-tricks-for-ie6-ie7-ie8-ie9/index.html b/2015/01/28/css-tricks-for-ie6-ie7-ie8-ie9/index.html index d801d06c..c3e62387 100644 --- a/2015/01/28/css-tricks-for-ie6-ie7-ie8-ie9/index.html +++ b/2015/01/28/css-tricks-for-ie6-ie7-ie8-ie9/index.html @@ -809,18 +809,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -830,18 +830,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -851,18 +851,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -872,18 +872,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -893,18 +893,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2015/02/06/understading-mvc-mvp-and-mvvm-design-patterns/index.html b/2015/02/06/understading-mvc-mvp-and-mvvm-design-patterns/index.html index 8e9599d8..8a824870 100644 --- a/2015/02/06/understading-mvc-mvp-and-mvvm-design-patterns/index.html +++ b/2015/02/06/understading-mvc-mvp-and-mvvm-design-patterns/index.html @@ -847,18 +847,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -868,18 +868,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -889,18 +889,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -910,18 +910,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -931,18 +931,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2015/02/13/guave-immutable-collection/index.html b/2015/02/13/guave-immutable-collection/index.html index 685071d3..0b39f986 100644 --- a/2015/02/13/guave-immutable-collection/index.html +++ b/2015/02/13/guave-immutable-collection/index.html @@ -822,18 +822,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -843,18 +843,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -864,18 +864,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -885,18 +885,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -906,18 +906,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2015/03/02/a-nice-web-design-tool-bracket/index.html b/2015/03/02/a-nice-web-design-tool-bracket/index.html index 996d6162..010d225e 100644 --- a/2015/03/02/a-nice-web-design-tool-bracket/index.html +++ b/2015/03/02/a-nice-web-design-tool-bracket/index.html @@ -823,18 +823,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -844,18 +844,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -865,18 +865,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -886,18 +886,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -907,18 +907,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2015/03/02/html5-new-common-attributes/index.html b/2015/03/02/html5-new-common-attributes/index.html index 98937031..6b4503bc 100644 --- a/2015/03/02/html5-new-common-attributes/index.html +++ b/2015/03/02/html5-new-common-attributes/index.html @@ -830,18 +830,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -851,18 +851,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -872,18 +872,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -893,18 +893,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -914,18 +914,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2015/03/03/html5-new-common-elements/index.html b/2015/03/03/html5-new-common-elements/index.html index c506d6cb..375cba83 100644 --- a/2015/03/03/html5-new-common-elements/index.html +++ b/2015/03/03/html5-new-common-elements/index.html @@ -823,18 +823,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -844,18 +844,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -865,18 +865,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -886,18 +886,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -907,18 +907,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2015/03/19/give-me-a-cup-of-coffeescript/index.html b/2015/03/19/give-me-a-cup-of-coffeescript/index.html index e0f707c0..6efe2327 100644 --- a/2015/03/19/give-me-a-cup-of-coffeescript/index.html +++ b/2015/03/19/give-me-a-cup-of-coffeescript/index.html @@ -818,18 +818,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -839,18 +839,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -860,18 +860,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -881,18 +881,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -902,18 +902,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2015/05/05/how-to-use-jdbc-in-spring/index.html b/2015/05/05/how-to-use-jdbc-in-spring/index.html index 550e4846..75489efd 100644 --- a/2015/05/05/how-to-use-jdbc-in-spring/index.html +++ b/2015/05/05/how-to-use-jdbc-in-spring/index.html @@ -831,18 +831,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -852,18 +852,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -873,18 +873,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -894,18 +894,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -915,18 +915,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2015/05/16/angularjs-angular-dot-extend/index.html b/2015/05/16/angularjs-angular-dot-extend/index.html index 9e764b0a..bbcf23ae 100644 --- a/2015/05/16/angularjs-angular-dot-extend/index.html +++ b/2015/05/16/angularjs-angular-dot-extend/index.html @@ -840,18 +840,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -861,18 +861,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -882,18 +882,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -903,18 +903,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -924,18 +924,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2015/05/16/sublime-text-keyboard-shortcuts-for-mac/index.html b/2015/05/16/sublime-text-keyboard-shortcuts-for-mac/index.html index 854a2832..cbfc550e 100644 --- a/2015/05/16/sublime-text-keyboard-shortcuts-for-mac/index.html +++ b/2015/05/16/sublime-text-keyboard-shortcuts-for-mac/index.html @@ -1156,18 +1156,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -1177,18 +1177,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -1198,18 +1198,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -1219,18 +1219,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -1240,18 +1240,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2015/05/19/destroy-your-website/index.html b/2015/05/19/destroy-your-website/index.html index 6749342b..b70992db 100644 --- a/2015/05/19/destroy-your-website/index.html +++ b/2015/05/19/destroy-your-website/index.html @@ -830,18 +830,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -851,18 +851,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -872,18 +872,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -893,18 +893,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -914,18 +914,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2015/05/20/analyze-call-function-of-javascript/index.html b/2015/05/20/analyze-call-function-of-javascript/index.html index cc2d3965..fe2d2bd9 100644 --- a/2015/05/20/analyze-call-function-of-javascript/index.html +++ b/2015/05/20/analyze-call-function-of-javascript/index.html @@ -826,18 +826,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -847,18 +847,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -868,18 +868,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -889,18 +889,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -910,18 +910,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2015/06/04/a-cool-theme-for-sublime-text/index.html b/2015/06/04/a-cool-theme-for-sublime-text/index.html index 2e20e340..673bc9fe 100644 --- a/2015/06/04/a-cool-theme-for-sublime-text/index.html +++ b/2015/06/04/a-cool-theme-for-sublime-text/index.html @@ -821,18 +821,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -842,18 +842,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -863,18 +863,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -884,18 +884,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -905,18 +905,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2015/06/04/spring-aop-what-is-aop/index.html b/2015/06/04/spring-aop-what-is-aop/index.html index 31512173..2cab9217 100644 --- a/2015/06/04/spring-aop-what-is-aop/index.html +++ b/2015/06/04/spring-aop-what-is-aop/index.html @@ -817,18 +817,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -838,18 +838,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -859,18 +859,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -880,18 +880,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -901,18 +901,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2015/06/04/the-cobalt2-theme-for-iterm2-and-zsh/index.html b/2015/06/04/the-cobalt2-theme-for-iterm2-and-zsh/index.html index f5e3100d..00b6aa2f 100644 --- a/2015/06/04/the-cobalt2-theme-for-iterm2-and-zsh/index.html +++ b/2015/06/04/the-cobalt2-theme-for-iterm2-and-zsh/index.html @@ -813,18 +813,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -834,18 +834,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -855,18 +855,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -876,18 +876,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -897,18 +897,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2015/06/05/spring-aop-aspect/index.html b/2015/06/05/spring-aop-aspect/index.html index 44e695c8..72c7bd3b 100644 --- a/2015/06/05/spring-aop-aspect/index.html +++ b/2015/06/05/spring-aop-aspect/index.html @@ -813,18 +813,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -834,18 +834,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -855,18 +855,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -876,18 +876,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -897,18 +897,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2015/06/05/spring-aop-before-advice/index.html b/2015/06/05/spring-aop-before-advice/index.html index e3c7b7d2..b14e422b 100644 --- a/2015/06/05/spring-aop-before-advice/index.html +++ b/2015/06/05/spring-aop-before-advice/index.html @@ -850,18 +850,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -871,18 +871,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -892,18 +892,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -913,18 +913,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -934,18 +934,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2015/06/06/spring-aop-after-advice/index.html b/2015/06/06/spring-aop-after-advice/index.html index 5f216ac1..9ee96a2e 100644 --- a/2015/06/06/spring-aop-after-advice/index.html +++ b/2015/06/06/spring-aop-after-advice/index.html @@ -835,18 +835,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -856,18 +856,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -877,18 +877,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -898,18 +898,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -919,18 +919,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2015/06/08/spring-aop-around-advice/index.html b/2015/06/08/spring-aop-around-advice/index.html index 23c80d3a..d3b2aa52 100644 --- a/2015/06/08/spring-aop-around-advice/index.html +++ b/2015/06/08/spring-aop-around-advice/index.html @@ -842,18 +842,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -863,18 +863,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -884,18 +884,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -905,18 +905,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -926,18 +926,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2015/06/08/spring-aop-throws-advice/index.html b/2015/06/08/spring-aop-throws-advice/index.html index 260b4a5a..25c34096 100644 --- a/2015/06/08/spring-aop-throws-advice/index.html +++ b/2015/06/08/spring-aop-throws-advice/index.html @@ -845,18 +845,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -866,18 +866,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -887,18 +887,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -908,18 +908,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -929,18 +929,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2015/06/09/spring-aop-joint-point/index.html b/2015/06/09/spring-aop-joint-point/index.html index 64d5e1e3..e2c34f7b 100644 --- a/2015/06/09/spring-aop-joint-point/index.html +++ b/2015/06/09/spring-aop-joint-point/index.html @@ -815,18 +815,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -836,18 +836,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -857,18 +857,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -878,18 +878,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -899,18 +899,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2015/06/10/spring-aop-pointcut/index.html b/2015/06/10/spring-aop-pointcut/index.html index 41d16954..35ee38dd 100644 --- a/2015/06/10/spring-aop-pointcut/index.html +++ b/2015/06/10/spring-aop-pointcut/index.html @@ -822,18 +822,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -843,18 +843,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -864,18 +864,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -885,18 +885,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -906,18 +906,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2015/06/11/spring-aop-static-pointcut/index.html b/2015/06/11/spring-aop-static-pointcut/index.html index 9d515c20..e88b1660 100644 --- a/2015/06/11/spring-aop-static-pointcut/index.html +++ b/2015/06/11/spring-aop-static-pointcut/index.html @@ -851,18 +851,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -872,18 +872,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -893,18 +893,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -914,18 +914,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -935,18 +935,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2015/06/12/spring-aop-dynamic-pointcut/index.html b/2015/06/12/spring-aop-dynamic-pointcut/index.html index 5c592e7c..461802af 100644 --- a/2015/06/12/spring-aop-dynamic-pointcut/index.html +++ b/2015/06/12/spring-aop-dynamic-pointcut/index.html @@ -837,18 +837,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -858,18 +858,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -879,18 +879,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -900,18 +900,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -921,18 +921,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2015/06/14/spring-aop-xml-schema/index.html b/2015/06/14/spring-aop-xml-schema/index.html index 78e8aaed..e22b320f 100644 --- a/2015/06/14/spring-aop-xml-schema/index.html +++ b/2015/06/14/spring-aop-xml-schema/index.html @@ -864,18 +864,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -885,18 +885,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -906,18 +906,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -927,18 +927,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -948,18 +948,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2015/06/16/spring-aop-at-aspectj-annotation/index.html b/2015/06/16/spring-aop-at-aspectj-annotation/index.html index 0e186568..d9fb95cd 100644 --- a/2015/06/16/spring-aop-at-aspectj-annotation/index.html +++ b/2015/06/16/spring-aop-at-aspectj-annotation/index.html @@ -844,18 +844,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -865,18 +865,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -886,18 +886,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -907,18 +907,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -928,18 +928,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2015/06/30/implicit-submission-of-form-when-pressing-enter-key/index.html b/2015/06/30/implicit-submission-of-form-when-pressing-enter-key/index.html index e2260cca..43a8fc54 100644 --- a/2015/06/30/implicit-submission-of-form-when-pressing-enter-key/index.html +++ b/2015/06/30/implicit-submission-of-form-when-pressing-enter-key/index.html @@ -824,18 +824,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -845,18 +845,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -866,18 +866,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -887,18 +887,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -908,18 +908,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2015/07/30/using-tsd-to-improve-javascript-intellisense-of-ide/index.html b/2015/07/30/using-tsd-to-improve-javascript-intellisense-of-ide/index.html index 71970394..d4678970 100644 --- a/2015/07/30/using-tsd-to-improve-javascript-intellisense-of-ide/index.html +++ b/2015/07/30/using-tsd-to-improve-javascript-intellisense-of-ide/index.html @@ -854,18 +854,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -875,18 +875,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -896,18 +896,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -917,18 +917,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -938,18 +938,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2015/08/06/how-to-get-or-post-data-via-httpclient-with-proxy/index.html b/2015/08/06/how-to-get-or-post-data-via-httpclient-with-proxy/index.html index 5f93d5e4..4d145b8c 100644 --- a/2015/08/06/how-to-get-or-post-data-via-httpclient-with-proxy/index.html +++ b/2015/08/06/how-to-get-or-post-data-via-httpclient-with-proxy/index.html @@ -848,18 +848,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -869,18 +869,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -890,18 +890,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -911,18 +911,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -932,18 +932,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2015/08/12/ruby-practice-1/index.html b/2015/08/12/ruby-practice-1/index.html index 2bc8d255..27f8ac15 100644 --- a/2015/08/12/ruby-practice-1/index.html +++ b/2015/08/12/ruby-practice-1/index.html @@ -893,18 +893,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -914,18 +914,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -935,18 +935,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -956,18 +956,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -977,18 +977,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2015/08/13/ruby-practice-2/index.html b/2015/08/13/ruby-practice-2/index.html index 8ce8dbca..8562340d 100644 --- a/2015/08/13/ruby-practice-2/index.html +++ b/2015/08/13/ruby-practice-2/index.html @@ -898,18 +898,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -919,18 +919,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -940,18 +940,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -961,18 +961,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -982,18 +982,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2015/08/14/ruby-practice-3/index.html b/2015/08/14/ruby-practice-3/index.html index 8adc70d0..2370bbff 100644 --- a/2015/08/14/ruby-practice-3/index.html +++ b/2015/08/14/ruby-practice-3/index.html @@ -884,18 +884,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -905,18 +905,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -926,18 +926,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -947,18 +947,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -968,18 +968,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2015/08/17/ruby-practice-4/index.html b/2015/08/17/ruby-practice-4/index.html index 86919d07..a11134c8 100644 --- a/2015/08/17/ruby-practice-4/index.html +++ b/2015/08/17/ruby-practice-4/index.html @@ -894,18 +894,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -915,18 +915,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -936,18 +936,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -957,18 +957,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -978,18 +978,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2015/08/24/ruby-practice-5/index.html b/2015/08/24/ruby-practice-5/index.html index dfc0149e..27273e5e 100644 --- a/2015/08/24/ruby-practice-5/index.html +++ b/2015/08/24/ruby-practice-5/index.html @@ -863,18 +863,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -884,18 +884,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -905,18 +905,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -926,18 +926,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -947,18 +947,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2015/08/25/ruby-practice-6/index.html b/2015/08/25/ruby-practice-6/index.html index 79ed60b7..b05ee237 100644 --- a/2015/08/25/ruby-practice-6/index.html +++ b/2015/08/25/ruby-practice-6/index.html @@ -926,18 +926,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -947,18 +947,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -968,18 +968,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -989,18 +989,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -1010,18 +1010,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2015/09/01/ruby-practice-7/index.html b/2015/09/01/ruby-practice-7/index.html index e25cb370..dd29e0fb 100644 --- a/2015/09/01/ruby-practice-7/index.html +++ b/2015/09/01/ruby-practice-7/index.html @@ -863,18 +863,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -884,18 +884,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -905,18 +905,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -926,18 +926,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -947,18 +947,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2015/09/06/ruby-practice-8/index.html b/2015/09/06/ruby-practice-8/index.html index 81477413..a8bc225c 100644 --- a/2015/09/06/ruby-practice-8/index.html +++ b/2015/09/06/ruby-practice-8/index.html @@ -842,18 +842,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -863,18 +863,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -884,18 +884,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -905,18 +905,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -926,18 +926,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2015/09/09/ruby-practice-9/index.html b/2015/09/09/ruby-practice-9/index.html index 58de1498..e878c868 100644 --- a/2015/09/09/ruby-practice-9/index.html +++ b/2015/09/09/ruby-practice-9/index.html @@ -912,18 +912,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -933,18 +933,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -954,18 +954,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -975,18 +975,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -996,18 +996,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2015/09/15/ruby-practice-10/index.html b/2015/09/15/ruby-practice-10/index.html index 39b450df..7d80cb98 100644 --- a/2015/09/15/ruby-practice-10/index.html +++ b/2015/09/15/ruby-practice-10/index.html @@ -853,18 +853,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -874,18 +874,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -895,18 +895,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -916,18 +916,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -937,18 +937,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2015/09/23/the-initialization-of-the-angular-ngmodel/index.html b/2015/09/23/the-initialization-of-the-angular-ngmodel/index.html index 373804ba..120a7eaa 100644 --- a/2015/09/23/the-initialization-of-the-angular-ngmodel/index.html +++ b/2015/09/23/the-initialization-of-the-angular-ngmodel/index.html @@ -838,18 +838,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -859,18 +859,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -880,18 +880,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -901,18 +901,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -922,18 +922,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2015/09/28/ruby-practice-11/index.html b/2015/09/28/ruby-practice-11/index.html index 2f341219..71ba5222 100644 --- a/2015/09/28/ruby-practice-11/index.html +++ b/2015/09/28/ruby-practice-11/index.html @@ -877,18 +877,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -898,18 +898,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -919,18 +919,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -940,18 +940,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -961,18 +961,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2015/10/13/ruby-practice-12/index.html b/2015/10/13/ruby-practice-12/index.html index 0df63d03..96faa451 100644 --- a/2015/10/13/ruby-practice-12/index.html +++ b/2015/10/13/ruby-practice-12/index.html @@ -931,18 +931,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -952,18 +952,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -973,18 +973,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -994,18 +994,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -1015,18 +1015,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2015/10/26/ruby-practice-13/index.html b/2015/10/26/ruby-practice-13/index.html index 03adc84d..54f7b88e 100644 --- a/2015/10/26/ruby-practice-13/index.html +++ b/2015/10/26/ruby-practice-13/index.html @@ -835,18 +835,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -856,18 +856,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -877,18 +877,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -898,18 +898,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -919,18 +919,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2015/11/11/how-to-make-the-octopress-adapt-os-x-ei-capitan/index.html b/2015/11/11/how-to-make-the-octopress-adapt-os-x-ei-capitan/index.html index 2f776387..7b6890ff 100644 --- a/2015/11/11/how-to-make-the-octopress-adapt-os-x-ei-capitan/index.html +++ b/2015/11/11/how-to-make-the-octopress-adapt-os-x-ei-capitan/index.html @@ -858,18 +858,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -879,18 +879,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -900,18 +900,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -921,18 +921,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -942,18 +942,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2015/11/12/ruby-practice-14/index.html b/2015/11/12/ruby-practice-14/index.html index a708e190..203f8474 100644 --- a/2015/11/12/ruby-practice-14/index.html +++ b/2015/11/12/ruby-practice-14/index.html @@ -843,18 +843,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -864,18 +864,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -885,18 +885,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -906,18 +906,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -927,18 +927,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2015/11/17/ruby-practice-15/index.html b/2015/11/17/ruby-practice-15/index.html index 2c290a2a..d44c771e 100644 --- a/2015/11/17/ruby-practice-15/index.html +++ b/2015/11/17/ruby-practice-15/index.html @@ -852,18 +852,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -873,18 +873,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -894,18 +894,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -915,18 +915,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -936,18 +936,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2015/11/18/ruby-practice-16/index.html b/2015/11/18/ruby-practice-16/index.html index 875334fe..311f1fe3 100644 --- a/2015/11/18/ruby-practice-16/index.html +++ b/2015/11/18/ruby-practice-16/index.html @@ -863,18 +863,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -884,18 +884,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -905,18 +905,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -926,18 +926,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -947,18 +947,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2015/11/22/code-review-with-pull-request/index.html b/2015/11/22/code-review-with-pull-request/index.html index a33aa099..0ce7e1f6 100644 --- a/2015/11/22/code-review-with-pull-request/index.html +++ b/2015/11/22/code-review-with-pull-request/index.html @@ -826,18 +826,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -847,18 +847,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -868,18 +868,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -889,18 +889,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -910,18 +910,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2015/12/05/using-nginx-in-mac-os-x/index.html b/2015/12/05/using-nginx-in-mac-os-x/index.html index 262d1e7f..4315167f 100644 --- a/2015/12/05/using-nginx-in-mac-os-x/index.html +++ b/2015/12/05/using-nginx-in-mac-os-x/index.html @@ -846,18 +846,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -867,18 +867,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -888,18 +888,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -909,18 +909,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -930,18 +930,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2015/12/17/ruby-practice-17/index.html b/2015/12/17/ruby-practice-17/index.html index 2e90d91a..978ae955 100644 --- a/2015/12/17/ruby-practice-17/index.html +++ b/2015/12/17/ruby-practice-17/index.html @@ -844,18 +844,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -865,18 +865,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -886,18 +886,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -907,18 +907,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -928,18 +928,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2015/12/28/ruby-practice-18/index.html b/2015/12/28/ruby-practice-18/index.html index 4e5d537d..9c283325 100644 --- a/2015/12/28/ruby-practice-18/index.html +++ b/2015/12/28/ruby-practice-18/index.html @@ -867,18 +867,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -888,18 +888,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -909,18 +909,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -930,18 +930,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -951,18 +951,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2016/01/12/how-to-fix-gem-filepermissionerror/index.html b/2016/01/12/how-to-fix-gem-filepermissionerror/index.html index 248ad6d2..cc8505ba 100644 --- a/2016/01/12/how-to-fix-gem-filepermissionerror/index.html +++ b/2016/01/12/how-to-fix-gem-filepermissionerror/index.html @@ -829,18 +829,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -850,18 +850,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -871,18 +871,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -892,18 +892,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -913,18 +913,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2016/01/25/shuang-zhi-zhen-suan-fa/index.html b/2016/01/25/shuang-zhi-zhen-suan-fa/index.html index 9c4736ad..5f41cf04 100644 --- a/2016/01/25/shuang-zhi-zhen-suan-fa/index.html +++ b/2016/01/25/shuang-zhi-zhen-suan-fa/index.html @@ -817,18 +817,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -838,18 +838,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -859,18 +859,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -880,18 +880,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -901,18 +901,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2016/02/05/shuang-zhi-zhen-suan-fa-zhi-liang-shu-zhi-he/index.html b/2016/02/05/shuang-zhi-zhen-suan-fa-zhi-liang-shu-zhi-he/index.html index 916241da..55c86270 100644 --- a/2016/02/05/shuang-zhi-zhen-suan-fa-zhi-liang-shu-zhi-he/index.html +++ b/2016/02/05/shuang-zhi-zhen-suan-fa-zhi-liang-shu-zhi-he/index.html @@ -818,18 +818,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -839,18 +839,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -860,18 +860,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -881,18 +881,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -902,18 +902,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2016/03/01/minimum-size-subarray-sum/index.html b/2016/03/01/minimum-size-subarray-sum/index.html index 2ff295bb..d08a2597 100644 --- a/2016/03/01/minimum-size-subarray-sum/index.html +++ b/2016/03/01/minimum-size-subarray-sum/index.html @@ -822,18 +822,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -843,18 +843,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -864,18 +864,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -885,18 +885,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -906,18 +906,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2017/02/23/yi-ge-mian-fei-gao-zhi-liang-de-tu-pian-fen-xiang-ping-tai/index.html b/2017/02/23/yi-ge-mian-fei-gao-zhi-liang-de-tu-pian-fen-xiang-ping-tai/index.html index 92a01378..5bddde71 100644 --- a/2017/02/23/yi-ge-mian-fei-gao-zhi-liang-de-tu-pian-fen-xiang-ping-tai/index.html +++ b/2017/02/23/yi-ge-mian-fei-gao-zhi-liang-de-tu-pian-fen-xiang-ping-tai/index.html @@ -816,18 +816,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -837,18 +837,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -858,18 +858,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -879,18 +879,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -900,18 +900,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2017/03/16/liao-liao-qian-duan-you-hua-shou-duan-fang-dou/index.html b/2017/03/16/liao-liao-qian-duan-you-hua-shou-duan-fang-dou/index.html index bb84bc60..5dadf34e 100644 --- a/2017/03/16/liao-liao-qian-duan-you-hua-shou-duan-fang-dou/index.html +++ b/2017/03/16/liao-liao-qian-duan-you-hua-shou-duan-fang-dou/index.html @@ -816,18 +816,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -837,18 +837,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -858,18 +858,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -879,18 +879,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -900,18 +900,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2017/04/30/solid-principle/index.html b/2017/04/30/solid-principle/index.html index 4263e92d..cdd87474 100644 --- a/2017/04/30/solid-principle/index.html +++ b/2017/04/30/solid-principle/index.html @@ -835,18 +835,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -856,18 +856,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -877,18 +877,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -898,18 +898,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -919,18 +919,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2017/05/19/design-pattern-singleton/index.html b/2017/05/19/design-pattern-singleton/index.html index 14f2810e..e31fe844 100644 --- a/2017/05/19/design-pattern-singleton/index.html +++ b/2017/05/19/design-pattern-singleton/index.html @@ -824,18 +824,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -845,18 +845,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -866,18 +866,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -887,18 +887,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -908,18 +908,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2018/01/05/java-shuang-qin-weipai-jizi/index.html b/2018/01/05/java-shuang-qin-weipai-jizi/index.html index 98b16a4d..a914c34e 100644 --- a/2018/01/05/java-shuang-qin-weipai-jizi/index.html +++ b/2018/01/05/java-shuang-qin-weipai-jizi/index.html @@ -820,18 +820,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -841,18 +841,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -862,18 +862,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -883,18 +883,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -904,18 +904,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2018/02/17/docker-vs-virtual-machine/index.html b/2018/02/17/docker-vs-virtual-machine/index.html index d0b28da7..43d4c076 100644 --- a/2018/02/17/docker-vs-virtual-machine/index.html +++ b/2018/02/17/docker-vs-virtual-machine/index.html @@ -858,18 +858,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -879,18 +879,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -900,18 +900,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -921,18 +921,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -942,18 +942,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2019/02/16/kafka-high-throughput-low-latency/index.html b/2019/02/16/kafka-high-throughput-low-latency/index.html index 73962492..d4b43db4 100644 --- a/2019/02/16/kafka-high-throughput-low-latency/index.html +++ b/2019/02/16/kafka-high-throughput-low-latency/index.html @@ -820,18 +820,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -841,18 +841,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -862,18 +862,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -883,18 +883,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -904,18 +904,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2019/04/26/what-is-apache-flink/index.html b/2019/04/26/what-is-apache-flink/index.html index bab6f9e1..35e843d8 100644 --- a/2019/04/26/what-is-apache-flink/index.html +++ b/2019/04/26/what-is-apache-flink/index.html @@ -835,18 +835,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -856,18 +856,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -877,18 +877,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -898,18 +898,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -919,18 +919,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2019/07/02/microservice-and-service-mesh-for-modern-application/index.html b/2019/07/02/microservice-and-service-mesh-for-modern-application/index.html index fa9f5809..b5ba5c60 100644 --- a/2019/07/02/microservice-and-service-mesh-for-modern-application/index.html +++ b/2019/07/02/microservice-and-service-mesh-for-modern-application/index.html @@ -820,18 +820,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -841,18 +841,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -862,18 +862,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -883,18 +883,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -904,18 +904,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2020/01/31/mysql-master-slave/index.html b/2020/01/31/mysql-master-slave/index.html index 3a3ac2b7..fd819fb7 100644 --- a/2020/01/31/mysql-master-slave/index.html +++ b/2020/01/31/mysql-master-slave/index.html @@ -832,18 +832,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -853,18 +853,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -874,18 +874,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -895,18 +895,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -916,18 +916,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2020/03/19/error-got-permission-denied-while-trying-to-connect-to-the-docker-daemon-socket/index.html b/2020/03/19/error-got-permission-denied-while-trying-to-connect-to-the-docker-daemon-socket/index.html index 4b2f0238..55fc09a1 100644 --- a/2020/03/19/error-got-permission-denied-while-trying-to-connect-to-the-docker-daemon-socket/index.html +++ b/2020/03/19/error-got-permission-denied-while-trying-to-connect-to-the-docker-daemon-socket/index.html @@ -836,18 +836,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -857,18 +857,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -878,18 +878,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -899,18 +899,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -920,18 +920,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2020/04/05/understanding-css-units-px-rem-em-vh-vw-ch/index.html b/2020/04/05/understanding-css-units-px-rem-em-vh-vw-ch/index.html index 2e07aa80..57327e15 100644 --- a/2020/04/05/understanding-css-units-px-rem-em-vh-vw-ch/index.html +++ b/2020/04/05/understanding-css-units-px-rem-em-vh-vw-ch/index.html @@ -835,18 +835,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -856,18 +856,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -877,18 +877,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -898,18 +898,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -919,18 +919,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2020/05/11/apache-parquet-efficient-storage-format/index.html b/2020/05/11/apache-parquet-efficient-storage-format/index.html index 3c0fe4eb..254c91ed 100644 --- a/2020/05/11/apache-parquet-efficient-storage-format/index.html +++ b/2020/05/11/apache-parquet-efficient-storage-format/index.html @@ -823,18 +823,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -844,18 +844,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -865,18 +865,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -886,18 +886,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -907,18 +907,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2020/05/23/what-is-apache-iceberg/index.html b/2020/05/23/what-is-apache-iceberg/index.html index acec86b9..04ab1338 100644 --- a/2020/05/23/what-is-apache-iceberg/index.html +++ b/2020/05/23/what-is-apache-iceberg/index.html @@ -871,18 +871,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -892,18 +892,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -913,18 +913,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -934,18 +934,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -955,18 +955,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2020/06/13/data-lake-vs-data-warehouse/index.html b/2020/06/13/data-lake-vs-data-warehouse/index.html index 0e0b92b4..ef7879b2 100644 --- a/2020/06/13/data-lake-vs-data-warehouse/index.html +++ b/2020/06/13/data-lake-vs-data-warehouse/index.html @@ -841,18 +841,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -862,18 +862,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -883,18 +883,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -904,18 +904,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -925,18 +925,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2020/07/05/ai-ml-dl/index.html b/2020/07/05/ai-ml-dl/index.html index 04d1b824..893c016f 100644 --- a/2020/07/05/ai-ml-dl/index.html +++ b/2020/07/05/ai-ml-dl/index.html @@ -821,18 +821,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -842,18 +842,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -863,18 +863,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -884,18 +884,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -905,18 +905,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2020/08/14/data-analysis-mean-median-mode/index.html b/2020/08/14/data-analysis-mean-median-mode/index.html index 73965929..45826de7 100644 --- a/2020/08/14/data-analysis-mean-median-mode/index.html +++ b/2020/08/14/data-analysis-mean-median-mode/index.html @@ -637,6 +637,10 @@

总结 + + Python + + @@ -690,15 +694,15 @@

总结
- +
- +
Next: -
Kubernetes - 现代容器编排平台
+
详解数据分析中的方差,标准差和异常值的使用
@@ -869,18 +873,18 @@

Andrewsy

- - +
- +
- 详解数据分析中的方差,标准差和异常值的使用 + Introduction to LangChain: Make AI Smarter and Easier to use
@@ -890,18 +894,18 @@

Andrewsy

- - +
- +
- Introduction to LangChain: Make AI Smarter and Easier to use + What is Prompt Engineering? Best Practices and Examples
@@ -911,18 +915,18 @@

Andrewsy

- - +
- +
- What is Prompt Engineering? Best Practices and Examples + Understanding the Azure OpenAI GPT-4 API Chat Role Usage
@@ -932,18 +936,18 @@

Andrewsy

- - +
- +
- Understanding the Azure OpenAI GPT-4 API Chat Role Usage + Data Analysis Chart by Generative AI
@@ -953,18 +957,18 @@

Andrewsy

- - +
- +
- Data Analysis Chart by Generative AI + The Intelligent SQL Generator base on Spring AI with AWS Bedrock
diff --git a/2024/11/15/data-analysis-standard-deviation-variance-outliers/index.html b/2020/09/03/data-analysis-standard-deviation-variance-outliers/index.html similarity index 97% rename from 2024/11/15/data-analysis-standard-deviation-variance-outliers/index.html rename to 2020/09/03/data-analysis-standard-deviation-variance-outliers/index.html index 6b9ac85d..4761ac83 100644 --- a/2024/11/15/data-analysis-standard-deviation-variance-outliers/index.html +++ b/2020/09/03/data-analysis-standard-deviation-variance-outliers/index.html @@ -516,7 +516,7 @@

- Published in:2024-11-15 | + Published in:2020-09-03 | @@ -637,6 +637,10 @@

总结 + + Python + + @@ -648,7 +652,7 @@

总结
  • Link: - https://stonefishy.github.io/2024/11/15/data-analysis-standard-deviation-variance-outliers/ + https://stonefishy.github.io/2020/09/03/data-analysis-standard-deviation-variance-outliers/
  • Copyright notice: @@ -670,24 +674,39 @@

    总结
    - +
    - +
    Prev: -
    Introduction to LangChain: Make AI Smarter and Easier to use
    +
    数据分析中的均值、中央値与众数
    + +
    +
    + +
    + +
    + Next: +
    data-analysis-z-score-for-outliers
    +
    + +
    +
    + @@ -854,18 +873,18 @@

    Andrewsy

    - - +
    @@ -875,18 +894,18 @@

    Andrewsy

    - - +
    @@ -896,18 +915,18 @@

    Andrewsy

    - - +
    @@ -917,18 +936,18 @@

    Andrewsy

    - - +
    @@ -938,18 +957,18 @@

    Andrewsy

    - - +
    diff --git a/2020/09/18/data-analysis-z-score-for-outliers/index.html b/2020/09/18/data-analysis-z-score-for-outliers/index.html new file mode 100644 index 00000000..c61db471 --- /dev/null +++ b/2020/09/18/data-analysis-z-score-for-outliers/index.html @@ -0,0 +1,2047 @@ + + + + + + + + + + + + + + data-analysis-z-score-for-outliers | Andrewsy's Space + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    +
    +
    +
    +
    +
    +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    +
    + + + + + + + + + +
    + + + + + + + +
    +
    + + +
    +
    + + data-analysis-z-score-for-outliers + +
    + + + + + Published in:2020-09-18 | + + + + + + Category: + + + Data Analysis + + + + + + +
    + + Words: 940 | + + + + Reading time: 3min + + + +
    + + +
    + + + + +
    + + + + + +
    +
    +
    +
    +

    Z-score(标准分数)

    Z-score(标准分数)是数据分析中常用的一个标准化指标,用于衡量某个数据点与其所在数据集的平均值之间的偏离程度。具体而言,它表示一个数据点距离数据集均值的标准差数值。Z-score 计算的基本原理是将数据点转化为标准化值,从而便于比较不同数据集中的数据点。

    +

    Z-score的计算公式

    + +

    其中:

    +
    + :单个数据点的值。 +
    +
    + :数据集的均值(平均值)。 +
    +
    + :数据集的标准差。 +
    +

    解释

      +
    • :表示数据点 与数据集均值 的偏差,或称为该数据点与均值的差异。
    • +
    • :标准差,衡量数据集的离散程度。标准差越大,说明数据点之间的差异越大;标准差越小,说明数据点之间的差异越小。
    • +
    +

    Z-score 的意义

    Z-score 的意义主要有以下几点:

    +
      +
    • Z-score = 0:表示数据点与均值完全一致。
    • +
    • Z-score > 0:表示数据点大于均值,即位于均值右侧。
    • +
    • Z-score < 0:表示数据点小于均值,即位于均值左侧。
    • +
    • Z-score 的绝对值较大:表示数据点距离均值较远,离群点的可能性较大;例如,Z-score 大于 3 或小于 -3 的数据点通常被认为是异常值。
    • +
    +

    Z-score 的主要应用

      +
    1. 标准化数据:通过 Z-score 可以将数据标准化,使得不同量纲、不同范围的数据具有可比性。例如,在机器学习中,许多算法(如 KNN、SVM)对数据的尺度敏感,因此需要对数据进行标准化处理。

      +
    2. +
    3. 异常值检测:Z-score 可以用来识别数据中的异常值。通常,当 Z-score 的绝对值大于 3 时,认为该数据点为异常值(离群点)。

      +
    4. +
    5. 比较不同数据集:如果两个数据集具有不同的均值和标准差,可以使用 Z-score 对它们进行比较,便于判断哪个数据点在其各自数据集中的位置更加偏离均值。

      +
    6. +
    +

    举个例子

    假设我们有一个包含学生成绩的数据集 [80, 85, 90, 92, 85, 88, 75, 78, 92, 95, 100, 85, 92, 88, 85, 200]。我们希望计算数据点 200 的 Z-score,并判断它是否为异常值。

    +

    数学计算公式

      +
    1. 计算均值

      + +
    2. +
    3. 计算标准差
      标准差的计算公式为:

      + +

      代入数据集计算:

      + +
    4. +
    5. 计算 Z-score

      +
    6. +
    +

    Python 代码

    在Python中,我们可以使用 numpyscipy 库来计算 Z-score。

    +
    1
    2
    3
    4
    5
    6
    7
    import numpy as np
    import pandas as pd
    from scipy import stats
    import matplotlib.pyplot as plt

    # 创建数据集:一组学生成绩,其中包括异常值200
    data = [80, 85, 90, 92, 85, 88, 75, 78, 92, 95, 100, 85, 92, 88, 85, 200]
    + +

    计算数据集的均值和标准差和Z-score:

    +
    1
    2
    3
    4
    5
    6
    7
    8
    9
    10
    11
    # 计算均值
    mean = np.mean(data)
    print("均值 (Mean):", mean)

    # 计算标准差
    std = np.std(data)
    print("标准差 (Standard Deviation):", std)

    # 计算 Z-score
    z_scores = stats.zscore(data)
    print("200数值的Z-score:", z_scores[-1])
    + +

    输出:

    +
    1
    2
    3
    均值 (Mean): 94.375
    标准差 (Standard Deviation): 27.959513139538036
    200数值的Z-score: 3.7777839504162842
    + +

    所以,数据点 200 的 Z-score 为 3.7777839504162842,按照 Z-score 的定义,它位于数据集的右侧,距离均值较远 ( Z-score 大于3或者小于-3 ),因此被认为是异常值。

    +

    总结

    Z-score 是衡量数据点相对于数据集平均水平偏差程度的标准化指标,广泛应用于数据预处理异常值检测以及统计分析等领域。通过 Z-score,我们可以将不同规模、不同单位的数据转换为相同的标准尺度,方便进行比较和进一步分析。

    + +
    + + +
    + + + +
    + + + + +
    +
    + +
    + +
    + Prev: +
    详解数据分析中的方差,标准差和异常值的使用
    +
    + + +
    +
    + + + + +
    +
    + +
    + +
    + Next: +
    Kubernetes - 现代容器编排平台
    +
    + +
    +
    + +
    + + + + + + + + + + + + + + + + + + + + + + + + +
    + +
    + + +
    + + + + +
    + + + + + + +
    + + +
    + +
  • + +
  • +
    +
    + + + + + + + + + + +
    +
    + + + + + + +
    + + + + + + + + + + +
    + +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    + +
    + + + +
    + + + + + + + + + \ No newline at end of file diff --git a/2021/01/03/kubernetes-a-modern-container-orchestration-platform/index.html b/2021/01/03/kubernetes-a-modern-container-orchestration-platform/index.html index 684d1c00..f678ff98 100644 --- a/2021/01/03/kubernetes-a-modern-container-orchestration-platform/index.html +++ b/2021/01/03/kubernetes-a-modern-container-orchestration-platform/index.html @@ -658,16 +658,16 @@

    最后
    - +
    - +
    Prev: -
    数据分析中的均值、中央値与众数
    +
    data-analysis-z-score-for-outliers
    @@ -857,18 +857,18 @@

    Andrewsy

    - - +
    @@ -878,18 +878,18 @@

    Andrewsy

    - - +
    @@ -899,18 +899,18 @@

    Andrewsy

    - - +
    @@ -920,18 +920,18 @@

    Andrewsy

    - - +
    @@ -941,18 +941,18 @@

    Andrewsy

    - - +
    diff --git a/2021/02/15/deep-understanding-the-resource-type-of-the-kubernetes/index.html b/2021/02/15/deep-understanding-the-resource-type-of-the-kubernetes/index.html index 2d5cd213..22cad8a7 100644 --- a/2021/02/15/deep-understanding-the-resource-type-of-the-kubernetes/index.html +++ b/2021/02/15/deep-understanding-the-resource-type-of-the-kubernetes/index.html @@ -882,18 +882,18 @@

    Andrewsy

    - - +
    @@ -903,18 +903,18 @@

    Andrewsy

    - - +
    @@ -924,18 +924,18 @@

    Andrewsy

    - - +
    @@ -945,18 +945,18 @@

    Andrewsy

    - - +
    @@ -966,18 +966,18 @@

    Andrewsy

    - - +
    diff --git a/2021/03/02/kubernetes-rbac/index.html b/2021/03/02/kubernetes-rbac/index.html index 1c33d35f..b720a38f 100644 --- a/2021/03/02/kubernetes-rbac/index.html +++ b/2021/03/02/kubernetes-rbac/index.html @@ -836,18 +836,18 @@

    Andrewsy

    - - +
    @@ -857,18 +857,18 @@

    Andrewsy

    - - +
    @@ -878,18 +878,18 @@

    Andrewsy

    - - +
    @@ -899,18 +899,18 @@

    Andrewsy

    - - +
    @@ -920,18 +920,18 @@

    Andrewsy

    - - +
    diff --git a/2021/03/22/kubernetes-cluster-role-for-service-account/index.html b/2021/03/22/kubernetes-cluster-role-for-service-account/index.html index 0cbb82cc..aab1e33a 100644 --- a/2021/03/22/kubernetes-cluster-role-for-service-account/index.html +++ b/2021/03/22/kubernetes-cluster-role-for-service-account/index.html @@ -876,18 +876,18 @@

    Andrewsy

    - - +
    @@ -897,18 +897,18 @@

    Andrewsy

    - - +
    @@ -918,18 +918,18 @@

    Andrewsy

    - - +
    @@ -939,18 +939,18 @@

    Andrewsy

    - - +
    @@ -960,18 +960,18 @@

    Andrewsy

    - - +
    diff --git a/2021/04/12/kubernetes-scale-deployment/index.html b/2021/04/12/kubernetes-scale-deployment/index.html index 5038f86a..b090bb01 100644 --- a/2021/04/12/kubernetes-scale-deployment/index.html +++ b/2021/04/12/kubernetes-scale-deployment/index.html @@ -853,18 +853,18 @@

    Andrewsy

    - - +
    @@ -874,18 +874,18 @@

    Andrewsy

    - - +
    @@ -895,18 +895,18 @@

    Andrewsy

    - - +
    @@ -916,18 +916,18 @@

    Andrewsy

    - - +
    @@ -937,18 +937,18 @@

    Andrewsy

    - - +
    diff --git a/2021/05/03/metrics-api-not-available-in-kubernetes-minikube/index.html b/2021/05/03/metrics-api-not-available-in-kubernetes-minikube/index.html index 47c370dc..00d071c2 100644 --- a/2021/05/03/metrics-api-not-available-in-kubernetes-minikube/index.html +++ b/2021/05/03/metrics-api-not-available-in-kubernetes-minikube/index.html @@ -857,18 +857,18 @@

    Andrewsy

    - - +
    @@ -878,18 +878,18 @@

    Andrewsy

    - - +
    @@ -899,18 +899,18 @@

    Andrewsy

    - - +
    @@ -920,18 +920,18 @@

    Andrewsy

    - - +
    @@ -941,18 +941,18 @@

    Andrewsy

    - - +
    diff --git a/2021/05/17/how-to-set-up-network-policy-to-allow-access-pods-of-namespace-from-another-namespace-in-kubernetes/index.html b/2021/05/17/how-to-set-up-network-policy-to-allow-access-pods-of-namespace-from-another-namespace-in-kubernetes/index.html index cab9cf82..3b89d1a1 100644 --- a/2021/05/17/how-to-set-up-network-policy-to-allow-access-pods-of-namespace-from-another-namespace-in-kubernetes/index.html +++ b/2021/05/17/how-to-set-up-network-policy-to-allow-access-pods-of-namespace-from-another-namespace-in-kubernetes/index.html @@ -879,18 +879,18 @@

    Andrewsy

    - - +
    @@ -900,18 +900,18 @@

    Andrewsy

    - - +
    @@ -921,18 +921,18 @@

    Andrewsy

    - - +
    @@ -942,18 +942,18 @@

    Andrewsy

    - - +
    @@ -963,18 +963,18 @@

    Andrewsy

    - - +
    diff --git a/2021/06/09/kubernetes-expose-service/index.html b/2021/06/09/kubernetes-expose-service/index.html index 13b05da0..1eb715a7 100644 --- a/2021/06/09/kubernetes-expose-service/index.html +++ b/2021/06/09/kubernetes-expose-service/index.html @@ -844,18 +844,18 @@

    Andrewsy

    - - +
    @@ -865,18 +865,18 @@

    Andrewsy

    - - +
    @@ -886,18 +886,18 @@

    Andrewsy

    - - +
    @@ -907,18 +907,18 @@

    Andrewsy

    - - +
    @@ -928,18 +928,18 @@

    Andrewsy

    - - +
    diff --git a/2021/06/28/how-to-maintan-the-running-node-in-kubernetes/index.html b/2021/06/28/how-to-maintan-the-running-node-in-kubernetes/index.html index 3b300706..ebaddb66 100644 --- a/2021/06/28/how-to-maintan-the-running-node-in-kubernetes/index.html +++ b/2021/06/28/how-to-maintan-the-running-node-in-kubernetes/index.html @@ -856,18 +856,18 @@

    Andrewsy

    - - +
    @@ -877,18 +877,18 @@

    Andrewsy

    - - +
    @@ -898,18 +898,18 @@

    Andrewsy

    - - +
    @@ -919,18 +919,18 @@

    Andrewsy

    - - +
    @@ -940,18 +940,18 @@

    Andrewsy

    - - +
    diff --git a/2021/07/13/iac-tools-terraform-vs-pulumi/index.html b/2021/07/13/iac-tools-terraform-vs-pulumi/index.html index 40734793..4025141b 100644 --- a/2021/07/13/iac-tools-terraform-vs-pulumi/index.html +++ b/2021/07/13/iac-tools-terraform-vs-pulumi/index.html @@ -881,18 +881,18 @@

    Andrewsy

    - - +
    @@ -902,18 +902,18 @@

    Andrewsy

    - - +
    @@ -923,18 +923,18 @@

    Andrewsy

    - - +
    @@ -944,18 +944,18 @@

    Andrewsy

    - - +
    @@ -965,18 +965,18 @@

    Andrewsy

    - - +
    diff --git a/2022/02/17/aws-iamyuan-li-yi-ji-ru-he-shi-yong/index.html b/2022/02/17/aws-iamyuan-li-yi-ji-ru-he-shi-yong/index.html index d779e5bd..e0e46a8d 100644 --- a/2022/02/17/aws-iamyuan-li-yi-ji-ru-he-shi-yong/index.html +++ b/2022/02/17/aws-iamyuan-li-yi-ji-ru-he-shi-yong/index.html @@ -823,18 +823,18 @@

    Andrewsy

    - - +
    @@ -844,18 +844,18 @@

    Andrewsy

    - - +
    @@ -865,18 +865,18 @@

    Andrewsy

    - - +
    @@ -886,18 +886,18 @@

    Andrewsy

    - - +
    @@ -907,18 +907,18 @@

    Andrewsy

    - - +
    diff --git a/2022/03/02/aws-ec2de-xiang-xi-jie-shao-he-ji-fei-mo-shi/index.html b/2022/03/02/aws-ec2de-xiang-xi-jie-shao-he-ji-fei-mo-shi/index.html index f2b28352..1cb6cfe8 100644 --- a/2022/03/02/aws-ec2de-xiang-xi-jie-shao-he-ji-fei-mo-shi/index.html +++ b/2022/03/02/aws-ec2de-xiang-xi-jie-shao-he-ji-fei-mo-shi/index.html @@ -847,18 +847,18 @@

    Andrewsy

    - - +
    @@ -868,18 +868,18 @@

    Andrewsy

    - - +
    @@ -889,18 +889,18 @@

    Andrewsy

    - - +
    @@ -910,18 +910,18 @@

    Andrewsy

    - - +
    @@ -931,18 +931,18 @@

    Andrewsy

    - - +
    diff --git a/2022/04/08/shen-me-shi-aws-ec2-hibernation/index.html b/2022/04/08/shen-me-shi-aws-ec2-hibernation/index.html index 77fa062a..01d86075 100644 --- a/2022/04/08/shen-me-shi-aws-ec2-hibernation/index.html +++ b/2022/04/08/shen-me-shi-aws-ec2-hibernation/index.html @@ -819,18 +819,18 @@

    Andrewsy

    - - +
    @@ -840,18 +840,18 @@

    Andrewsy

    - - +
    @@ -861,18 +861,18 @@

    Andrewsy

    - - +
    @@ -882,18 +882,18 @@

    Andrewsy

    - - +
    @@ -903,18 +903,18 @@

    Andrewsy

    - - +
    diff --git a/2022/05/01/what-is-aws-ebs-and-how-to-use-it/index.html b/2022/05/01/what-is-aws-ebs-and-how-to-use-it/index.html index 0221b3bf..e3bfa73a 100644 --- a/2022/05/01/what-is-aws-ebs-and-how-to-use-it/index.html +++ b/2022/05/01/what-is-aws-ebs-and-how-to-use-it/index.html @@ -824,18 +824,18 @@

    Andrewsy

    - - +
    @@ -845,18 +845,18 @@

    Andrewsy

    - - +
    @@ -866,18 +866,18 @@

    Andrewsy

    - - +
    @@ -887,18 +887,18 @@

    Andrewsy

    - - +
    @@ -908,18 +908,18 @@

    Andrewsy

    - - +
    diff --git a/2022/06/13/understanding-aws-ec2-instance-store/index.html b/2022/06/13/understanding-aws-ec2-instance-store/index.html index b5a4ab7c..a4f3a30f 100644 --- a/2022/06/13/understanding-aws-ec2-instance-store/index.html +++ b/2022/06/13/understanding-aws-ec2-instance-store/index.html @@ -562,27 +562,27 @@

    Amazon Web Services (AWS) 的弹性Cloud实例(EC2)为用户提供了多种存储选项,其中之一是实例存储(Instance Store),也被称为本地实例存储或瞬态存储。本文将深入研究 AWS EC2 实例存储,包括其定义、特点、使用场景以及最佳实践。

    -

    什么是 EC2 实例存储?


    +

    什么是 EC2 实例存储?


    EC2 实例存储是 EC2 实例上提供的临时、本地存储选项。这些存储卷是物理存储设备(如硬盘驱动器)的一部分,直接连接到宿主实例,而不是通过网络进行访问。实例存储通常提供了非常高的性能,适用于需要低延迟和高吞吐量的工作负载。

    AWS EC2 Instance Store IOPS

    -

    实例存储的特点


    +

    实例存储的特点


    临时性:实例存储是临时的,与 EC2 实例的生命周期紧密相连。当 EC2 实例停止、终止或失败时,存储中的数据将不再可用。

    高性能:实例存储通常提供非常高的 I/O 性能,适用于需要大量读写操作的应用程序,如数据库缓存或临时计算。

    低延迟:由于实例存储直接连接到宿主实例,因此具有非常低的访问延迟,适用于对速度要求极高的工作负载。

    不同于 Amazon EBS, 与 Amazon Elastic Block Store(EBS)不同,实例存储不需要预配,也没有额外的费用,但缺乏 EBS 提供的持久性和数据备份功能。

    -

    使用场景


    +

    使用场景


    缓存层:实例存储适用于临时数据,如缓存层。通过将缓存存储在实例存储上,可以提高读取速度和降低后端存储负担。

    临时计算:对于需要执行大规模数据处理的任务,实例存储可以用作临时工作空间,以加速计算过程。

    日志存储:对于需要快速记录大量日志数据的应用程序,实例存储可以提供高性能的日志存储解决方案。

    -

    最佳实践


    +

    最佳实践


    备份重要数据:由于实例存储是临时性的,重要数据需要备份到持久性存储(如 Amazon EBS 或 Amazon S3)以防止数据丢失。

    了解生命周期:在使用实例存储时,了解 EC2 实例的生命周期非常重要。确保存储中的数据不会在实例终止时丢失。

    监控性能:实例存储通常提供高性能,但仍然需要监控其性能以确保正常运行。

    -

    总结


    +

    总结


    AWS EC2 实例存储是一种适用于需要高性能和低延迟的临时数据存储的选择。尽管它不适用于持久性数据存储,但在特定场景下,如缓存、临时计算和日志存储,它可以提供卓越的性能和效率。使用实例存储时,请谨记存储临时性质,并采取适当的备份和监控措施,以确保数据的可用性和完整性。
    @@ -797,7 +797,7 @@

    Andrewsy

    @@ -851,18 +851,18 @@

    Andrewsy

    - - +
    @@ -872,18 +872,18 @@

    Andrewsy

    - - +
    @@ -893,18 +893,18 @@

    Andrewsy

    - - +
    @@ -914,18 +914,18 @@

    Andrewsy

    - - +
    diff --git a/2022/07/19/how-to-backup-and-restore-aws-ebs/index.html b/2022/07/19/how-to-backup-and-restore-aws-ebs/index.html index c73bddca..bfa44476 100644 --- a/2022/07/19/how-to-backup-and-restore-aws-ebs/index.html +++ b/2022/07/19/how-to-backup-and-restore-aws-ebs/index.html @@ -562,10 +562,10 @@

    在Amazon Web Services(AWS)中,Snapshot是一种备份和恢复Amazon Elastic Block Store(EBS)卷数据的关键工具。
    AWS EBS Snapshot

    -

    什么是Snapshot


    +

    什么是Snapshot


    概念

    Snapshot是EBS卷的点对点备份,它记录了卷的特定时刻的状态,包括数据、配置和元数据。快照是存储在Amazon S3中的,因此具有高可靠性和持久性。

    -

    特点和功能


    +

    特点和功能


    持久性

    Snapshot是持久性的,一旦创建,它们会一直存在,即使原始EBS卷被删除也是如此。

    增量备份

    快照仅捕获自上一个快照以来发生的更改,这降低了备份的成本和时间。

    @@ -574,31 +574,31 @@

    复制到其他区域

    可以将Snapshot复制到其他AWS区域,以增加数据的可用性和灾难恢复选项。

    创建新EBS卷

    可以使用Snapshot创建新的EBS卷,这对于在不同EC2实例之间共享数据非常有用。

    自动快照策略

    可以设置自动快照策略,以定期创建快照,从而实现自动备份。

    -

    如何创建和使用Snapshot


    +

    如何创建和使用Snapshot


    创建Snapshot

    在AWS管理控制台上,可以选择要备份的EBS卷,然后创建Snapshot。也可以使用AWS命令行工具或SDK来创建Snapshot。

    备份策略

    可以选择手动创建Snapshot,也可以设置自动快照策略来定期备份数据。

    恢复数据

    如果需要,可以使用Snapshot还原数据。可以创建新的EBS卷,然后从快照还原数据,或者将快照直接附加到现有EBS卷上。

    数据保护

    Snapshot是数据保护的关键,它可以防止因数据丢失或损坏而引发的灾难情况。

    数据复制和迁移

    可以将Snapshot复制到其他AWS区域或AWS账户,以实现数据复制和迁移。

    -

    快照的应用场景


    +

    快照的应用场景


    数据备份和恢复

    主要用于备份重要的数据,以便在数据丢失或损坏时能够迅速恢复。

    点对点复制和迁移

    可以将快照复制到不同的 AWS 区域或 AWS 账户,以实现数据的复制和迁移。

    测试和开发

    创建快照可以帮助在不影响生产环境的情况下为测试和开发环境提供实验数据。

    版本控制和数据恢复

    可以使用快照来实现数据版本控制,允许在不同时间点恢复到不同的数据状态。

    数据分析和报告

    可以创建快照以便进行数据分析、生成报告或生成数据副本以供其他用途使用。

    -

    快照的定价


    +

    快照的定价


    会被收费用于创建和保留快照的存储空间。快照的价格取决于存储的数据量。AWS 还提供了一些定价选项,如创建和保留快照的频率。具体查看 AWS 官方网站的定价详情获取最新信息。

    -

    最佳实践和注意事项


    +

    最佳实践和注意事项


    定期创建快照

    建议定期创建快照,以确保数据的定期备份和恢复能力。

    标记和命名

    为快照提供有意义的标记和命名,以便轻松识别和管理。

    自动化备份策略

    使用 AWS 的自动备份策略来定期创建快照,减少人工干预。

    根据需求调整快照

    根据数据的重要性和变化频率,调整快照的保留策略。

    监控和警报

    设置监控和警报,以便在快照创建或数据丢失时及时获得通知。

    -

    总结


    +

    总结


    总之,AWS Snapshot 用于EBS卷备份和数据保护的关键工具。它具有高度的持久性和可靠性,并支持数据版本控制、恢复、复制和迁移。通过定期创建Snapshot,可以确保数据的安全性和可用性

    @@ -813,7 +813,7 @@

    Andrewsy

    @@ -867,18 +867,18 @@

    Andrewsy

    - - +
    @@ -888,18 +888,18 @@

    Andrewsy

    - - +
    @@ -909,18 +909,18 @@

    Andrewsy

    - - +
    @@ -930,18 +930,18 @@

    Andrewsy

    - - +
    diff --git a/2022/08/13/the-elastic-file-system/index.html b/2022/08/13/the-elastic-file-system/index.html index e51c857d..7dc0c330 100644 --- a/2022/08/13/the-elastic-file-system/index.html +++ b/2022/08/13/the-elastic-file-system/index.html @@ -563,7 +563,7 @@

    在Cloud时代,数据的管理和共享至关重要。AWS Elastic File System(EFS)是 Amazon Web Services(AWS)提供的一项云存储服务,旨在满足企业和开发者对可扩展、高可用性文件存储的需求。本文将深入探讨 AWS EFS 的特点、优势、使用案例以及如何开始使用它。

    AWS EFS

    -

    AWS EFS 概览


    +

    AWS EFS 概览


    AWS EFS 是一种托管的网络文件存储服务,旨在提供可扩展、高可用性的文件系统,以满足各种应用程序和工作负载的存储需求。 以下是 AWS EFS 的关键特点: @@ -574,7 +574,7 @@

    AWS EFS 概览


    多种访问协议:EFS 支持多种文件访问协议,包括 NFSv4、NFSv3 和 Amazon EFS 文件系统客户端。

    安全性:EFS 文件系统支持 Amazon VPC(Virtual Private Cloud)网络隔离,以确保数据的安全性和隐私性。

    AWS EFS IA

    -

    AWS EFS 的优势


    +

    AWS EFS 的优势


    为什么选择 AWS EFS 作为文件存储解决方案? 以下是一些显著的优势:

    可扩展性:EFS 自动扩展,因此您不必担心文件系统的容量限制。它可以根据需要增加存储空间。

    @@ -582,7 +582,7 @@

    AWS EFS 的优势

    共享性:多个 EC2 实例可以同时访问同一文件系统,使其适用于需要多个实例之间共享文件的应用程序。

    强一致性:EFS 提供强一致性,确保多个实例并发写入或读取文件时的数据一致性。

    灵活性:您可以根据需求创建和管理多个文件系统,每个文件系统可以有不同的权限和访问策略。

    -

    AWS EFS 的使用场景


    +

    AWS EFS 的使用场景


    AWS EFS 适用于许多不同的使用场景,包括但不限于:

    Web 服务器:EFS 可用于存储 Web 服务器的静态内容、日志文件和配置文件,以实现高可用性和可扩展性。

    @@ -590,7 +590,7 @@

    AWS EFS 的使用

    大数据分析:EFS 可用于存储大数据分析工作负载的输入数据和输出结果,支持多个分析节点的并发访问。

    应用程序共享:EFS 使不同应用程序之间可以轻松地共享文件,适用于微服务架构和多个应用程序共存的情况。

    开发和测试环境:开发人员可以使用 EFS 存储开发和测试环境的代码和资源,确保一致的开发和测试数据。

    -

    开始使用 AWS EFS


    +

    开始使用 AWS EFS


    要开始使用 AWS EFS,您可以按照以下步骤操作:

      @@ -600,7 +600,7 @@

      开始使用 AWS EFS<
    1. 将文件系统挂载到 EC2 实例:在您的 EC2 实例上挂载 EFS 文件系统,使实例能够访问共享文件。
    2. 开始使用:将您的应用程序或工作负载配置为使用挂载的 EFS 文件系统。
    -

    总结


    +

    总结


    AWS Elastic File System(EFS)是一项强大的云文件存储服务,为各种应用程序和业务提供可扩展,高可能性的文件存储功能。

    @@ -818,7 +818,7 @@

    Andrewsy

    @@ -872,18 +872,18 @@

    Andrewsy

    - - +
    @@ -893,18 +893,18 @@

    Andrewsy

    - - +
    @@ -914,18 +914,18 @@

    Andrewsy

    - - +
    @@ -935,18 +935,18 @@

    Andrewsy

    - - +
    diff --git a/2022/09/21/aws-ebs-vs-aws-efs/index.html b/2022/09/21/aws-ebs-vs-aws-efs/index.html index 53131d8e..095f40cb 100644 --- a/2022/09/21/aws-ebs-vs-aws-efs/index.html +++ b/2022/09/21/aws-ebs-vs-aws-efs/index.html @@ -561,57 +561,57 @@

    -

    Introduction


    +

    Introduction


    Amazon Web Services (AWS) offers a wide range of storage solutions to cater to the diverse needs of businesses and developers. Two popular options are Amazon Elastic Block Store (EBS) and Amazon Elastic File System (EFS). While both provide storage capabilities within the AWS ecosystem, they serve different purposes and have distinct characteristics. In this blog post, we will explore the key differences between AWS EBS and AWS EFS to help you make an informed choice for your specific use case.

    -

    Amazon Elastic Block Store (EBS)


    +

    Amazon Elastic Block Store (EBS)


    AWS EBS
    AWS EBS
    Amazon Elastic Block Store (EBS) is a block-level storage service that provides durable and high-performance storage volumes that can be attached to Amazon Elastic Compute Cloud (EC2) instances..

    Here are some of the main characteristics and use cases of AWS EBS.

    -

    Block Storage


    +

    Block Storage


    EBS provides block storage, which means it is best suited for scenarios where you need to store data at the block level, such as databases and applications that require direct access to disk devices.

    -

    Low-Latency Performance


    +

    Low-Latency Performance


    EBS volumes offer low-latency, high-throughput performance, making them ideal for I/O-intensive workloads where rapid data access is crucial.

    -

    Data Persistence


    +

    Data Persistence


    EBS volumes are persistent, meaning the data stored on them remains intact even when the associated EC2 instance is stopped or terminated. This is useful for maintaining critical data.

    -

    Availability and Redundancy


    +

    Availability and Redundancy


    EBS volumes can be replicated within a specific
    Availability Zone (AZ) for redundancy, but they are not natively designed for cross-AZ or cross-region redundancy. For cross-AZ redundancy, you need to set up additional configurations.

    -

    Cost


    +

    Cost


    You pay for the provisioned capacity of the EBS volume, regardless of whether it is in use or not. This can make it cost-effective for certain use cases but might require careful capacity planning.

    -

    Amazon Elastic File System (EFS)


    +

    Amazon Elastic File System (EFS)


    AWS EFS
    AWS EFS
    Amazon Elastic File System (EFS) is a fully managed

    Here are the key attributes and use cases of AWS EFS:

    -

    File Storage


    +

    File Storage


    EFS offers file-level storage, making it suitable for scenarios where multiple instances need shared access to the same data, such as web applications, content management systems, and shared repositories.

    -

    Scalability


    +

    Scalability


    EFS is designed to scale automatically as your storage needs grow. It can handle a dynamic number of EC2 instances concurrently, making it a great choice for applications with varying workloads.

    -

    Cross-AZ and Cross-Region


    +

    Cross-AZ and Cross-Region


    EFS provides built-in redundancy and can be accessed across multiple Availability Zones and even across regions. This ensures high availability and disaster recovery capabilities.

    -

    Pay-as-You-Go


    +

    Pay-as-You-Go


    With EFS, you pay for the storage capacity you use, making it a cost-effective option for applications with fluctuating storage requirements.

    -

    Simplified Management


    +

    Simplified Management


    EFS takes care of the underlying infrastructure and scaling, allowing you to focus on your applications without worrying about managing storage hardware.

    -

    Choosing Between EBS and EFS


    +

    Choosing Between EBS and EFS


    To decide between AWS EBS and AWS EFS, consider the following factors:

    -

    Use Case


    +

    Use Case


    Determine whether your application requires block-level storage (EBS) or file-level storage (EFS).

    -

    Performance Requirements


    +

    Performance Requirements


    Assess your performance needs. EBS is often preferred for high-performance workloads, while EFS offers good performance for a wide range of applications.

    -

    Scalability


    +

    Scalability


    Consider whether your storage needs are likely to scale over time. EFS is designed for easy scalability.

    -

    Availability and Redundancy


    +

    Availability and Redundancy


    If high availability and redundancy are crucial, EFS may be a better choice due to its built-in cross-AZ and cross-region capabilities.

    -

    Cost


    +

    Cost


    Evaluate your budget and cost considerations. EBS charges are based on provisioned capacity, while EFS charges are based on actual usage.

    -

    Conclusion


    +

    Conclusion


    In summary, AWS EBS and AWS EFS are both valuable storage services within the AWS ecosystem, but they serve different purposes and have distinct characteristics. EBS is ideal for block-level storage with high-performance requirements and provides data persistence within a single Availability Zone. On the other hand, EFS is designed for scalable file-level storage. Your choice between the two should be based on your specific use case and requirements. Understanding these differences is crucial for optimizing your AWS storage strategy and ensuring the best performance and cost-efficiency for your applications.

    @@ -829,7 +829,7 @@

    Andrewsy

    @@ -883,18 +883,18 @@

    Andrewsy

    - - +
    @@ -904,18 +904,18 @@

    Andrewsy

    - - +
    @@ -925,18 +925,18 @@

    Andrewsy

    - - +
    @@ -946,18 +946,18 @@

    Andrewsy

    - - +
    diff --git a/2022/10/17/elastic-load-balancing-elb-overview/index.html b/2022/10/17/elastic-load-balancing-elb-overview/index.html index d1c00688..8c3f3792 100644 --- a/2022/10/17/elastic-load-balancing-elb-overview/index.html +++ b/2022/10/17/elastic-load-balancing-elb-overview/index.html @@ -561,12 +561,12 @@

    -

    What is load balancing


    +

    What is load balancing


    Load Balances are servers that forward traffic to multiple servers (e.g., EC2 instances) downstream.

    AWS ELB
    AWS ELB
    -

    Why use a load balancing


    +

    Why use a load balancing


    • Spread load across multiple downstream instances
    • @@ -578,7 +578,7 @@

      Why use a load ba
    • Hight availability across zones
    • Separate public traffic from private traffic
    -

    Why use an Elastic Load Balancer


    +

    Why use an Elastic Load Balancer


    • An Elastic Load Balancer is a managed load balancer
        @@ -595,7 +595,7 @@

        Why use an

    -

    Health Checks


    +

    Health Checks


    @@ -922,18 +922,18 @@

    Andrewsy

    - - +
    @@ -943,18 +943,18 @@

    Andrewsy

    - - +
    @@ -964,18 +964,18 @@

    Andrewsy

    - - +
    diff --git a/2022/10/21/elastic-load-balancer-clb-alb-nlb-and-gwlb/index.html b/2022/10/21/elastic-load-balancer-clb-alb-nlb-and-gwlb/index.html index 3a1a49e3..5779abd6 100644 --- a/2022/10/21/elastic-load-balancer-clb-alb-nlb-and-gwlb/index.html +++ b/2022/10/21/elastic-load-balancer-clb-alb-nlb-and-gwlb/index.html @@ -870,18 +870,18 @@

    Andrewsy

    - - +
    @@ -891,18 +891,18 @@

    Andrewsy

    - - +
    @@ -912,18 +912,18 @@

    Andrewsy

    - - +
    @@ -933,18 +933,18 @@

    Andrewsy

    - - +
    @@ -954,18 +954,18 @@

    Andrewsy

    - - +
    diff --git a/2022/11/15/aws-auto-scaling-group/index.html b/2022/11/15/aws-auto-scaling-group/index.html index e3abd189..6335cbd9 100644 --- a/2022/11/15/aws-auto-scaling-group/index.html +++ b/2022/11/15/aws-auto-scaling-group/index.html @@ -950,18 +950,18 @@

    Andrewsy

    - - +
    @@ -971,18 +971,18 @@

    Andrewsy

    - - +
    @@ -992,18 +992,18 @@

    Andrewsy

    - - +
    @@ -1013,18 +1013,18 @@

    Andrewsy

    - - +
    @@ -1034,18 +1034,18 @@

    Andrewsy

    - - +
    diff --git a/2023/01/02/understanding-tensorflow-for-machine-learning/index.html b/2023/01/02/understanding-tensorflow-for-machine-learning/index.html index 00fbce69..3564c57a 100644 --- a/2023/01/02/understanding-tensorflow-for-machine-learning/index.html +++ b/2023/01/02/understanding-tensorflow-for-machine-learning/index.html @@ -836,18 +836,18 @@

    Andrewsy

    - - +
    @@ -857,18 +857,18 @@

    Andrewsy

    - - +
    @@ -878,18 +878,18 @@

    Andrewsy

    - - +
    @@ -899,18 +899,18 @@

    Andrewsy

    - - +
    @@ -920,18 +920,18 @@

    Andrewsy

    - - +
    diff --git a/2023/01/11/a-large-movie-review-dataset-for-binary-sementic-analysis/index.html b/2023/01/11/a-large-movie-review-dataset-for-binary-sementic-analysis/index.html index fc953abf..116cca36 100644 --- a/2023/01/11/a-large-movie-review-dataset-for-binary-sementic-analysis/index.html +++ b/2023/01/11/a-large-movie-review-dataset-for-binary-sementic-analysis/index.html @@ -829,18 +829,18 @@

    Andrewsy

    - - +
    @@ -850,18 +850,18 @@

    Andrewsy

    - - +
    @@ -871,18 +871,18 @@

    Andrewsy

    - - +
    @@ -892,18 +892,18 @@

    Andrewsy

    - - +
    @@ -913,18 +913,18 @@

    Andrewsy

    - - +
    diff --git a/2023/02/01/the-pile-a-comprehensive-dataset-for-training-nlp-models/index.html b/2023/02/01/the-pile-a-comprehensive-dataset-for-training-nlp-models/index.html index 549ffca9..09076719 100644 --- a/2023/02/01/the-pile-a-comprehensive-dataset-for-training-nlp-models/index.html +++ b/2023/02/01/the-pile-a-comprehensive-dataset-for-training-nlp-models/index.html @@ -848,18 +848,18 @@

    Andrewsy

    - - +
    @@ -869,18 +869,18 @@

    Andrewsy

    - - +
    @@ -890,18 +890,18 @@

    Andrewsy

    - - +
    @@ -911,18 +911,18 @@

    Andrewsy

    - - +
    @@ -932,18 +932,18 @@

    Andrewsy

    - - +
    diff --git a/2023/02/14/machine-learning-sentiment-analysis-load-raw-dataset/index.html b/2023/02/14/machine-learning-sentiment-analysis-load-raw-dataset/index.html index c9fdcf04..e990bde9 100644 --- a/2023/02/14/machine-learning-sentiment-analysis-load-raw-dataset/index.html +++ b/2023/02/14/machine-learning-sentiment-analysis-load-raw-dataset/index.html @@ -842,18 +842,18 @@

    Andrewsy

    - - +
    @@ -863,18 +863,18 @@

    Andrewsy

    - - +
    @@ -884,18 +884,18 @@

    Andrewsy

    - - +
    @@ -905,18 +905,18 @@

    Andrewsy

    - - +
    @@ -926,18 +926,18 @@

    Andrewsy

    - - +
    diff --git a/2023/02/21/machine-learning-sentiment-analysis-text-vectorization/index.html b/2023/02/21/machine-learning-sentiment-analysis-text-vectorization/index.html index 349c5756..d9427169 100644 --- a/2023/02/21/machine-learning-sentiment-analysis-text-vectorization/index.html +++ b/2023/02/21/machine-learning-sentiment-analysis-text-vectorization/index.html @@ -850,18 +850,18 @@

    Andrewsy

    - - +
    @@ -871,18 +871,18 @@

    Andrewsy

    - - +
    @@ -892,18 +892,18 @@

    Andrewsy

    - - +
    @@ -913,18 +913,18 @@

    Andrewsy

    - - +
    @@ -934,18 +934,18 @@

    Andrewsy

    - - +
    diff --git a/2023/03/12/machine-learning-sentiment-analysis-build-model/index.html b/2023/03/12/machine-learning-sentiment-analysis-build-model/index.html index 35b691d0..67e68930 100644 --- a/2023/03/12/machine-learning-sentiment-analysis-build-model/index.html +++ b/2023/03/12/machine-learning-sentiment-analysis-build-model/index.html @@ -841,18 +841,18 @@

    Andrewsy

    - - +
    @@ -862,18 +862,18 @@

    Andrewsy

    - - +
    @@ -883,18 +883,18 @@

    Andrewsy

    - - +
    @@ -904,18 +904,18 @@

    Andrewsy

    - - +
    @@ -925,18 +925,18 @@

    Andrewsy

    - - +
    diff --git a/2023/03/25/machine-learning-sentiment-analysis-compile-model/index.html b/2023/03/25/machine-learning-sentiment-analysis-compile-model/index.html index 325488fb..ed592189 100644 --- a/2023/03/25/machine-learning-sentiment-analysis-compile-model/index.html +++ b/2023/03/25/machine-learning-sentiment-analysis-compile-model/index.html @@ -870,18 +870,18 @@

    Andrewsy

    - - +
    @@ -891,18 +891,18 @@

    Andrewsy

    - - +
    @@ -912,18 +912,18 @@

    Andrewsy

    - - +
    @@ -933,18 +933,18 @@

    Andrewsy

    - - +
    @@ -954,18 +954,18 @@

    Andrewsy

    - - +
    diff --git a/2023/04/02/machine-learning-sentiment-analysis-train-model/index.html b/2023/04/02/machine-learning-sentiment-analysis-train-model/index.html index 0ba5f3a3..032173c8 100644 --- a/2023/04/02/machine-learning-sentiment-analysis-train-model/index.html +++ b/2023/04/02/machine-learning-sentiment-analysis-train-model/index.html @@ -844,18 +844,18 @@

    Andrewsy

    - - +
    @@ -865,18 +865,18 @@

    Andrewsy

    - - +
    @@ -886,18 +886,18 @@

    Andrewsy

    - - +
    @@ -907,18 +907,18 @@

    Andrewsy

    - - +
    @@ -928,18 +928,18 @@

    Andrewsy

    - - +
    diff --git a/2023/04/18/machine-learning-sentiment-analysis-evaluate-save-model/index.html b/2023/04/18/machine-learning-sentiment-analysis-evaluate-save-model/index.html index a406dcff..2bef87c1 100644 --- a/2023/04/18/machine-learning-sentiment-analysis-evaluate-save-model/index.html +++ b/2023/04/18/machine-learning-sentiment-analysis-evaluate-save-model/index.html @@ -836,18 +836,18 @@

    Andrewsy

    - - +
    @@ -857,18 +857,18 @@

    Andrewsy

    - - +
    @@ -878,18 +878,18 @@

    Andrewsy

    - - +
    @@ -899,18 +899,18 @@

    Andrewsy

    - - +
    @@ -920,18 +920,18 @@

    Andrewsy

    - - +
    diff --git a/2023/05/02/machine-learning-sentiment-analysis-load-model-predict/index.html b/2023/05/02/machine-learning-sentiment-analysis-load-model-predict/index.html index 8ecc1adb..9d13c7ed 100644 --- a/2023/05/02/machine-learning-sentiment-analysis-load-model-predict/index.html +++ b/2023/05/02/machine-learning-sentiment-analysis-load-model-predict/index.html @@ -866,18 +866,18 @@

    Andrewsy

    - - +
    @@ -887,18 +887,18 @@

    Andrewsy

    - - +
    @@ -908,18 +908,18 @@

    Andrewsy

    - - +
    @@ -929,18 +929,18 @@

    Andrewsy

    - - +
    @@ -950,18 +950,18 @@

    Andrewsy

    - - +
    diff --git a/2023/05/25/how-to-install-additional-msi-components/index.html b/2023/05/25/how-to-install-additional-msi-components/index.html index a56e82bb..a83a4acc 100644 --- a/2023/05/25/how-to-install-additional-msi-components/index.html +++ b/2023/05/25/how-to-install-additional-msi-components/index.html @@ -845,18 +845,18 @@

    Andrewsy

    - - +
    @@ -866,18 +866,18 @@

    Andrewsy

    - - +
    @@ -887,18 +887,18 @@

    Andrewsy

    - - +
    @@ -908,18 +908,18 @@

    Andrewsy

    - - +
    @@ -929,18 +929,18 @@

    Andrewsy

    - - +
    diff --git a/2023/09/05/aws-glue-databrew-data-preparation-tool/index.html b/2023/09/05/aws-glue-databrew-data-preparation-tool/index.html index 5e6f7631..40164acd 100644 --- a/2023/09/05/aws-glue-databrew-data-preparation-tool/index.html +++ b/2023/09/05/aws-glue-databrew-data-preparation-tool/index.html @@ -562,7 +562,7 @@

    数据准备是数据分析和机器学习的关键步骤之一。AWS Glue DataBrew 是 Amazon Web Services(AWS)提供的一项强大工具,旨在帮助数据工程师、数据分析师和数据科学家轻松地准备数据以进行分析、报告和机器学习。本文将深入探讨 AWS Glue DataBrew 的特点、优势、使用场景和如何入门。
    AWS Glue DataBrew

    -

    AWS Glue DataBrew 简介


    +

    AWS Glue DataBrew 简介


    AWS Glue DataBrew 是一项全托管的数据准备服务,它通过可视化界面和自动化工具简化了数据清理、转换和准备的过程。以下是 AWS Glue DataBrew 的一些关键特点:

    可视化数据准备

    DataBrew 提供了直观的用户界面,使用户能够轻松地探索、清理和转换数据,而无需编写复杂的代码。

    @@ -572,14 +572,14 @@

    数据转换和清洗

    您可以使用 DataBrew 进行各种数据转换和清洗操作,如删除重复数据、填充缺失值、合并列等。

    工作流程自动化

    DataBrew 支持创建数据准备工作流程,以自动执行多个数据准备任务,提高效率。

    数据监控和审计

    DataBrew 提供数据监控和审计功能,以跟踪数据准备操作,确保数据质量和安全性。

    -

    AWS Glue DataBrew 的优势


    +

    AWS Glue DataBrew 的优势


    为什么要选择 AWS Glue DataBrew 作为数据准备工具?以下是它的一些显著优势:

    降低技术门槛

    DataBrew 的可视化界面使数据准备过程对于不擅长编程的用户也变得更加可行,降低了技术门槛。

    节省时间

    自动化功能和预建的数据转换操作可以大幅节省数据准备的时间,使用户能够更快地获得洞察。

    改进数据质量

    DataBrew 的数据探索和质量评估工具有助于发现和解决数据质量问题,提高数据分析的可靠性。

    与 AWS 生态系统集成

    DataBrew 与其他 AWS 服务集成,可无缝集成到您的数据工作流程中,如 AWS Glue、S3、Redshift 等。

    -

    AWS Glue DataBrew 的使用场景


    +

    AWS Glue DataBrew 的使用场景


    AWS Glue DataBrew 适用于多种使用场景,包括但不限于:

    数据清理和规范化

    将原始数据清理并规范化,以便进行分析和报告。

    @@ -587,7 +587,7 @@

    缺失数据处理

    填充缺失数据或识别缺失数据的模式。

    数据合并和分割

    合并不同来源的数据或拆分包含多个值的列。

    数据质量监控

    持续监控数据质量,以及时发现问题并采取纠正措施。

    -

    入门 AWS Glue DataBrew


    +

    入门 AWS Glue DataBrew


    要开始使用 AWS Glue DataBrew,您可以按照以下步骤操作:

      @@ -598,7 +598,7 @@

      入门 AWS Glue D
    1. 使用 DataBrew:在 DataBrew 的可视化界面中探索、清理和转换数据。
    2. 保存和导出数据:完成数据准备后,您可以将数据保存并导出到其他 AWS 服务或应用程序中。
    -

    总结


    +

    总结


    AWS Glue DataBrew 是一项强大的数据准备工具,它通过可视化界面和自动化功能使数据准备变得更加容易和高效。无论您是数据工程师、数据分析师还是数据科学家,DataBrew 都可以帮助您加速数据分析的过程,从原始数据中提取有价值的信息。开始使用 DataBrew,并体验数据准备的全新方式!

    @@ -821,7 +821,7 @@

    Andrewsy

    @@ -875,18 +875,18 @@

    Andrewsy

    - - +
    @@ -896,18 +896,18 @@

    Andrewsy

    - - +
    @@ -917,18 +917,18 @@

    Andrewsy

    - - +
    @@ -938,18 +938,18 @@

    Andrewsy

    - - +
    diff --git a/2023/09/10/what-is-bixby-capsule/index.html b/2023/09/10/what-is-bixby-capsule/index.html index c69615dc..fb3a0efd 100644 --- a/2023/09/10/what-is-bixby-capsule/index.html +++ b/2023/09/10/what-is-bixby-capsule/index.html @@ -562,13 +562,13 @@

    Bixby Capsule

    -

    导言


    +

    导言


    在今天的数字时代,虚拟助手已经成为我们日常生活的一部分。Bixby,三星电子开发的人工智能助手,是其中一个备受欢迎的助手之一。Bixby Capsule 是扩展 Bixby 功能的关键组成部分,本文将介绍什么是 Bixby Capsule、它的工作原理以及如何开发自己的 Capsule。

    -

    什么是 Bixby Capsule?


    +

    什么是 Bixby Capsule?


    Bixby Capsule 是一个为 Bixby 助手创建自定义功能和技能的容器。它允许开发者创建、部署和共享特定领域的虚拟助手应用程序,使用户能够通过语音和文本与虚拟助手进行交互。Capsule 的核心目标是扩展 Bixby 的能力,使其能够执行特定领域的任务,如设定闹钟、预订餐厅、查询天气、播放音乐等。

    -

    Bixby Capsule 的工作原理


    +

    Bixby Capsule 的工作原理


    了解 Bixby Capsule 的工作原理对于开发者非常重要。下面是 Bixby Capsule 的工作原理的简要概述:

    语音输入或文本输入
    用户通过语音或文本与 Bixby 进行交互,提出请求或问题。

    @@ -577,7 +577,7 @@

    Bixby Capsul

    Capsule 交互
    一旦确定了匹配的 Capsule,Bixby 与该 Capsule 进行交互,将用户的请求传递给 Capsule。

    Capsule 执行
    Capsule 接收用户的请求并执行相关操作,可能需要与外部数据源或服务进行交互以获取信息或执行任务。

    响应用户
    Capsule 返回结果给 Bixby,然后 Bixby 将结果呈现给用户,通常以语音或文本形式。

    -

    如何开发 Bixby Capsule?


    +

    如何开发 Bixby Capsule?


    现在让我们来看看如何开发自己的 Bixby Capsule。以下是一个简要的步骤:

    步骤 1:准备开发环境

    在开始开发之前,您需要准备好开发环境。这包括以下步骤:

    @@ -626,7 +626,7 @@

  • 发布您的 Capsule。
  • 6.2 分享

    您可以分享您的 Capsule 的链接给其他用户,或者在 Bixby Marketplace 上找到它

    -

    结论


    +

    结论


    Bixby Capsule 是一个强大的工具,可以帮助开发者创建自定义虚拟助手应用程序,提供各种功能和技能。了解其工作原理以及按照上述步骤进行开发,将使您能够构建出令人印象深刻的 Bixby Capsules,改善用户体验,扩展 Bixby 的功能。

    @@ -842,7 +842,7 @@

    Andrewsy

    @@ -896,18 +896,18 @@

    Andrewsy

    - - +
    @@ -917,18 +917,18 @@

    Andrewsy

    - - +
    @@ -938,18 +938,18 @@

    Andrewsy

    - - +
    @@ -959,18 +959,18 @@

    Andrewsy

    - - +
    diff --git a/2023/10/21/data-storage-format-parquet-ocr-and-avro/index.html b/2023/10/21/data-storage-format-parquet-ocr-and-avro/index.html index 896af594..d8dfee5a 100644 --- a/2023/10/21/data-storage-format-parquet-ocr-and-avro/index.html +++ b/2023/10/21/data-storage-format-parquet-ocr-and-avro/index.html @@ -847,18 +847,18 @@

    Andrewsy

    - - +
    @@ -868,18 +868,18 @@

    Andrewsy

    - - +
    @@ -889,18 +889,18 @@

    Andrewsy

    - - +
    @@ -910,18 +910,18 @@

    Andrewsy

    - - +
    @@ -931,18 +931,18 @@

    Andrewsy

    - - +
    diff --git a/2023/11/19/a-fast-in-process-analytical-database-duckdb/index.html b/2023/11/19/a-fast-in-process-analytical-database-duckdb/index.html index dc8f4749..adfc51fe 100644 --- a/2023/11/19/a-fast-in-process-analytical-database-duckdb/index.html +++ b/2023/11/19/a-fast-in-process-analytical-database-duckdb/index.html @@ -618,6 +618,10 @@

    Parquet + + Python + + DataBase @@ -626,10 +630,6 @@

    DuckDB - - Python - - @@ -862,18 +862,18 @@

    Andrewsy

    - - +
    @@ -883,18 +883,18 @@

    Andrewsy

    - - +
    @@ -904,18 +904,18 @@

    Andrewsy

    - - +
    @@ -925,18 +925,18 @@

    Andrewsy

    - - +
    @@ -946,18 +946,18 @@

    Andrewsy

    - - +
    diff --git a/2023/12/16/rag-info-retrieve-and-text-generation-combination/index.html b/2023/12/16/rag-info-retrieve-and-text-generation-combination/index.html index 9f9b82d2..12c2d88f 100644 --- a/2023/12/16/rag-info-retrieve-and-text-generation-combination/index.html +++ b/2023/12/16/rag-info-retrieve-and-text-generation-combination/index.html @@ -848,18 +848,18 @@

    Andrewsy

    - - +
    @@ -869,18 +869,18 @@

    Andrewsy

    - - +
    @@ -890,18 +890,18 @@

    Andrewsy

    - - +
    @@ -911,18 +911,18 @@

    Andrewsy

    - - +
    @@ -932,18 +932,18 @@

    Andrewsy

    - - +
    diff --git a/2024/01/03/understand-es6-symbol/index.html b/2024/01/03/understand-es6-symbol/index.html index 9ef02ecd..505dfc32 100644 --- a/2024/01/03/understand-es6-symbol/index.html +++ b/2024/01/03/understand-es6-symbol/index.html @@ -822,18 +822,18 @@

    Andrewsy

    - - +
    @@ -843,18 +843,18 @@

    Andrewsy

    - - +
    @@ -864,18 +864,18 @@

    Andrewsy

    - - +
    @@ -885,18 +885,18 @@

    Andrewsy

    - - +
    @@ -906,18 +906,18 @@

    Andrewsy

    - - +
    diff --git a/2024/01/19/javascript-generator-powerful/index.html b/2024/01/19/javascript-generator-powerful/index.html index 411ba698..758c6892 100644 --- a/2024/01/19/javascript-generator-powerful/index.html +++ b/2024/01/19/javascript-generator-powerful/index.html @@ -818,18 +818,18 @@

    Andrewsy

    - - +
    @@ -839,18 +839,18 @@

    Andrewsy

    - - +
    @@ -860,18 +860,18 @@

    Andrewsy

    - - +
    @@ -881,18 +881,18 @@

    Andrewsy

    - - +
    @@ -902,18 +902,18 @@

    Andrewsy

    - - +
    diff --git a/2024/01/22/redis-single-thread-speed/index.html b/2024/01/22/redis-single-thread-speed/index.html index 9d887706..bd09b77a 100644 --- a/2024/01/22/redis-single-thread-speed/index.html +++ b/2024/01/22/redis-single-thread-speed/index.html @@ -810,18 +810,18 @@

    Andrewsy

    - - +
    @@ -831,18 +831,18 @@

    Andrewsy

    - - +
    @@ -852,18 +852,18 @@

    Andrewsy

    - - +
    @@ -873,18 +873,18 @@

    Andrewsy

    - - +
    @@ -894,18 +894,18 @@

    Andrewsy

    - - +
    diff --git a/2024/01/25/why-redis-choose-single-thread/index.html b/2024/01/25/why-redis-choose-single-thread/index.html index 54face94..4db2865d 100644 --- a/2024/01/25/why-redis-choose-single-thread/index.html +++ b/2024/01/25/why-redis-choose-single-thread/index.html @@ -816,18 +816,18 @@

    Andrewsy

    - - +
    @@ -837,18 +837,18 @@

    Andrewsy

    - - +
    @@ -858,18 +858,18 @@

    Andrewsy

    - - +
    @@ -879,18 +879,18 @@

    Andrewsy

    - - +
    @@ -900,18 +900,18 @@

    Andrewsy

    - - +
    diff --git a/2024/02/11/pulumi-a-powerful-iac-to-manage-the-cloud-infrastructure/index.html b/2024/02/11/pulumi-a-powerful-iac-to-manage-the-cloud-infrastructure/index.html index 61d9a3fb..e3eea984 100644 --- a/2024/02/11/pulumi-a-powerful-iac-to-manage-the-cloud-infrastructure/index.html +++ b/2024/02/11/pulumi-a-powerful-iac-to-manage-the-cloud-infrastructure/index.html @@ -624,6 +624,10 @@

    @@ -897,18 +897,18 @@

    Andrewsy

    - - +
    @@ -918,18 +918,18 @@

    Andrewsy

    - - +
    @@ -939,18 +939,18 @@

    Andrewsy

    - - +
    @@ -960,18 +960,18 @@

    Andrewsy

    - - +
    diff --git a/2024/02/27/importing-existing-cloud-resources-with-pulumi/index.html b/2024/02/27/importing-existing-cloud-resources-with-pulumi/index.html index 2cd09ebd..7379ec07 100644 --- a/2024/02/27/importing-existing-cloud-resources-with-pulumi/index.html +++ b/2024/02/27/importing-existing-cloud-resources-with-pulumi/index.html @@ -636,6 +636,10 @@

    Andrewsy

    - - +
    @@ -909,18 +909,18 @@

    Andrewsy

    - - +
    @@ -930,18 +930,18 @@

    Andrewsy

    - - +
    @@ -951,18 +951,18 @@

    Andrewsy

    - - +
    @@ -972,18 +972,18 @@

    Andrewsy

    - - +
    diff --git a/2024/03/13/migrate-a-legacy-application-to-aws-cloud/index.html b/2024/03/13/migrate-a-legacy-application-to-aws-cloud/index.html index 1a8b92b5..57a1d793 100644 --- a/2024/03/13/migrate-a-legacy-application-to-aws-cloud/index.html +++ b/2024/03/13/migrate-a-legacy-application-to-aws-cloud/index.html @@ -606,6 +606,10 @@

    Summa @@ -883,18 +883,18 @@

    Andrewsy

    - - +
    @@ -904,18 +904,18 @@

    Andrewsy

    - - +
    @@ -925,18 +925,18 @@

    Andrewsy

    - - +
    @@ -946,18 +946,18 @@

    Andrewsy

    - - +
    diff --git a/2024/04/18/the-points-of-aws-china-cloudfront-you-need-to-notice/index.html b/2024/04/18/the-points-of-aws-china-cloudfront-you-need-to-notice/index.html index 9a196171..0917510c 100644 --- a/2024/04/18/the-points-of-aws-china-cloudfront-you-need-to-notice/index.html +++ b/2024/04/18/the-points-of-aws-china-cloudfront-you-need-to-notice/index.html @@ -864,18 +864,18 @@

    Andrewsy

    - - +
    @@ -885,18 +885,18 @@

    Andrewsy

    - - +
    @@ -906,18 +906,18 @@

    Andrewsy

    - - +
    @@ -927,18 +927,18 @@

    Andrewsy

    - - +
    @@ -948,18 +948,18 @@

    Andrewsy

    - - +
    diff --git a/2024/06/14/understanding-the-x-frame-options-http-header/index.html b/2024/06/14/understanding-the-x-frame-options-http-header/index.html index 47d5a978..fd4db44c 100644 --- a/2024/06/14/understanding-the-x-frame-options-http-header/index.html +++ b/2024/06/14/understanding-the-x-frame-options-http-header/index.html @@ -853,18 +853,18 @@

    Andrewsy

    - - +
    @@ -874,18 +874,18 @@

    Andrewsy

    - - +
    @@ -895,18 +895,18 @@

    Andrewsy

    - - +
    @@ -916,18 +916,18 @@

    Andrewsy

    - - +
    @@ -937,18 +937,18 @@

    Andrewsy

    - - +
    diff --git a/2024/06/25/keras3-0-a-multi-framework-machine-learning-library/index.html b/2024/06/25/keras3-0-a-multi-framework-machine-learning-library/index.html index 6e0d28bc..85f693c6 100644 --- a/2024/06/25/keras3-0-a-multi-framework-machine-learning-library/index.html +++ b/2024/06/25/keras3-0-a-multi-framework-machine-learning-library/index.html @@ -883,18 +883,18 @@

    Andrewsy

    - - +
    @@ -904,18 +904,18 @@

    Andrewsy

    - - +
    @@ -925,18 +925,18 @@

    Andrewsy

    - - +
    @@ -946,18 +946,18 @@

    Andrewsy

    - - +
    @@ -967,18 +967,18 @@

    Andrewsy

    - - +
    diff --git a/2024/07/04/using-pulumi-to-import-the-aws-resources-of-the-other-region/index.html b/2024/07/04/using-pulumi-to-import-the-aws-resources-of-the-other-region/index.html index 48567508..6db61b80 100644 --- a/2024/07/04/using-pulumi-to-import-the-aws-resources-of-the-other-region/index.html +++ b/2024/07/04/using-pulumi-to-import-the-aws-resources-of-the-other-region/index.html @@ -608,6 +608,10 @@

    Summa @@ -881,18 +881,18 @@

    Andrewsy

    - - +
    @@ -902,18 +902,18 @@

    Andrewsy

    - - +
    @@ -923,18 +923,18 @@

    Andrewsy

    - - +
    @@ -944,18 +944,18 @@

    Andrewsy

    - - +
    diff --git a/2024/07/26/mysql-8-x-ctes-feature-with-clause/index.html b/2024/07/26/mysql-8-x-ctes-feature-with-clause/index.html index d085a121..8922638e 100644 --- a/2024/07/26/mysql-8-x-ctes-feature-with-clause/index.html +++ b/2024/07/26/mysql-8-x-ctes-feature-with-clause/index.html @@ -837,18 +837,18 @@

    Andrewsy

    - - +
    @@ -858,18 +858,18 @@

    Andrewsy

    - - +
    @@ -879,18 +879,18 @@

    Andrewsy

    - - +
    @@ -900,18 +900,18 @@

    Andrewsy

    - - +
    @@ -921,18 +921,18 @@

    Andrewsy

    - - +
    diff --git a/2024/08/02/how-to-query-tree-structured-relation-data-in-mysql/index.html b/2024/08/02/how-to-query-tree-structured-relation-data-in-mysql/index.html index a854f97d..73c0587a 100644 --- a/2024/08/02/how-to-query-tree-structured-relation-data-in-mysql/index.html +++ b/2024/08/02/how-to-query-tree-structured-relation-data-in-mysql/index.html @@ -817,18 +817,18 @@

    Andrewsy

    - - +
    @@ -838,18 +838,18 @@

    Andrewsy

    - - +
    @@ -859,18 +859,18 @@

    Andrewsy

    - - +
    @@ -880,18 +880,18 @@

    Andrewsy

    - - +
    @@ -901,18 +901,18 @@

    Andrewsy

    - - +
    diff --git a/2024/08/15/what-is-lag-in-sql/index.html b/2024/08/15/what-is-lag-in-sql/index.html index 144d1ed8..1ac70417 100644 --- a/2024/08/15/what-is-lag-in-sql/index.html +++ b/2024/08/15/what-is-lag-in-sql/index.html @@ -879,18 +879,18 @@

    Andrewsy

    - - +
    @@ -900,18 +900,18 @@

    Andrewsy

    - - +
    @@ -921,18 +921,18 @@

    Andrewsy

    - - +
    @@ -942,18 +942,18 @@

    Andrewsy

    - - +
    @@ -963,18 +963,18 @@

    Andrewsy

    - - +
    diff --git a/2024/08/16/introduce-is-distinct-from-in-sql/index.html b/2024/08/16/introduce-is-distinct-from-in-sql/index.html index 3ef2a327..4b46cf63 100644 --- a/2024/08/16/introduce-is-distinct-from-in-sql/index.html +++ b/2024/08/16/introduce-is-distinct-from-in-sql/index.html @@ -634,7 +634,7 @@

    数据更新

    在更新数据时,使用 IS DISTINCT FROM 可以确保只有在数据实际变化时才进行更新,从而避免不必要的更新操作。

    1
    2
    3
    UPDATE users
    SET email = 'new_andrewsy@email.com'
    WHERE email IS DISTINCT FROM 'new_andrewsy@email.com';
    -

    这条查询会更新所有 email 不同于 ‘new_andrewsy@email.com‘ 的记录,包括那些 email 为 NULL 的记录。

    +

    这条查询会更新所有 email 不同于 ‘new_andrewsy@email.com‘ 的记录,包括那些 email 为 NULL 的记录。

    数据比较

    在进行复杂的数据比较时,尤其是涉及到 NULL 值时,IS DISTINCT FROM 提供了更直观的比较逻辑。例如,在合并两个数据集时,可以使用此操作符来确保唯一性。

    1
    2
    3
    4
    5
    6
    7
    8
    SELECT 
    *
    FROM
    dataset1
    FULL OUTER JOIN
    dataset2
    ON
    dataset1.id IS DISTINCT FROM dataset2.id
    @@ -891,18 +891,18 @@

    Andrewsy

    - - +
    @@ -912,18 +912,18 @@

    Andrewsy

    - - +
    @@ -933,18 +933,18 @@

    Andrewsy

    - - +
    @@ -954,18 +954,18 @@

    Andrewsy

    - - +
    @@ -975,18 +975,18 @@

    Andrewsy

    - - +
    diff --git a/2024/09/10/aws-s3-event-replacing-space-with-character-sign-in-object-key-name/index.html b/2024/09/10/aws-s3-event-replacing-space-with-character-sign-in-object-key-name/index.html index 52723cbd..2052dbcf 100644 --- a/2024/09/10/aws-s3-event-replacing-space-with-character-sign-in-object-key-name/index.html +++ b/2024/09/10/aws-s3-event-replacing-space-with-character-sign-in-object-key-name/index.html @@ -581,6 +581,10 @@

    @@ -854,18 +854,18 @@

    Andrewsy

    - - +
    @@ -875,18 +875,18 @@

    Andrewsy

    - - +
    @@ -896,18 +896,18 @@

    Andrewsy

    - - +
    @@ -917,18 +917,18 @@

    Andrewsy

    - - +
    diff --git a/2024/09/19/user-survey-feedback-sentiment-analysis-base-on-aws-cloud-solution/index.html b/2024/09/19/user-survey-feedback-sentiment-analysis-base-on-aws-cloud-solution/index.html index 05b7994c..ff58a446 100644 --- a/2024/09/19/user-survey-feedback-sentiment-analysis-base-on-aws-cloud-solution/index.html +++ b/2024/09/19/user-survey-feedback-sentiment-analysis-base-on-aws-cloud-solution/index.html @@ -593,6 +593,10 @@

    + + Python + + AWS @@ -605,10 +609,6 @@

    - Python - - @@ -841,18 +841,18 @@

    Andrewsy

    - - +
    @@ -862,18 +862,18 @@

    Andrewsy

    - - +
    @@ -883,18 +883,18 @@

    Andrewsy

    - - +
    @@ -904,18 +904,18 @@

    Andrewsy

    - - +
    @@ -925,18 +925,18 @@

    Andrewsy

    - - +
    diff --git a/2024/10/09/aws-glue-iceberg-tables-schema-can-t-be-updated-with-pulumi/index.html b/2024/10/09/aws-glue-iceberg-tables-schema-can-t-be-updated-with-pulumi/index.html index 7a389a1f..7a2bbce2 100644 --- a/2024/10/09/aws-glue-iceberg-tables-schema-can-t-be-updated-with-pulumi/index.html +++ b/2024/10/09/aws-glue-iceberg-tables-schema-can-t-be-updated-with-pulumi/index.html @@ -614,6 +614,10 @@

    @@ -887,18 +887,18 @@

    Andrewsy

    - - +
    @@ -908,18 +908,18 @@

    Andrewsy

    - - +
    @@ -929,18 +929,18 @@

    Andrewsy

    - - +
    @@ -950,18 +950,18 @@

    Andrewsy

    - - +
    diff --git a/2024/10/16/the-useeffect-of-react-runs-twice-in-development-mode/index.html b/2024/10/16/the-useeffect-of-react-runs-twice-in-development-mode/index.html index fb927121..a2a5aef4 100644 --- a/2024/10/16/the-useeffect-of-react-runs-twice-in-development-mode/index.html +++ b/2024/10/16/the-useeffect-of-react-runs-twice-in-development-mode/index.html @@ -840,18 +840,18 @@

    Andrewsy

    - - +
    @@ -861,18 +861,18 @@

    Andrewsy

    - - +
    @@ -882,18 +882,18 @@

    Andrewsy

    - - +
    @@ -903,18 +903,18 @@

    Andrewsy

    - - +
    @@ -924,18 +924,18 @@

    Andrewsy

    - - +
    diff --git a/2024/10/23/intelligent-sql-generator-base-on-spring-ai-with-aws-bedrock/index.html b/2024/10/23/intelligent-sql-generator-base-on-spring-ai-with-aws-bedrock/index.html index 6581187c..99973542 100644 --- a/2024/10/23/intelligent-sql-generator-base-on-spring-ai-with-aws-bedrock/index.html +++ b/2024/10/23/intelligent-sql-generator-base-on-spring-ai-with-aws-bedrock/index.html @@ -907,18 +907,18 @@

    Andrewsy

    - - +
    @@ -928,18 +928,18 @@

    Andrewsy

    - - +
    @@ -949,18 +949,18 @@

    Andrewsy

    - - +
    @@ -970,18 +970,18 @@

    Andrewsy

    - - +
    @@ -991,18 +991,18 @@

    Andrewsy

    - - +
    diff --git a/2024/10/29/data-analysis-chart-by-generative-ai/index.html b/2024/10/29/data-analysis-chart-by-generative-ai/index.html index bf4668cb..f8dcaec6 100644 --- a/2024/10/29/data-analysis-chart-by-generative-ai/index.html +++ b/2024/10/29/data-analysis-chart-by-generative-ai/index.html @@ -617,14 +617,14 @@

    AI - - AWS - - Python + + AWS + + SQL @@ -861,18 +861,18 @@

    Andrewsy

    - - +
    @@ -882,18 +882,18 @@

    Andrewsy

    - - +
    @@ -903,18 +903,18 @@

    Andrewsy

    - - +
    @@ -924,18 +924,18 @@

    Andrewsy

    - - +
    @@ -945,18 +945,18 @@

    Andrewsy

    - - +
    diff --git a/2024/11/01/understanding-the-azure-openai-gpt-4-api-role/index.html b/2024/11/01/understanding-the-azure-openai-gpt-4-api-role/index.html index 2a2efd6c..67c040e3 100644 --- a/2024/11/01/understanding-the-azure-openai-gpt-4-api-role/index.html +++ b/2024/11/01/understanding-the-azure-openai-gpt-4-api-role/index.html @@ -891,18 +891,18 @@

    Andrewsy

    - - +
    @@ -912,18 +912,18 @@

    Andrewsy

    - - +
    @@ -933,18 +933,18 @@

    Andrewsy

    - - +
    @@ -954,18 +954,18 @@

    Andrewsy

    - - +
    @@ -975,18 +975,18 @@

    Andrewsy

    - - +
    diff --git a/2024/11/05/what-is-prompt-engineer/index.html b/2024/11/05/what-is-prompt-engineer/index.html index 6eead9da..cf259b46 100644 --- a/2024/11/05/what-is-prompt-engineer/index.html +++ b/2024/11/05/what-is-prompt-engineer/index.html @@ -930,18 +930,18 @@

    Andrewsy

    - - +
    @@ -951,18 +951,18 @@

    Andrewsy

    - - +
    @@ -972,18 +972,18 @@

    Andrewsy

    - - +
    @@ -993,18 +993,18 @@

    Andrewsy

    - - +
    @@ -1014,18 +1014,18 @@

    Andrewsy

    - - +
    diff --git a/2024/11/12/introduction-to-langchain-make-ai-smarter-and-easy-to-use/index.html b/2024/11/12/introduction-to-langchain-make-ai-smarter-and-easy-to-use/index.html index 2ef02477..c08f4697 100644 --- a/2024/11/12/introduction-to-langchain-make-ai-smarter-and-easy-to-use/index.html +++ b/2024/11/12/introduction-to-langchain-make-ai-smarter-and-easy-to-use/index.html @@ -689,23 +689,6 @@

    - -
    -
    - -
    -
    -
    - Next: -
    详解数据分析中的方差,标准差和异常值的使用
    -
    - -
    -
    - @@ -872,18 +855,18 @@

    Andrewsy

    - - +
    @@ -893,18 +876,18 @@

    Andrewsy

    - - +
    @@ -914,18 +897,18 @@

    Andrewsy

    - - +
    @@ -935,18 +918,18 @@

    Andrewsy

    - - +
    @@ -956,18 +939,18 @@

    Andrewsy

    - - +
    diff --git a/archives/2020/09/index.html b/archives/2020/09/index.html new file mode 100644 index 00000000..0ea9d24c --- /dev/null +++ b/archives/2020/09/index.html @@ -0,0 +1,1524 @@ + + + + + + + + + + + + + + Archives: 2020/9 | Andrewsy's Space + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    +
    +
    +
    +
    +
    +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    +
    + + + + +
    +
    +
    +
    + Archives +
    +
    + + + + +
    +
    +
    + +
    + +
    + Archives +
    +
    + + + + +
    +
    +
    + + + + +
    +
    +
    + +
    +
    +
    + +
    +
    + +
    +
    + + + + + + +
    + + + + + + + + + + +
    + +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    + +
    + + + +
    + + + + + + + + + \ No newline at end of file diff --git a/archives/2020/index.html b/archives/2020/index.html index 89a75cbf..149831bf 100644 --- a/archives/2020/index.html +++ b/archives/2020/index.html @@ -565,6 +565,40 @@

    2020 +
  • + +
    + + + +
    + +
  • + + +
  • + +
    + + + +
    + +
  • + +
  • diff --git a/archives/2024/11/index.html b/archives/2024/11/index.html index b8a7cdaa..de8c1f6d 100644 --- a/archives/2024/11/index.html +++ b/archives/2024/11/index.html @@ -565,23 +565,6 @@

    2024

    -
  • - -
    - - - -
    - -
  • - -
  • diff --git a/archives/2024/index.html b/archives/2024/index.html index 6d2d2b44..5bfa203a 100644 --- a/archives/2024/index.html +++ b/archives/2024/index.html @@ -565,23 +565,6 @@

    2024

    -
  • - -
    - - - -
    - -
  • - -
  • @@ -734,6 +717,23 @@

  • + +
  • + +
    + + + +
    + +
  • + diff --git a/archives/2024/page/2/index.html b/archives/2024/page/2/index.html index 891e63f5..1f12e37e 100644 --- a/archives/2024/page/2/index.html +++ b/archives/2024/page/2/index.html @@ -565,23 +565,6 @@

    2024 -
  • - -
    - - - -
    - -
  • - -
  • @@ -734,6 +717,23 @@

  • + +
  • + +
    + + + +
    + +
  • + diff --git a/archives/2024/page/3/index.html b/archives/2024/page/3/index.html index d01085ab..735e0548 100644 --- a/archives/2024/page/3/index.html +++ b/archives/2024/page/3/index.html @@ -565,23 +565,6 @@

    2024 -
  • - -
    - - - -
    - -
  • - -
  • diff --git a/archives/index.html b/archives/index.html index 5fce299d..465a3168 100644 --- a/archives/index.html +++ b/archives/index.html @@ -565,23 +565,6 @@

    2024

    -
  • - -
    - - - -
    - -
  • - -
  • @@ -734,6 +717,23 @@

  • + +
  • + +
    + + + +
    + +
  • + diff --git a/archives/page/10/index.html b/archives/page/10/index.html index 90519b91..d5a15e4f 100644 --- a/archives/page/10/index.html +++ b/archives/page/10/index.html @@ -565,6 +565,23 @@

    2015 +
  • + +
    + + + +
    + +
  • + +
  • @@ -717,23 +734,6 @@

  • - -
  • - -
    - - - -
    - -
  • - diff --git a/archives/page/11/index.html b/archives/page/11/index.html index ababece0..af583837 100644 --- a/archives/page/11/index.html +++ b/archives/page/11/index.html @@ -565,6 +565,23 @@

    2015 +
  • + +
    + + + +
    + +
  • + +
  • @@ -717,23 +734,6 @@

  • - -
  • - -
    - - - -
    - -
  • - diff --git a/archives/page/12/index.html b/archives/page/12/index.html index cab936d5..f3ab673c 100644 --- a/archives/page/12/index.html +++ b/archives/page/12/index.html @@ -565,6 +565,23 @@

    2015 +
  • + +
    + + + +
    + +
  • + +
  • @@ -717,23 +734,6 @@

  • - -
  • - -
    - - - -
    - -
  • - diff --git a/archives/page/13/index.html b/archives/page/13/index.html index 245f3394..8a9616f1 100644 --- a/archives/page/13/index.html +++ b/archives/page/13/index.html @@ -565,6 +565,23 @@

    2015 +
  • + +
    + + + +
    + +
  • + +
  • @@ -717,23 +734,6 @@

  • - -
  • - -
    - - - -
    - -
  • - diff --git a/archives/page/14/index.html b/archives/page/14/index.html index eb44e384..d712f1f4 100644 --- a/archives/page/14/index.html +++ b/archives/page/14/index.html @@ -565,6 +565,23 @@

    2015 +
  • + +
    + + + +
    + +
  • + +
  • @@ -723,23 +740,6 @@

  • - -
  • - -
    - - - -
    - -
  • - diff --git a/archives/page/15/index.html b/archives/page/15/index.html index e6627e00..d1fa53fd 100644 --- a/archives/page/15/index.html +++ b/archives/page/15/index.html @@ -565,6 +565,23 @@

    2014 +
  • + +
    + + + +
    + +
  • + +
  • @@ -717,23 +734,6 @@

  • - -
  • - -
    - - - -
    - -
  • - diff --git a/archives/page/16/index.html b/archives/page/16/index.html index 87cc4e25..fad2a981 100644 --- a/archives/page/16/index.html +++ b/archives/page/16/index.html @@ -565,6 +565,23 @@

    2014 +
  • + +
    + + + +
    + +
  • + +
  • @@ -717,23 +734,6 @@

  • - -
  • - -
    - - - -
    - -
  • - diff --git a/archives/page/17/index.html b/archives/page/17/index.html index 2576b113..059fe25e 100644 --- a/archives/page/17/index.html +++ b/archives/page/17/index.html @@ -565,6 +565,23 @@

    2014 +
  • + +
    + + + +
    + +
  • + +
  • diff --git a/archives/page/2/index.html b/archives/page/2/index.html index 7756fbc1..9a82b12f 100644 --- a/archives/page/2/index.html +++ b/archives/page/2/index.html @@ -565,23 +565,6 @@

    2024

    -
  • - -
    - - - -
    - -
  • - -
  • @@ -734,6 +717,23 @@

  • + +
  • + +
    + + + +
    + +
  • + diff --git a/archives/page/3/index.html b/archives/page/3/index.html index 096c7afb..948841c1 100644 --- a/archives/page/3/index.html +++ b/archives/page/3/index.html @@ -565,23 +565,6 @@

    2024 -
  • - -
    - - - -
    - -
  • - -
  • @@ -740,6 +723,23 @@

  • + +
  • + +
    + + + +
    + +
  • + diff --git a/archives/page/4/index.html b/archives/page/4/index.html index c6b6e5d1..1189c4d6 100644 --- a/archives/page/4/index.html +++ b/archives/page/4/index.html @@ -565,23 +565,6 @@

    2023 -
  • - -
    - - - -
    - -
  • - -
  • @@ -734,6 +717,23 @@

  • + +
  • + +
    + + + +
    + +
  • + diff --git a/archives/page/5/index.html b/archives/page/5/index.html index bb1ed2f8..1995b194 100644 --- a/archives/page/5/index.html +++ b/archives/page/5/index.html @@ -560,29 +560,6 @@

    -
    -
    - 2023 -
    - -
  • - -
    - - - -
    - -
  • - - -
    2022 @@ -740,6 +717,23 @@

  • + +
  • + +
    + + + +
    + +
  • + diff --git a/archives/page/6/index.html b/archives/page/6/index.html index ce2aa1b7..3fa01084 100644 --- a/archives/page/6/index.html +++ b/archives/page/6/index.html @@ -565,23 +565,6 @@

    2022 -
  • - -
    - - - -
    - -
  • - -
  • @@ -740,6 +723,23 @@

  • + +
  • + +
    + + + +
    + +
  • + diff --git a/archives/page/7/index.html b/archives/page/7/index.html index e74760c8..2e4d9a3c 100644 --- a/archives/page/7/index.html +++ b/archives/page/7/index.html @@ -568,42 +568,59 @@

  • - - +
  • + +
    +
    + 2020 +
    +
  • - - +
  • +
  • -
  • +
  • @@ -723,23 +740,6 @@

  • - -
  • - -
    - - - -
    - -
  • - diff --git a/archives/page/8/index.html b/archives/page/8/index.html index f8ba1487..ab9b8e14 100644 --- a/archives/page/8/index.html +++ b/archives/page/8/index.html @@ -560,6 +560,29 @@

    +
    +
    + 2020 +
    + +
  • + +
    + + + +
    + +
  • + + +
    2019 @@ -729,29 +752,6 @@

    - - -
    -
    - 2016 -
    - -
  • - -
    - - - -
    - -
  • - diff --git a/archives/page/9/index.html b/archives/page/9/index.html index b2396b13..c31856d6 100644 --- a/archives/page/9/index.html +++ b/archives/page/9/index.html @@ -565,6 +565,23 @@

    2016 +
  • + +
    + + + +
    + +
  • + +
  • @@ -723,23 +740,6 @@

  • - -
  • - -
    - - - -
    - -
  • - diff --git a/atom.xml b/atom.xml index f0588e92..fc026e77 100644 --- a/atom.xml +++ b/atom.xml @@ -6,7 +6,7 @@ - 2024-11-15T06:39:09.202Z + 2024-11-18T09:56:32.361Z https://stonefishy.github.io/ @@ -16,38 +16,12 @@ Hexo - - 详解数据分析中的方差,标准差和异常值的使用 - - https://stonefishy.github.io/2024/11/15/data-analysis-standard-deviation-variance-outliers/ - 2024-11-15T11:22:27.000Z - 2024-11-15T06:39:09.202Z - - 在数据分析中,方差(Variance)标准差(Standard Deviation)异常值(Outliers)是分析数据分布和变异性的重要统计工具。理解这些概念,并能够有效地应用它们,对于数据清洗、探索性数据分析(EDA)以及构建准确的预测模型至关重要。

    方差(Variance)

    方差是反映数据集中各数据点与数据均值之间差异的一个重要指标。它的大小可以用来衡量数据的离散程度。具体来说,方差越大,数据的变动越大,反之则越小。

    x_i为数据集中的每个数据点,
    μ 为数据集的均值,
    n 为数据的总个数。
    方差就是所有数据点与均值的差值的平方的平均值。

    方差计算时,我们将每个数据点与均值的差值进行平方,然后求平均。方差的单位是原始数据单位的平方,因此有时它的解释意义不如标准差直观。

    标准差(Standard Deviation)

    标准差是方差的平方根。与方差不同,标准差的单位与原始数据相同,因此更易于理解。标准差越大,说明数据的波动性越大;标准差越小,则说明数据较为集中。

    标准差的计算公式为:

    方差的平方根即为标准差。

    标准差与方差的关系

    标准差和方差都用来描述数据的离散程度。标准差比方差更常用,因为它的单位与数据本身一致,解释起来更加直观。

    异常值(Outliers)

    异常值是指在数据集中远离其他数据点的值。异常值的存在往往是由于数据录入错误、测量误差,或者数据本身存在极端波动。异常值会影响数据的分布,进而影响数据分析结果,尤其是均值、方差和标准差等统计量。

    如何识别异常值

    常用的异常值检测方法有:

    箱线图法(Boxplot):通过计算四分位数和四分位距(IQR)来识别异常值。通常,位于Q1 - 1.5 * IQR 或 Q3 + 1.5 * IQR之外的数据点被认为是异常值。
    Z-score法:通过计算数据点与均值的标准差倍数来判断数据点是否为异常值。一般认为,Z-score超过3或小于-3的数据为异常值。

    异常值的处理

    在数据分析中,我们通常会在数据预处理阶段识别并处理异常值。常见的处理方法包括:

    • 删除异常值:直接从数据集中删除异常值。
    • 替换异常值:用均值、中位数等替代异常值。
    • 保留异常值:在某些情况下,异常值可能包含重要信息,因此也可以选择保留异常值。

    举个列子

    假设我们有一个包含学生成绩的数据集,其中有一个异常值(200)。

    1
    2
    3
    4
    5
    6
    7
    import numpy as np
    import pandas as pd
    from scipy import stats
    import matplotlib.pyplot as plt

    # 创建数据集:一组学生成绩,其中包括异常值200
    data = [80, 85, 90, 92, 85, 88, 75, 78, 92, 95, 100, 85, 92, 88, 85, 200]

    计算方差和标准差

    我们使用NumPy来计算数据的方差和标准差。

    1
    2
    3
    4
    5
    6
    7
    # 计算方差
    variance_value = np.var(data)
    print(f"方差 (Variance): {variance_value}")

    # 计算标准差
    std_dev_value = np.std(data)
    print(f"标准差 (Standard Deviation): {std_dev_value}")

    输出:

    1
    2
    方差 (Variance): 781.734375
    标准差 (Standard Deviation): 27.959513139538036

    从输出可以看到,这组数据的方差为781.73,标准差为27.95,这表明数据的离散程度相对较高。特别是最后的异常值(200)对标准差的影响很大。

    异常值检测与处理

    使用Z-score检测异常值

    我们使用Z-score来检测数据中的异常值。如果Z-score大于3或小于-3,则该数据点被认为是异常值。

    1
    2
    3
    4
    5
    6
    7
    # 计算Z-score
    z_scores = stats.zscore(data)
    print(f"Z-scores: {z_scores}")

    # 检测异常值
    outliers = [data[i] for i in range(len(data)) if np.abs(z_scores[i]) > 3]
    print(f"检测到的异常值: {outliers}")

    使用scipystats模块可以计算Z-score。输出结果中,Z-score大于3的异常值是200。

    输出:

    1
    2
    3
    4
    Z-scores: [-0.51413628 -0.33530627 -0.15647626 -0.08494425 -0.33530627 -0.22800826
    -0.69296629 -0.58566828 -0.08494425 0.02235375 0.20118376 -0.33530627
    -0.08494425 -0.22800826 -0.33530627 3.77778395]
    检测到的异常值: [200]

    从输出结果中可以看出,Z-score大于3的异常值是200。这是由于200与其他数据点的差异过大,Z-score值为9.39,远远超过了3。

    使用箱线图检测异常值

    我们可以绘制箱线图来可视化数据并检测异常值。可以使用matplotlib库绘制箱线图。

    1
    2
    3
    4
    # 绘制箱线图
    plt.boxplot(data)
    plt.title("Boxplot Chart")
    plt.show()
    箱线图
    箱线图

    从箱线图中,200的值处于箱体外,因此被视为异常值。

    处理异常值

    在实际分析中,我们可以选择处理异常值。以下是几种常见的方法:

    删除异常值

    1
    2
    3
    # 删除异常值(Z-score大于3的点)
    cleaned_data = [data[i] for i in range(len(data)) if np.abs(z_scores[i]) <= 3]
    print(f"删除异常值后的数据: {cleaned_data}")

    输出:

    1
    删除异常值后的数据: [80, 85, 90, 92, 85, 88, 75, 78, 92, 95, 100, 85, 92, 88, 85]

    替换异常值

    1
    2
    3
    4
    # 替换异常值为中位数
    median_value = np.median(data)
    cleaned_data_with_median = [median_value if np.abs(z_scores[i]) > 3 else data[i] for i in range(len(data))]
    print(f"替换异常值后的数据: {cleaned_data_with_median}")

    输出:

    1
    替换异常值后的数据: [80, 85, 90, 92, 85, 88, 75, 78, 92, 95, 100, 85, 92, 88, 85, 88.0]

    总结

    • 方差和标准差是用于衡量数据离散程度的基本统计量。方差的单位为原始数据单位的平方,而标准差则直接以原始单位表示,更容易解释。
    • 异常值是指那些在数据中与其他数据点差异较大的值,它们可能影响统计分析的结果。在数据清洗阶段,识别和处理异常值是至关重要的一步。

    在Python中,我们可以利用NumPySciPyMatplotlib等库来计算方差、标准差,识别异常值,并根据需要处理异常值。通过掌握这些基本概念和技术,我们数据分析师可以更有效地理解数据的分布特征,发现数据中的潜在问题,做出更加精准的数据分析。

    ]]>
    - - - - - <p>在数据分析中,<code>方差(Variance)</code>、<code>标准差(Standard Deviation)</code>和<code>异常值(Outliers)</code>是分析数据分布和变异性的重要统计工具。理解这些概念,并能够有效地应用它们,对于<c - - - - - - - - - - - -
    - Introduction to LangChain: Make AI Smarter and Easier to use https://stonefishy.github.io/2024/11/12/introduction-to-langchain-make-ai-smarter-and-easy-to-use/ 2024-11-12T13:49:12.000Z - 2024-11-15T06:39:09.206Z + 2024-11-18T09:56:32.361Z

    Have you ever wondered how some apps and websites can have conversations with you, answer your questions? Many of these apps use artificial intelligence (AI), like the chatbot you might use to ask questions or get advice. But creating these smart systems can be tricky. This is where a tool called LangChain comes in to help!

    LangChain is a framework that makes it easier for developers to build applications that use AI models, like chatbots or smart helpers. In this blog, we’re going to explain what LangChain is, how it works, and why it’s useful for making AI apps.

    What is LangChain?

    LangChain is a tool for developers that helps them build applications using large language models (LLMs)—the same kind of AI that powers chatbots, writing assistants, and more. LLMs can understand and generate text in a way that sounds like a real person. However, using these models to make powerful apps can be complicated. LangChain makes it easier by offering ready-made building blocks to connect these models to other tools, data, and even databases.

    Think of LangChain like a set of Lego blocks that you can use to build cool things with AI. It saves developers time by giving them ready-made pieces to use, rather than having to create everything from scratch.

    Features of LangChain

    Let’s break down some of the cool features LangChain offers and how they help developers make smarter apps.

    LangChain Features
    LangChain Features
    1. Chains: Putting Multiple Steps Together
      Imagine you have a robot that can help with math homework. The robot might need to do multiple things to solve a problem. First, it could look up the math formula, then solve the problem, and finally explain the answer. In LangChain, these steps are called chains.

      A chain is a sequence of actions where each step depends on the previous one. For example, you could create a chain where:

      First, the app asks the AI to pull data from a website.
      Then, it uses that data to answer a question.
      Finally, it summarizes the answer for the user.

    2. Prompt Management: Talking to AI the Right Way
      When you talk to an AI, how you ask your question or give your instruction is really important. That’s called a prompt. LangChain helps developers make the best prompts by letting them create templates. These templates let developers easily change certain parts of the prompt without having to rewrite it every time.

      For example, if you wanted to ask the AI to summarize a story, you could have a prompt like this:

      “Please summarize the following story: {story}”

      In this template, {story} is a placeholder that can be replaced with any story you want the AI to summarize.

    3. Agents: Letting AI Decide What to Do Next
      Sometimes, a smart system needs to decide what to do next based on the information it gets. For example, if you ask an AI about the weather, it might decide to pull the latest weather data from the internet. This decision-making is done by agents.

      An agent is like a helper that looks at the information it gets and chooses the best action. LangChain helps developers build agents that can make these decisions automatically.

    4. Memory: Remembering What Happened Before
      Have you ever talked to a chatbot and then later felt like it forgot what you said earlier? That can make a conversation feel weird. LangChain helps solve this problem by letting the AI remember what was said earlier in the conversation. This feature is called memory.

      or example, if you ask a chatbot for homework help and then ask a follow-up question, LangChain can help the AI remember the first question and give a more useful answer based on that memory.

    5. Integrations: Connecting to Other Tools and Websites
      Sometimes, an AI app needs to talk to other systems to get more information. LangChain makes this easy by letting developers connect their AI app to other tools This is like having a personal assistant that not only talks to you but also has access to tons of information online.

      For example, an AI app could pull up the latest sports scores, or check the weather for you, using real-time data from the internet.

    6. Retrieval-Augmented Generation (RAG): Getting Smarter Answers
      LangChain also lets AI search for information in real-time. This is called retrieval-augmented generation (RAG). It allows the AI to look up the latest data, like news stories or facts, and use that information to create smarter answers.

      For example, if you ask about the latest trends in video games, the AI can search the web for the most up-to-date information and then explain it to you.

    Why Do Developers Use LangChain?

    There are several reasons why developers might want to use LangChain:

    1. Makes It Easier to Build AI Apps
      Instead of starting from scratch, LangChain gives developers tools that speed up the process of creating AI apps. Developers can use LangChain’s building blocks to create powerful applications without needing to write everything by hand.

    2. It’s Flexible
      LangChain can be used for a wide variety of apps. Whether you want to build a chatbot, a smart search engine, or an app that helps you study, LangChain has tools that make it easier to put everything together.

    3. Saves Time
      Developers don’t have to spend a lot of time figuring out how to make an AI model work with a database or how to chain steps together. LangChain does much of the heavy lifting, so developers can focus on the fun and creative parts of building their apps.

    4. It’s Open-Source
      LangChain is free for anyone to use and improve. It’s open-source, which means developers from all over the world can contribute to making it better. If you’re learning to code or want to help improve the tool, you can!

    Real-World Examples of LangChain

    LangChain is already being used in many cool ways. Here are a few examples:

    1. Chatbots
      Developers can use LangChain to build chatbots that remember previous conversations and can talk to you like a real person. For example, you could create a chatbot to help you study for a test, and it would remember what you’ve learned so far.

    2. Smart Assistants
      LangChain can help build systems that pull information from the internet and use AI to explain things in simple terms. For example, if you’re stuck on a science problem, an AI could look up the topic online and explain it to you in a way you understand.

    3. Automated Content Creation
      Some apps use LangChain to automatically write articles or summaries. For example, a news website could use LangChain to summarize long articles or pull out the key points from reports, saving readers time.

    4. Personalized Search Engines
      LangChain can be used to build search engines that don’t just give you a list of links but also summarize the best results for you. This could help you find the exact answer you need faster.

    How to Get Started with LangChain

    If you’re excited to try out LangChain, here’s how you can get started:

    1. Install Python: LangChain works with Python, a programming language that’s great for beginners.
    2. Install LangChain: You can install LangChain by running the command pip install langchain in Python.
    3. Start Building: Once LangChain is installed, you can start building your own AI-powered applications! LangChain has tutorials and guides to help you learn how to use it.

    For more information, check out the LangChain documentation at https://python.langchain.com/docs/introduction/

    Conclusion

    LangChain is a super helpful tool for developers who want to build cool apps powered by AI. It makes it easier to connect different parts of an app, like databases or the web, with a language model that can understand and generate text. Whether it’s helping with homework, answering questions, or building a chatbot, LangChain is a great way to build smarter, more interactive applications.

    ]]>
    @@ -71,7 +45,7 @@ https://stonefishy.github.io/2024/11/05/what-is-prompt-engineer/ 2024-11-05T14:08:09.000Z - 2024-11-15T06:39:09.206Z + 2024-11-18T09:56:32.361Z As artificial intelligence (AI) models, especially large language models (LLMs) like OpenAI’s GPT series, have become increasingly sophisticated, a new role has emerged in the AI ecosystem: the Prompt Engineer. The term might sound technical or niche, but it’s actually pivotal to leveraging AI models effectively. Whether you’re interacting with AI in your personal or professional life, the quality of the interaction largely depends on how well the prompt is designed. This article will explore what a prompt engineer does, the best practices for writing effective prompts, and provide examples comparing outputs with and without a prompt engineer’s expertise.

    What is a Prompt Engineer?

    A Prompt Engineer is someone who specializes in crafting, refining, and optimizing prompts to ensure that AI models respond with the most relevant,accurate,and actionable information. The role requires a blend of creativity, technical understanding, and knowledge of the AI’s underlying model architecture.

    In essence, the prompt engineer’s job is to “speak” the language of the AI model. Since AI models like GPT-3 or GPT-4 don’t “think” like humans, their responses depend heavily on how the question or task is framed. A prompt engineer ensures that the right context, constraints, and phrasing are in place to guide the model toward producing the most useful responses.

    Why is Prompt Engineering Important?

    While AI models are capable of generating human-like text and performing complex tasks, their outputs are highly sensitive to the structure of the prompt. The same AI model could provide vastly different answers depending on the way a question is asked. Prompt engineers understand this sensitivity and use it to maximize the effectiveness of the interaction with AI.

    Here are some reasons why prompt engineering is important:

    Maximizing output quality: Well-designed prompts improve the accuracy, relevance, and clarity of responses.
    Reducing errors: By properly framing a prompt, prompt engineers can help reduce misunderstandings or irrelevant responses.
    Efficiency: Instead of relying on trial and error to get useful responses, prompt engineers streamline the interaction process, saving time and resources.
    Contextuality: A good prompt will provide the necessary context for the model, ensuring that the response is in line with the user’s expectations.

    The Path of a Prompt Engineer

    The process that a prompt engineer follows to ensure optimal results involves several stages. Each stage builds upon the last, leading to an iterative cycle that refines both the prompt and the AI’s output. Here’s a breakdown of the typical path of a prompt engineer:

    The Path of a Prompt Engineer
    The Path of a Prompt Engineer

    1.Task Understanding:

    Before crafting any prompt, the first step is to fully understand the task at hand. This involves clarifying the user’s goal, determining the desired output format, and understanding the nuances of the request. A deep understanding of the problem ensures that the prompt engineer can craft a question or instruction that addresses all necessary aspects.

    Example: If the task is to generate a poem, the prompt engineer will need to understand the tone, style, and subject matter required.

    2.Crafting Prompts:

    The next step is to craft the prompt. This involves framing the task clearly, with enough specificity to guide the AI toward the desired output. Crafting an effective prompt is not about asking a single question, but about providing the model with the right context, constraints, and direction.

    Example: Instead of asking, “Write a poem,” a more specific prompt might be, “Write a rhyming poem about the beauty of autumn, focusing on imagery and feelings of nostalgia.”

    3.Prompt Alignment:

    At this stage, the prompt must be aligned with the intended outcome. This means considering the AI model’s strengths and limitations and ensuring that the prompt leads the AI to produce a response that fits the desired format, tone, and depth. The prompt should ensure the model understands the context of the task, as well as any constraints or preferences that need to be respected.

    Example: For a technical article, aligning the prompt would involve ensuring the language model knows to prioritize clarity, accuracy, and technical precision.

    4.Optimizing Prompt:

    After alignment, the prompt may need further refinement. This step involves fine-tuning the wording, simplifying complex instructions, or narrowing down the scope to ensure that the prompt is as effective as possible. Optimization often involves making the prompt more specific and reducing ambiguity.

    Example: “Write a 300-word summary of the research paper on AI ethics, emphasizing the ethical dilemmas and implications for technology companies.” This version is more optimized than a broad, vague instruction.

    5.AI Model Processing:

    Once the optimized prompt is provided, the AI model processes it and generates a response. This is where the model applies its underlying machine learning architecture, leveraging its training data to formulate a response.

    Example: The AI will analyze the prompt, consider patterns in its training data, and produce a response based on its understanding of the language and context.

    6.Generating Output:

    The AI model generates the initial output based on the prompt. Depending on the AI model’s capabilities, this output may vary in length, style, accuracy, or even relevance to the task.

    Example: If the task was to summarize a paper, the output might include key findings, conclusions, and references to methodology.

    7.Output Refinement:

    Once the output is generated, prompt engineers review and refine it. This may involve removing irrelevant information, adjusting tone, adding details, or improving clarity. In some cases, the output might need to be restructured to fit the desired format.

    Example: If the AI’s response contains tangential information or lacks clarity, the prompt engineer would reword it or fine-tune the output to better align with the user’s expectations.

    8.Iterative Improvement:

    Finally, the process of prompt engineering is iterative. After refining the output, prompt engineers analyze the effectiveness of the response and assess how the prompt can be improved for future tasks. This leads to continuous improvement, ensuring that future prompts are even more optimized, concise, and aligned with user needs.

    Example: The engineer might adjust the prompt for the next interaction to ensure more relevant details or a more focused response.

    Key Skills and Tools of a Prompt Engineer

    Prompt engineering requires a variety of skills:

    Understanding of Language Models:
    A prompt engineer should have a deep understanding of how LLMs like GPT process language. Knowing their strengths and weaknesses allows for better prompt design.

    Communication Skills:
    Effective communication is critical, as prompt engineers must be able to convey complex instructions in a way that the model can interpret clearly.

    Creativity and Experimentation:
    Crafting effective prompts often requires trial and error, testing different phrasings and structures to see what works best.

    Analytical Thinking:
    Understanding how different types of inputs influence the model’s outputs and iterating to improve results.

    In addition to these skills, prompt engineers also use tools to test and refine their prompts. For instance, platforms like OpenAI’s Playground allow users to experiment with various prompts in real-time, while more advanced professionals might leverage APIs to automate or scale their prompt engineering work.

    Best Practices for Prompt Engineering

    There are several strategies that a prompt engineer can employ to get the most out of a language model. Below are some of the best practices:

    1. Be Specific and Clear: Ambiguous prompts can confuse AI models, leading to vague or incorrect responses. Make sure the prompt is clear and as specific as possible.

      Example: Instead of asking, “Tell me about AI,” a more specific prompt would be, “Can you explain the difference between supervised and unsupervised learning in AI?”

    2. Use Context Effectively: Providing context can guide the model to better understand the desired output.

      Example: Instead of saying, “Write a poem,” say, “Write a rhyming poem about the beauty of autumn with a melancholic tone.”

    3. Limit the Scope: Sometimes, less is more. Limit the scope of the prompt to avoid overwhelming the model with too much information or too many instructions.

      Example: Instead of “Write an article about the importance of artificial intelligence in modern business, covering all aspects of AI from machine learning to natural language processing,” you could say, “Write a short article explaining the importance of AI in customer service.”

    4. Test and Iterate: A prompt engineer should test various iterations of a prompt to identify the most effective structure.

    5. Give Examples: For tasks requiring specific output formats, include an example to guide the model.

      Example: If you want a bulleted list, you could say, “List the steps in a process to build a website. For example: Step 1: Plan the layout.”

    6. Use Temperature and Max Tokens: Some models allow you to adjust the temperature (which controls randomness) and the max tokens (which sets a character limit) to control the output. These can be adjusted to fine-tune the model’s output.

    Comparing with and without Prompt Engineering

    Now let’s look at some concrete examples of how a well-crafted prompt versus a poorly constructed one can affect the outcome.

    Example 1: Writing a Research Summary

    • Without a Prompt Engineer:

    Prompt: “Summarize this research paper.”

    The AI may generate a generic or overly simplistic summary, without capturing the key aspects of the paper, such as methodology, results, and conclusions.

    • With a Prompt Engineer:

    Prompt: “Summarize the research paper titled ‘Exploring AI Ethics in Autonomous Vehicles.’ Focus on the methodology, key findings, and implications for policy. Keep the summary under 200 words.”

    The AI’s response will be more targeted, concise, and aligned with the user’s expectations, providing a detailed summary that addresses the core aspects of the paper.

    Example 2: Writing a Creative Story

    • Without a Prompt Engineer:

    Prompt: “Write a story.”

    The story might lack direction, coherence, or creativity, leading to a generic or even nonsensical narrative.

    • With a Prompt Engineer:

    Prompt: “Write a short story set in a post-apocalyptic world where humans are living on Mars. The protagonist is a scientist struggling with the ethical implications of using artificial intelligence to terraform the planet. Make the tone introspective and thought-provoking.”

    The story produced will be richer, more engaging, and aligned with the specific context and themes the user wanted.

    Example 3: Asking for Code

    • Without a Prompt Engineer:

    Prompt: “Write a Python function.”

    The AI may generate a simple function, but it may not meet the user’s needs or lack important features such as error handling or optimization.

    • With a Prompt Engineer:

    Prompt: “Write a Python function to validate an email address using regular expressions. The function should return True if the email is valid and False if it is invalid. It should also handle common edge cases such as missing domain names or incorrect characters.”

    The AI’s response will be much more precise, including the correct implementation, error handling, and edge case considerations.

    Conclusion

    In the world of AI, a Prompt Engineer plays a critical role in ensuring that AI models deliver optimal results. The expertise of prompt engineers can dramatically influence the quality, relevance, and accuracy of responses from language models like GPT-4. By following best practices—such as being specific, providing context, testing different iterations, and using examples—they can significantly improve the interaction between humans and AI.

    As AI continues to evolve, the role of prompt engineering will become even more important, helping users and businesses unlock the full potential of artificial intelligence. Whether it’s writing, problem-solving, or complex technical tasks, the way we interact with AI will increasingly depend on how well we craft our prompts.

    ]]>
    @@ -103,7 +77,7 @@ https://stonefishy.github.io/2024/11/01/understanding-the-azure-openai-gpt-4-api-role/ 2024-11-01T10:34:01.000Z - 2024-11-15T06:39:09.206Z + 2024-11-18T09:56:32.361Z Azure OpenAI is a service provided by Microsoft that integrates OpenAI’s advanced language models into the Azure cloud platform. It allows developers to access and use OpenAI’s capabilities, such as natural language processing, code generation, and more, through Azure’s infrastructure.

    Recently, we have deployed our first version of the OpenAI GPT-4 model into the Azure cloud platform. This model is a powerful natural language model that can generate text based on a given prompt.

    What is GPT-4?

    GPT-4 is a transformer-based language model that was developed by OpenAI. It is a powerful language model that can generate text based on a given prompt. It has been trained on a large dataset of text and can generate coherent and engaging text that is often considered to be the next big thing in language models.

    How can I use the GPT-4 API?

    To use the GPT-4 API, you need to follow these steps:

    1. Create an Azure account.
    2. Create a resource group.
    3. Create a new OpenAI resource.
    4. Generate an API key.
    5. Use the API key to make API requests.

    1. Create an Azure account

    To use the GPT-4 API, you need to have an Azure account. If you don’t have one, you can create one for free by following the steps in the Azure sign-up page.

    2. Create a resource group

    Create a resource group to organize your Azure resources. To create a new resource group, follow these steps:

    1. Go to the Azure portal.
    2. Click on the Resource groups option in the left-hand menu.
    3. Click on the + Create button.
    4. Enter a name for your resource group and select your subscription.
    5. Click on the Review + create button.

    3. Create a new OpenAI resource

    To create a new OpenAI resource, follow these steps:

    1. Go to the Azure portal.
    2. Click on the Create a resource button.
    3. Search for OpenAI in the search bar.
    4. Click on the OpenAI resource.
    5. Click on the Create button.
    6. Enter a name for your OpenAI resource and select your subscription.
    7. Select the resource group you created earlier.
    8. Select the pricing tier.
    9. Click on the Create button.

    4. Generate an API key

    To generate an API key, follow these steps:

    1. Go to the Azure portal.
    2. Click on the All resources option in the left-hand menu.
    3. Search for your OpenAI resource.
    4. Click on the resource.
    5. Click on the Show access keys button.
    6. Copy the Key 1 value.

    5. Use the API key to make API requests

    To make API requests, we need to include the API key in the request headers. Here’s an example of how to make a request to the GPT-4 API with REST styles.

    1
    2
    3
    4
    curl $AZURE_OPENAI_ENDPOINT/openai/deployments/gpt-4o/chat/completions?api-version=2023-07-01-preview \
    -H "Content-Type: application/json" \
    -H "api-key: $AZURE_OPENAI_API_KEY" \
    -d '{"messages":[{"role": "system", "content": "There are 5 classifications: Suggestion, Meanless, Compliment, Complaint, Please provide a classification for user input."},{"role": "user", "content": "Does Azure OpenAI support customer managed keys?"},{"role": "assistant", "content": "A classification word"},{"role": "user", "content": "its great and easy to use"}]}'

    Place your API key and endpoint in the appropriate variables, and update which deployments model (gpt-4, gpt-4o or other models) and model version of your endpoint is using.

    The Azure OpenAI also supports multiple programming languages, including Python, JavaScript, and C#. You can use the API to generate text in your preferred programming language.

    GPT-4 Chat Roles

    In the above message parameter, you may notice that there are three roles: system, user, and assistant. The GPT-4 API supports three chat roles. Let's dig deeper into each role:

    System: This role sets the context or guidelines for the conversation. It’s where you can specify instructions or constraints for how the assistant should behave throughout the interaction.

    User: This role represents the input from the person interacting with the model. Any questions or prompts posed by the user fall under this role.

    Assistant: This role is for the model’s responses. It contains the output generated by the assistant based on the user input and the context provided by the system.

    In other words, the system role sets the context, the user role represents the input, and the assistant role contains the output.

    Use system to define the conversation’s tone, behavior, or rules.
    Use user for all queries or statements made by the person.
    Use assistant for the model’s replies.

    Example

    Let’s say we want to classify the product feedback classification as Suggestion, Meanless, Compliment, Complaint, or Others. We can use the GPT-4 API to generate text based on the given prompt and classify the feedback.

    First, we define the context or guidelines to let the assistant know what result we want to achieve. Given below content to the system role.

    1
    {"role": "system", "content": "There are 5 classifications: Suggestion, Meanless, Compliment, Complaint, Please provide a classification for user input."},

    And we only want OpenAI to reply with the classification word when user input is provided. So we define the assistant role content as a classification word.

    1
    {"role": "assistant", "content": "A classification word"}

    Now, we can ask the user to provide the feedback and provide the user role.

    1
    {"role": "user", "content": "its great and easy to use"}

    After the conversation, the assistant role will provide the classification word as Compliment. You will notice that there is a piece of json indicates the assistant role content value in the response. The OpenAI gpt-4o model knows “its great and easy to use” is a “Compliment” and provides the classification word as “Compliment”.

    1
    2
    3
    4
    "message": {
    "content": "Compliment",
    "role": "assistant"
    }
    Classification - its great and easy to use
    Classification - its great and easy to use

    Let’s try another user input.

    1
    {"role": "user", "content": "I'm in your walls"}
    Classification - I'm in your walls
    Classification - I'm in your walls

    The assistant role will provide the classification word as Meanless, because this input is not meaningful for any product feedback.

    Conclusion

    In this article, we have explored the role of the Azure OpenAI GPT-4 API and how it can be used to generate text. We have also learned about the chat roles and how to use them to classify the product feedback.

    ]]>
    @@ -130,7 +104,7 @@ https://stonefishy.github.io/2024/10/29/data-analysis-chart-by-generative-ai/ 2024-10-29T14:18:48.000Z - 2024-11-15T06:39:09.206Z + 2024-11-18T09:56:32.361Z In data analysis, we often need to create charts to visualize the data by using BI tools, such as Tableau, Power BI, AWS Quicksight, or Qlik Sense. These tools allow us to create interactive and visually appealing charts, which can help us to identify patterns and trends in the data.

    General Solution Architecture for Data Analysis BI Chart

    The general data analysis BI chart solution architecture like below:

    General Solution Architecture for Data Analysis BI Chart
    General Solution Architecture for Data Analysis BI Chart
    .

    Usually, the engineering team creates business charts by using BI tools after the data is ETL processed. If the business wants to see a data distribution chart, they need to ask the data engineer to create the chart. The data engineer will create the chart using BI tools and share it with the business. This may take some time and effort.

    Solution Architecture for Data Analysis BI Chart by Generative AI

    Think about the scenario where the business want to see the data distribution chart without the data engineer’s help. How can we create the chart without the data engineer’s help?

    One way to create the data distribution chart without the data engineer’s help is to use a generative AI model. The business just need to describe what the data they want to see and want to display as which chart type. The generative AI application will create the chart for them.

    The core important thing is that we need to let the GenAI understand the user's natural language and generate information that the application can use. The solution architecture is shown below:

    AI Solution Architecture for Data Analysis BI Chart
    AI Solution Architecture for Data Analysis BI Chart
    .

    The AI application will take the user’s natural language as input and generate the chart for them. The AI application will use the following steps to generate the chart:

    1. Understand the user’s natural language and generate the chart title, chart type and chart related sql.
    2. Connect to the database or dataset and execute the chart related sql to get the data.
    3. Use the data to create the chart using the chart type.

    The AI model could be ChatGPT, OpenAI or Claude model. The AI model will generate the chart related sql, chart type and chart title based on the user’s natural language. The AI model will use the chart type to create the chart.

    For example, if the user’s natural language is “Show the distribution of the sales by product category”, the AI application will generate the chart title as “Sales Distribution by Product Category” and chart type as “Bar Chart”. The AI application will execute the following sql to get the data:

    1
    2
    3
    SELECT product_category, SUM(sales) as total_sales
    FROM sales_data
    GROUP BY product_category

    Below is a demo to generate the chart for the user’s natural language base on testing sample data.

    .

    User input natural language:

    1
    "I want to see the distribution of all product categories with duplicate devices removed, and exclude the empty category, please display it in a pie chart."
    .

    The AI model response below base on the user’s natural language and prompts.

    1
    2
    3
    4
    5
    {
    'sql': "SELECT category, COUNT(DISTINCT macaddress) as device_count FROM device_demo_data WHERE category != '' GROUP BY category ORDER BY device_count DESC",
    'chart': 'pie',
    'title': 'Distribution of Unique Devices by Product Category'
    }

    There is a question, how the AI model know to generate this data format for us? Actually it is because we provide the prompts to the AI model. The prompts will guide the AI model to generate the chart related sql, chart type and chart title. Below is sample prompts:

    1
    2
    3
    4
    5
    6
    7
    8
    9
    10
    11
    12
    13
    14
    15
    16
    You are a data analyst. Below is the table structure information about devices reported data. I will ask you questions, and then please generate the json data format {'sql':'','chart':'table','title':''} based on the questions I asked. 
    Emphasize: I only need this json data format. The 'sql' value is used by AWS Athena to query and generate the chart data.
    The 'chart' value is the chart type, 'numeric' represents a just a number, 'table' represents a table chart, 'pie' represents a pie chart, 'bar' represents a bar chart, 'line' represents a line chart.
    The 'title' value is the chart title. Please remember, I only need the json string format data, don't need other sentence.
    CREATE EXTERNAL TABLE `device_demo_data`(
    `macaddress` string COMMENT '设备mac地址 / The device macaddress',
    `productname` string COMMENT '设备产品名称 / The device product name',
    `category` string COMMENT '设备产品的分类 / The device product category',
    `country` string COMMENT '设备所在的国家 / The country where the device is located',
    `region` string COMMENT '设备上传数据所在的区域 / The region where the device data is reported',
    `default_region` string COMMENT '设备出厂设置的默认区域 / The default region set by the device',
    `oneosversion` string COMMENT '设备OneOS的版本号 / The OneOS version of the device',
    `firmwareversion` string COMMENT '设备的固件版本号 / The firmware version of the device',
    `officialversion` string COMMENT '设备是否是官方的发布版本,1为官方版本, 0为非官方版本 / Whether the device is an official release, 1 for official version, 0 for non-official version',
    `createtime` string COMMENT '设备上报数据的日期, 数据类型是字符串,日期格式是2024-09-01,表示2024年9月1日 / The date when the device reported data, the data type is string, the date format is 2024-09-01, which means September 1, 2024'
    )

    We tell AI model the data schema and each field means, and indicate only need the json format response with specific field. Once the AI model generate the response, we can use it to create the chart for the user’s natural language.

    We can also generate the line chart base on time line.

    User input natural language:

    1
    How many distinct devices reported every week, exclude the empty date, display the data in line graph
    .

    For the front-end UI to display the chart, we can use some chart library like Echarts, Hightcharts, D3.js or Chart.js. The front-end UI display the chart base on chart type and the data which queried by SQL.

    All these generated charts can be added into dashboard.

    .

    Limitation

    As you can see, the AI solution architecture is a new way to create BI charts. However, it still has some limitations. It cannot generate complex charts like the advanced charting functionality of some BI tools. But it is still a good start for creating charts for business users.

    ]]>
    @@ -147,10 +121,10 @@ - - + +
    @@ -160,7 +134,7 @@ https://stonefishy.github.io/2024/10/23/intelligent-sql-generator-base-on-spring-ai-with-aws-bedrock/ 2024-10-23T15:01:12.000Z - 2024-11-15T06:39:09.206Z + 2024-11-18T09:56:32.361Z The generative AI is a type of artificial intelligence (AI) that can learn from data and generate new data. In this article, we will discuss how to build an intelligent SQL generator using Spring AI and AWS Bedrock. For example, the application able to provide the data sql to us after we input the natural language questions, and we can query the data by using the sql, even display the chart base on data queried.

    Spring AI

    Spring AI is a project of Spring. It supports all major AI model providers such as Anthropic, OpenAI, Microsoft, Amazon, Google, and Ollama. Supported model types include Chat Completion, Text to Image, Text to Speech, Translation, Audio Transcription and so on. It makes it easy to integrate AI models into the application.

    Spring AI
    Spring AI

    AWS Bedrock

    Amazon Bedrock is a fully managed service that makes FMs from leading AI startups and Amazon available via an API, so you can choose from a wide range of FMs to find the model that is best suited for your use case. It contains Anthropic (Claude), Meta (Llama) and Stability AI models. In this blog, we will use Claude AI model of Anthropic to build our intelligent SQL generator.

    AWS Bedrock
    AWS Bedrock

    Building Intelligent SQL Generator

    Assuming we're data analysts, we have product_sales, products and customers, three tables of data in MySQL. And we want to query the data by inputting natural language instead of writing specific SQL manually. We can use AI to understand the user's natural language and generate the SQL based on the table schemas. Let's get started.

    Create a new Spring Boot project

    Create a Spring Boot project with restful api, add the following dependencies in maven pom.xml

    1
    2
    3
    4
    5
    6
    7
    8
    <dependency>
    <groupId>org.springframework.boot</groupId>
    <artifactId>spring-boot-starter-web</artifactId>
    </dependency>
    <dependency>
    <groupId>org.springframework.ai</groupId>
    <artifactId>spring-ai-bedrock-ai-spring-boot-starter</artifactId>
    </dependency>

    The spring-ai-bedrock-ai-spring-boot-starter is library that provides integration with AWS Bedrock models in Spring AI.

    Configure AWS Bedrock configurations

    In application.properties, configure the configurations below.

    1
    2
    3
    4
    5
    6
    7
    8
    9
    spring.application.name=spring-ai-datasql
    spring.ai.bedrock.aws.region=us-east-1
    spring.ai.bedrock.aws.timeout=5m
    spring.ai.bedrock.anthropic3.chat.enabled=true
    spring.ai.bedrock.anthropic3.chat.options.max-tokens=4000

    # config below AWS credential key, configure it in Java environments or System environments
    spring.ai.bedrock.aws.access-key=${AWS_ACCESS_KEY_ID}
    spring.ai.bedrock.aws.secret-key=${AWS_SECRET_ACCESS_KEY}

    Prepare prompts for AI

    Since we only need AI to generate the SQL base on mysql table schema. So we need prepare prompts to AI to fully understand our requirements. Below is prompts.txt

    1
    2
    3
    4
    5
    6
    7
    8
    9
    10
    11
    12
    13
    14
    15
    16
    17
    18
    19
    20
    21
    22
    There are 3 mysql tables schema product_sales, products, customers.
    CREATE TABLE product_sales (
    sale_id INT AUTO_INCREMENT PRIMARY KEY,
    product_id INT NOT NULL,
    sale_date DATETIME DEFAULT CURRENT_TIMESTAMP,
    price DECIMAL(10, 2) NOT NULL,
    customer_id INT,
    region VARCHAR(100),
    FOREIGN KEY (product_id) REFERENCES products(product_id),
    FOREIGN KEY (customer_id) REFERENCES customers(customer_id)
    );
    CREATE TABLE products (
    product_id INT AUTO_INCREMENT PRIMARY KEY,
    product_name VARCHAR(100),
    product_category VARCHAR(100)
    );
    CREATE TABLE customers (
    customer_id INT AUTO_INCREMENT PRIMARY KEY,
    customer_name VARCHAR(100)
    );
    I will ask you question, please base on table schema to generate the SQL text in single line, please note I only need full correct sql text, do not
    need other text or any other characters.

    In above prompts, you can see it tells AI we only need SQL text base on the 3 mysql table schemas.

    Core Code

    Create a restful controller and pass the prompts and user input message to AI model.

    1
    2
    3
    4
    5
    6
    7
    8
    9
    10
    11
    12
    13
    14
    15
    16
    17
    18
    19
    20
    21
    22
    23
    24
    25
    26
    27
    28
    package spring.ai.datasql.controller;

    import org.springframework.ai.bedrock.anthropic3.BedrockAnthropic3ChatModel;
    import org.springframework.beans.factory.annotation.Autowired;
    import org.springframework.web.bind.annotation.*;
    import spring.ai.datasql.service.PromptService;

    import java.io.IOException;
    import java.util.Map;

    @RestController
    public class SQLGenController {
    private final BedrockAnthropic3ChatModel chatModel;

    @Autowired
    private PromptService prompts;

    @Autowired
    public SQLGenController(BedrockAnthropic3ChatModel chatModel) {
    this.chatModel = chatModel;
    }

    @PostMapping("/ai/sql")
    public Map generate(@RequestBody String message) throws IOException {
    String newMsg = prompts.getContent() + message;
    return Map.of("sql", chatModel.call(newMsg));
    }
    }

    In above code, we inject BedrockAnthropic3ChatModel which is Anthropic model of AWS Bedrock provided by Spring AI. We also inject PromptService which is a service to read prompts.
    In generate method, we read prompts from PromptService and append user input message to it. Then we call chatModel.call method to generate the SQL text.

    Code of PromptService to read prompts from file.

    1
    2
    3
    4
    5
    6
    7
    8
    9
    10
    11
    12
    13
    14
    15
    16
    17
    18
    19
    20
    21
    22
    23
    24
    25
    26
    package spring.ai.datasql.service;
    import jakarta.annotation.PostConstruct;
    import org.springframework.beans.factory.annotation.Value;
    import org.springframework.stereotype.Service;

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Paths;

    @Service
    public class PromptService {

    @Value("classpath:prompts.txt")
    private org.springframework.core.io.Resource resource;

    private String fileContent;

    @PostConstruct
    public void init() throws IOException {
    fileContent = new String(Files.readAllBytes(Paths.get(resource.getURI())));
    }

    public String getContent() {
    return fileContent;
    }
    }

    Run the application

    Run the application and test the API by sending a request with user input message. We may encounter below errors.

    AWS Bedrock Model can not access
    AWS Bedrock Model can not access

    This is because the spring.ai.bedrock.anthropic3.chat.model in current Spring AI version default value is anthropic.claude-3-sonnet-20240229-v1:0. Let’s check the anthropic available Claude models in AWS Bedrock. Here we use Claude 3.5 AI model.

    AWS Bedrock Anthropic Claude Model Id
    AWS Bedrock Anthropic Claude Model Id

    Copy this model id and update the spring.ai.bedrock.anthropic3.chat.model in application.properties file. The fully updated application.properties file should be like below.

    1
    2
    3
    4
    5
    6
    7
    8
    9
    10
    spring.application.name=spring-ai-datasql
    spring.ai.bedrock.aws.region=us-east-1
    spring.ai.bedrock.aws.timeout=5m
    spring.ai.bedrock.anthropic3.chat.enabled=true
    spring.ai.bedrock.anthropic3.chat.options.max-tokens=4000
    spring.ai.bedrock.anthropic3.chat.model=anthropic.claude-3-5-sonnet-20240620-v1:0

    # config below AWS credential key, it also can be configure in Java environments or System environments
    spring.ai.bedrock.aws.access-key=${AWS_ACCESS_KEY_ID}
    spring.ai.bedrock.aws.secret-key=${AWS_SECRET_ACCESS_KEY}

    Now, we can run the application and test the API.

    Test Example 1

    Input:

    1
    What's the total prices of product sales ?

    Output:

    The response will be like below.

    1
    SELECT SUM(price) FROM product_sales;

    Postman Screenshot:

    The AI model can generate the SQL text base on user input message.

    Test Example 2

    Input:

    1
    How many customers by our products? I only need unique customers.

    Output:

    1
    SELECT COUNT(DISTINCT customer_id) FROM product_sales

    Postman Screenshot:

    Test Example 3

    Input:

    1
    I want to see the total sales prices for each product categories.

    Output:

    1
    SELECT product_category, SUM(price) AS total_sales FROM product_sales JOIN products ON product_sales.product_id = products.product_id GROUP BY product_category;

    Postman Screenshot:

    Test Example 4

    Input:

    1
    Please show me all sales data which contains price, sales date, customer name and product name and product categories

    Output:

    1
    SELECT ps.price, ps.sale_date, c.customer_name, p.product_name, p.product_category FROM product_sales ps JOIN products p ON ps.product_id = p.product_id JOIN customers c ON ps.customer_id = c.customer_id

    Postman Screenshot:

    Test Example 5

    Input:

    1
    Please show total sales prices of each product category on 2nd quarter this year

    Output:

    1
    SELECT p.product_category, SUM(ps.price) AS total_sales FROM product_sales ps JOIN products p ON ps.product_id = p.product_id WHERE YEAR(ps.sale_date) = YEAR(CURDATE()) AND QUARTER(ps.sale_date) = 2 GROUP BY p.product_category

    Postman Screenshot:

    As you can see the AI model can generate the SQL text base on user input message. Base on this function, we can download the data from mysql or display the chart base on data queried. It’s good for business analyst to query the data by natural language.

    ]]>
    @@ -194,7 +168,7 @@ https://stonefishy.github.io/2024/10/16/the-useeffect-of-react-runs-twice-in-development-mode/ 2024-10-16T16:23:44.000Z - 2024-11-15T06:39:09.206Z + 2024-11-18T09:56:32.361Z Have you noticed that your useEffect hook of React runs twice when the page first loads in development mode? This occurs because since React 18, it can be confusing, especially for new developers. Let’s explore why this happens and what it means.

    React Hook useEffect
    React Hook useEffect

    What is useEffect?

    useEffect is a hook that allows you to perform side effects in your components. Side effects can be things like fetching data, subscribing to events, or changing the DOM. This hook takes two arguments:

    1. A function to run your side effect.
    2. An optional array of dependencies that tells React when to run the effect again.

    Here’s a simple example of useEffect in action:

    1
    2
    3
    4
    5
    6
    7
    8
    9
    10
    11
    12
    13
    14
    15
    16
    17
    import React, { useEffect, useState } from 'react';

    function ExampleComponent() {
    const [count, setCount] = useState(0);

    useEffect(() => {
    console.log('Effect has been executed');
    // Side effect logic here
    }, [count]);

    return (
    <div>
    <p>You clicked {count} times</p>
    <button onClick={() => setCount(count + 1)}>Click me</button>
    </div>
    );
    }

    In above code, the log ‘Effect has been executed’ will be printed to the console twice when the component is first render on development mode.

    React useEffect runs twice in development mode
    React useEffect runs twice in development mode

    Why Does useEffect Run Twice in Development Mode?

    When you run your React app in development mode, you might see that the useEffect runs twice when the component loads for the first time. This can be confusing, especially for new developers.

    Reasons for the Double Execution

    Strict Mode: This behavior is part of React’s Strict Mode. It purposely runs certain lifecycle methods and hooks like useEffect twice during development. This helps check if your code can handle side effects correctly.

    Testing Effects: By running the effect two times, React tests if your side effects can handle being called multiple times without causing bugs. This helps catch problems early.

    What Happens in Production?

    The double call only happens in development mode. When you build your app for production, the effect runs only once.

    How to Handle the Double Execution

    Here are some tips for dealing with the double execution of useEffect:

    Be Careful with State Updates: If your effect updates state, make sure it’s safe to run the effect multiple times without causing issues.

    Use Cleanup Functions: Always return a cleanup function from your useEffect to free up resources and avoid memory issues.

    1
    2
    3
    4
    5
    6
    7
    useEffect(() => {
    // Your side effect code here

    return () => {
    // Cleanup code here
    };
    }, [dependencies]);

    Test Your Effects: Use the extra invocation to ensure that your effects work correctly.

    Disable Strict Mode

    It is not recommend this way, but if you want to disable Strict Mode, in React 18 you can disable Strict Mode by removing the <React.StrictMode> tag from the return statement in your root component.

    In Next.js, you can disable Strict Mode by setting the following parameter in next.config.js:

    1
    2
    3
    module.exports = {
    reactStrictMode: false,
    }

    Conclusion

    Seeing useEffect run twice in development mode can be surprising. Understanding this behavior and preparing your code for it will allow you to use React hooks effectively and build better applications.

    Even though this might seem confusing at first, it’s an important part of the React development experience. Happy coding!

    ]]>
    @@ -220,7 +194,7 @@ https://stonefishy.github.io/2024/10/09/aws-glue-iceberg-tables-schema-can-t-be-updated-with-pulumi/ 2024-10-09T09:46:22.000Z - 2024-11-15T06:39:09.206Z + 2024-11-18T09:56:32.361Z Context

    Recently, we’re planning to use Pulumi to manage all currently existing AWS Glue Data Catalog tables which are in Iceberg format. For the Iceberg tables, I have posted a blog before to talk about what Iceberg is and what its features are. Here is the post link: https://stonefishy.github.io/2020/05/23/what-is-apache-iceberg/

    To manage the AWS Glue Iceberg tables with Pulumi, since our catalog table schemas continue to change based on requirements, we need to do a technical POC on whether Pulumi can also support updating the Iceberg metadata schema as well.

    Create Glue Iceberg Table

    We’re using Pulumi to manage the AWS Cloud Infrastructure. Before creating a glue table, a glue database is needed.

    1
    2
    3
    4
    5
    6
    7
    8
    9
    10
    11
    import pulumi
    import pulumi_aws as aws

    pulumi_database_test = aws.glue.CatalogDatabase("pulumi_database_test",
    create_table_default_permissions=[aws.glue.CatalogDatabaseCreateTableDefaultPermissionArgs(
    permissions=["ALL"],
    principal=aws.glue.CatalogDatabaseCreateTableDefaultPermissionPrincipalArgs(
    data_lake_principal_identifier="IAM_ALLOWED_PRINCIPALS",
    ),
    )],
    name="pulumi_database_test")

    Above code is to create a glue database named pulumi_database_test. Next Step is to create a glue table with Iceberg format.

    1
    2
    3
    4
    5
    6
    7
    8
    9
    10
    11
    12
    13
    14
    15
    16
    17
    18
    19
    20
    21
    22
    23
    24
    25
    26
    27
    28
    29
    30
    31
    32
    33
    34
    35
    36
    import pulumi
    import pulumi_aws as aws

    pulumi_external_table_test = aws.glue.CatalogTable("pulumi_external_table_test",
    database_name="pulumi_database_test",
    name="pulumi_external_table_test",
    storage_descriptor=aws.glue.CatalogTableStorageDescriptorArgs(
    additional_locations=["s3://xxx/pulumi_external_table_test/data"],
    columns=[
    aws.glue.CatalogTableStorageDescriptorColumnArgs(
    name="test1",
    type="string",
    ),
    aws.glue.CatalogTableStorageDescriptorColumnArgs(
    name="test2",
    type="string",
    ),
    aws.glue.CatalogTableStorageDescriptorColumnArgs(
    name="test3",
    type="boolean",
    ),
    aws.glue.CatalogTableStorageDescriptorColumnArgs(
    name="test4",
    type="string",
    )
    ],
    location="s3://xxx/pulumi_external_table_test",
    ),
    table_type="EXTERNAL_TABLE",
    open_table_format_input=aws.glue.CatalogTableOpenTableFormatInputArgs(
    iceberg_input=aws.glue.CatalogTableOpenTableFormatInputIcebergInputArgs(
    metadata_operation="CREATE"
    )
    ),
    opts=pulumi.ResourceOptions(protect=False)
    )

    An important thing to notice here is that we need to set open_table_format_input with iceberg_input and set metadata_operation as CREATE. This is because we want to create a new Iceberg table with a new schema.

    Below is a screenshot of the Glue Iceberg table that was created. You can see the 4 fields are added in the schema and the table format is Apache Iceberg.

    AWS Glue Table Schema Created by Pulumi
    AWS Glue Table Schema Created by Pulumi

    Next, let’s check the important file that is Apache Iceberg metadata file which is located in s3://xxx/pulumi_external_table_test/metadata/. Download this json file 00006-fd122b03-a7aa-42cf-8fec-001535a9fcf5.metadata.json from S3. The 4 fields are defined in metadata json file. That is good. The metadata json is created as well when creating glue table.

    AWS Glue Table Iceberg metadata
    AWS Glue Table Iceberg metadata

    Insert new data in Glue iceberg table

    Let’s use AWS Athena to insert test data into the table.

    1
    INSERT INTO pulumi_database_test.pulumi_external_table_test(test1,test2,test3,test4) VALUES('1a', '2a', true, '4a')

    The data is inserted successfully and we can use a SELECT SQL statement to query the data.

    Query inserted data in AWS Glue Iceberg table
    Query inserted data in AWS Glue Iceberg table

    In Iceberg table, we can insert, update, delete data as well.

    Update Glue Iceberg table schema

    Let’s add a new field test5 to the Glue Iceberg table based on the previous code.

    1
    2
    3
    4
    5
    6
    7
    8
    9
    10
    11
    12
    13
    14
    15
    16
    17
    18
    19
    20
    21
    22
    23
    24
    25
    26
    27
    28
    29
    30
    31
    32
    33
    34
    35
    36
    37
    38
    39
    40
    import pulumi
    import pulumi_aws as aws

    pulumi_external_table_test = aws.glue.CatalogTable("pulumi_external_table_test",
    database_name="pulumi_database_test",
    name="pulumi_external_table_test",
    storage_descriptor=aws.glue.CatalogTableStorageDescriptorArgs(
    additional_locations=["s3://xxx/pulumi_external_table_test/data"],
    columns=[
    aws.glue.CatalogTableStorageDescriptorColumnArgs(
    name="test1",
    type="string",
    ),
    aws.glue.CatalogTableStorageDescriptorColumnArgs(
    name="test2",
    type="string",
    ),
    aws.glue.CatalogTableStorageDescriptorColumnArgs(
    name="test3",
    type="boolean",
    ),
    aws.glue.CatalogTableStorageDescriptorColumnArgs(
    name="test4",
    type="string",
    ),
    aws.glue.CatalogTableStorageDescriptorColumnArgs(
    name="test5",
    type="string",
    )
    ],
    location="s3://xxx/pulumi_external_table_test",
    ),
    table_type="EXTERNAL_TABLE",
    open_table_format_input=aws.glue.CatalogTableOpenTableFormatInputArgs(
    iceberg_input=aws.glue.CatalogTableOpenTableFormatInputIcebergInputArgs(
    metadata_operation="CREATE"
    )
    ),
    opts=pulumi.ResourceOptions(protect=False)
    )

    Execute pulumi up command to update the glue table schema.

    Pulumi update AWS Glue Iceberg table schema
    Pulumi update AWS Glue Iceberg table schema

    After that, we can check the glue table schema is updated to add a new field test5.

    AWS Glue iceberg table new field
    AWS Glue iceberg table new field

    Let’s insert new data in the table with new field test5 and run it in AWS Athena.

    1
    INSERT INTO pulumi_database_test.pulumi_external_table_test(test1,test2,test3,test4,test5) VALUES('1b', '2b', true, '4b', '5b')

    The Athena execute show below errors:

    1
    2
    COLUMN_NOT_FOUND: Insert column name does not exist in target table: test5. If a data manifest file was generated at 's3://xxxxxx/4c346103-60d2-45ea-9813-d7060bd5efe9/Unsaved/2024/10/09/37f67a67-4604-43cb-b113-af351c363a51-manifest.csv', you may need to manually clean the data from locations specified in the manifest. Athena will not delete data in your account.
    This query ran against the "pulumi_database_test" database, unless qualified by the query. Please post the error message on our forum or contact customer support with Query Id: 37f67a67-4604-43cb-b113-af351c363a51
    AWS Athena insert glue iceberg table failed
    AWS Athena insert glue iceberg table failed

    But when we check the Apache Iceberg metadata file again, the new field test5 is not added in the new metadata file. That’s why inserting new data with the new field failed.

    AWS Glue iceberg table metadata not updated
    AWS Glue iceberg table metadata not updated

    Conclusion

    In the Pulumi documentation, the metadata_operation of iceberg_input in open_table_format_input only supports the CREATE value. It seems it can only create the Iceberg metadata file when the Glue table is created.

    Pulumi API Doc
    Pulumi API Doc

    It seems this is a Pulumi issue: it does not update the Iceberg metadata file when the Glue table schema is updated. I’ve raised an issue with Pulumi; here is the issue link: https://github.com/pulumi/pulumi/issues/17516. Hopefully this issue can be fixed soon.

    Meanwhile, I found the same issue in Terraform, which also cannot update the Iceberg metadata file when the Glue table schema is updated. The Terraform issue link is here: https://github.com/hashicorp/terraform-provider-aws/issues/36641.

    ]]> @@ -235,14 +209,14 @@ + + - - @@ -252,7 +226,7 @@ https://stonefishy.github.io/2024/09/19/user-survey-feedback-sentiment-analysis-base-on-aws-cloud-solution/ 2024-09-19T14:00:33.000Z - 2024-11-15T06:39:09.206Z + 2024-11-18T09:56:32.357Z Introduction

    The App Product User Survey Feedback Sentiment Analysis Solution is a cloud-based solution that uses AWS services to analyze user feedback and sentiment of the app product. The solution uses Amazon Comprehend to perform sentiment analysis on the feedback and Amazon S3 to store the data. The solution is designed to be scalable and cost-effective, and can be easily integrated into any app product.

    Basically, our survey feedback file is an Excel file that contains the user feedback of the app and related application info such as OS version and app version. The feedback text is in different languages from global users, so we need to translate the text into English using Amazon Translate. Besides, the feedback file is generated monthly. So, the solution will extract the feedback data from the Excel file, translate the text into English using Amazon Translate, perform sentiment analysis using Amazon Comprehend, and store the data in Amazon S3. The solution will also provide a dashboard to visualize the sentiment analysis results.

    The Amazon Comprehend is a natural language processing (NLP) service that can analyze text and extract insights such as sentiment, syntax, entities, and key phrases.

    Solution Architecture

    Below is the high-level architecture of the solution:

    AWS Sentiment Analysis Solution
    AWS Sentiment Analysis Solution

    AWS Services Used

    The solution uses the following AWS services:
    AWS S3: Amazon Simple Storage Service (S3) is a scalable object storage service that can store large amounts of data.
    AWS Lambda: AWS Lambda is a serverless compute service that can run code without provisioning or managing servers.
    AWS Comprehend: Amazon Comprehend is a natural language processing (NLP) service that can analyze text and extract insights such as sentiment, syntax, entities, and key phrases.
    AWS SNS: Amazon Simple Notification Service (SNS) is a messaging service that can be used to send notifications to users.
    AWS Translate: Amazon Translate is a machine translation service that can translate text from one language to another.
    AWS SQS: Amazon Simple Queue Service (SQS) is a messaging service that can be used to store and process large amounts of messages.
    AWS CloudWatch: Amazon CloudWatch is a monitoring service that can be used to monitor the solution and generate metrics.
    AWS Glue: Amazon Glue is a serverless ETL (extract, transform, and load) service that can be used to extract data from the survey feedback file and store it in Amazon S3.
    AWS Athena: Amazon Athena is a serverless data analytics service that can be used to query and analyze data stored in Amazon S3.
    AWS QuickSight: Amazon QuickSight is a business intelligence (BI) service that can be used to create visualizations and dashboards based on the sentiment analysis results.

    Solution Implementation

    The solution implementation is divided into the following steps:

    1. Create an Amazon S3 bucket as raw data bucket to store the survey feedback Excel file.
    2. Uploaded a survey feedback Excel file to the S3 bucket to trigger the AWS Lambda function.
    3. AWS Lambda to extract the survey feedback data from the Excel file, translate the text into English using Amazon Translate, sentiment analysis using Amazon Comprehend, and store the data as Parquet format in another Amazon S3 Bucket.
    4. Create an Amazon SNS topic to notify users by email when the Lambda process data failed.
    5. Use Amazon CloudWatch to log the lambda execution logs and generate metrics.
    6. AWS Glue Crawler to extract the parquet data from the processed amazon S3 bucket and generate a table schema.
    7. Using Amazon Athena to query the data from the processed Amazon S3 bucket.
    8. Create an Amazon QuickSight dashboard to visualize the sentiment analysis results.

    The AWS Lambda core function code is as follows:

    1
    2
    3
    4
    5
    6
    7
    8
    9
    10
    11
    12
    13
    14
    15
    16
    17
    18
    19
    20
    21
    22
    23
    24
    25
    26
    27
    28
    29
    30
    31
    32
    33
    34
    35
    36
    37
    38
    39
    40
    41
    42
    43
    44
    45
    46
    47
    48
    49
    50
    51
    52
    53
    54
    55
    56
    57
    58
    59
    60
    61
    62
    63
    64
    65
    66
    67
    68
    69
    70
    71
    72
    73
    74
    75
    76
    77
    78
    79
    80
    81
    82
    83
    84
    85
    86
    87
    88
    89
    90
    91
    92
    import json
    import boto3
    import pandas as pd
    import pyarrow as pa
    import pyarrow.parquet as pq
    import urllib.parse as urlparse
    import io
    import os


    S3_TARGET_BUCKET = os.environ['QE_SURVEY_PROCESSED_TARGET_BUCKET']
    SNS_TOPIC_ARN = os.environ['SNS_TOPIC_ARN']


    def translate_feedbacks(feedbacks):
    translate = boto3.client('translate')
    feedbacks_en = []
    print(f"calling AWS Translate to auto detect language and translate feedback, total {feedbacks.__len__()}")

    for feedback in feedbacks:
    response = translate.translate_text(
    Text=feedback,
    SourceLanguageCode='auto', # Detect source language automatically
    TargetLanguageCode='en'
    )
    feedbacks_en.append(response['TranslatedText'])
    print(f"Finished transalate sourceText: {feedback} to targetText: {response['TranslatedText']}")

    return feedbacks_en


    def comprehend_sentiment(feedbacks):
    comprehend = boto3.client('comprehend')
    batch_size = 25
    all_sentiments = []
    print(f"calling AWS Comprehend AI to analysis feedback, total {feedbacks.__len__()}")

    for i in range(0, len(feedbacks), batch_size):
    batch_feedbacks = feedbacks[i:i+batch_size]
    print(f"calling AWS Comprehend AI to analysis feedback, batch {i} - {i+batch_size}")
    comprehend_response = comprehend.batch_detect_sentiment(TextList=batch_feedbacks, LanguageCode='en')
    sentiments = [response['Sentiment'] for response in comprehend_response['ResultList']]
    all_sentiments.extend(sentiments)

    return all_sentiments


    def lambda_handler(event, context):
    s3 = boto3.client('s3')
    sns_client = boto3.client('sns')

    source_bucket = event['Records'][0]['s3']['bucket']['name']
    source_key = urlparse.unquote_plus(event['Records'][0]['s3']['object']['key'])

    target_bucket = S3_TARGET_BUCKET
    target_key = f"qe-survey/{source_key.replace('.xlsx', '.parquet')}"

    try:
    response = s3.get_object(Bucket=source_bucket, Key=source_key)
    excel_file = response['Body'].read()

    columns = ['ce_timestamp', 'ce_host_id', 'ce_host_os', 'ce_hw', 'ce_fw', 'ce_sw', 'survey_feedback', 'survey_rating']
    df = pd.read_excel(io.BytesIO(excel_file), usecols=columns)
    df['ce_timestamp'] = pd.to_datetime(df['ce_timestamp'], format='%Y-%m-%d %H:%M:%S:%f')
    df['survey_feedback'] = df['survey_feedback'].astype(str)
    valid_df = df[df['survey_feedback'].apply(lambda x: x.strip() != '')]
    valid_df = valid_df[valid_df['survey_feedback'] != 'nan']

    feedbacks = valid_df['survey_feedback'].tolist()

    feedbacks_en = translate_feedbacks(feedbacks)
    valid_df["survey_feedback_en"]= feedbacks_en
    valid_df['sentiment'] = comprehend_sentiment(feedbacks_en)

    parquet_buffer = io.BytesIO()
    pq.write_table(pa.Table.from_pandas(valid_df), parquet_buffer)
    parquet_buffer.seek(0)

    s3.put_object(Bucket=target_bucket, Key=target_key, Body=parquet_buffer)
    print(f'{source_key} File converted to Parquet {target_key} and stored in S3 bucket {target_bucket} successfully')

    return {
    'statusCode': 200,
    'body': json.dumps(f'{source_key} File converted to Parquet {target_key} and stored in S3 bucket {target_bucket} successfully')
    }
    except Exception as e:
    print(f'Error processing {source_key}: {e}')
    sns_client.publish(
    TopicArn=SNS_TOPIC_ARN,
    Subject='Lambda Function Processing QE Survey Feedback Failure Notification',
    Message=f'An error occurred: {str(e)}'
    )

    The above code extracts the survey feedback data from the Excel file, translates the text into English using Amazon Translate, performs sentiment analysis using Amazon Comprehend, and stores the data in Parquet format in another Amazon S3 bucket. It also notifies users by email via Amazon SNS when the Lambda data processing fails.

    Below is sentiment analysis results visualization using Amazon QuickSight:

    AWS Sentiment Analysis Results
    AWS Sentiment Analysis Results

    This is a high-level overview of the solution implementation. The solution can be further customized and enhanced based on the specific requirements of the app product.

    ]]> @@ -267,14 +241,14 @@ + + - - @@ -282,7 +256,7 @@ https://stonefishy.github.io/2024/09/10/aws-s3-event-replacing-space-with-character-sign-in-object-key-name/ 2024-09-10T09:22:34.000Z - 2024-11-15T06:39:09.206Z + 2024-11-18T09:56:32.357Z The issue with AWS S3 Event notifications is that it replaces spaces with ‘+’ character sign in the object key name. This can cause issues when trying to access the object in S3. It will occurs NoSuchKey error if not handling this issue properly.

    Background

    I configured an S3 bucket put event notification to a Lambda function. The Lambda function will be triggered when a new object is uploaded to the S3 bucket. I uploaded a file named ‘2023 2nd quarter QE survey raw data.xlsx’ into the S3 bucket and the Lambda function was triggered, but when I try to access the object in S3, I get a NoSuchKey error in AWS CloudWatch Logs. Debugging shows that the object key name is ‘2023+2nd+quarter+QE+survey+raw+data.xlsx’ instead of ‘2023 2nd quarter QE survey raw data.xlsx’. It means the S3 event notification is replacing spaces with the ‘+’ character sign in the object key name.

    The detail error is below:

    1
    [ERROR] NoSuchKey: An error occurred (NoSuchKey) when calling the GetObject operation: The specified key does not exist.

    Solution

    There are two solutions to fix this issue: one is on the AWS S3 file upload side, and the other is on the Lambda function side.

    1. From AWS S3 upload file side:

    If you have control over the upload process, ensure that the keys are properly URL-encoded.

    1
    2
    3
    4
    5
    6
    7
    8
    9
    10
    11
    12
    import boto3
    import urllib.parse

    s3_client = boto3.client('s3')

    # Example key with spaces
    object_key = "2023 2nd quarter QE survey raw data.xlsx"
    encoded_key = urllib.parse.quote(object_key)

    # Upload the object
    s3_client.upload_file("/tmp/2023 2nd quarter QE survey raw data.xlsx", "my-bucket-name", encoded_key)

    This will ensure that the object key name is properly URL-encoded, which will prevent the S3 event notification from replacing spaces with ‘+’ character sign. The urllib.parse.quote function will replace spaces with ‘%20’ and ‘+’ with ‘%2B’.

    2. From Lambda function side:

    To fix this issue, we need to modify the Lambda function to handle the object key name with the ‘+’ character sign. We can use the urllib.parse package to handle it. This library provides the function unquote_plus to replace ‘+’ with a space. Here is the code to handle the object key name with the ‘+’ character sign:

    1
    2
    3
    4
    5
    6
    7
    8
    9
    10
    11
    12
    13
    14
    15
    import os
    import urllib.parse as urlparse
    import boto3

    s3 = boto3.client('s3')

    def lambda_handler(event, context):
    bucket_name = event['Records'][0]['s3']['bucket']['name']
    object_key = event['Records'][0]['s3']['object']['key']
    # Replace '+' with space
    object_key = urlparse.unquote_plus(object_key)
    # Download the object
    s3.download_file(bucket_name, object_key, '/tmp/file.txt')
    # Do something with the downloaded file
    #...

    In the above code, we first get the bucket name and object key from the S3 event notification. We then use the urlparse.unquote_plus function to replace ‘+’ with space in the object key name. Finally, we download the object using the s3.download_file function.

    Conclusion

    To fix the issue with AWS S3 Event notifications replacing space with ‘+’ character sign in the object key name, we need to handle it properly in the Lambda function. We can use the urllib.parse package to handle it.

    ]]>
    @@ -297,12 +271,12 @@ + + - - @@ -314,9 +288,9 @@ https://stonefishy.github.io/2024/08/16/introduce-is-distinct-from-in-sql/ 2024-08-16T09:22:15.000Z - 2024-11-15T06:39:09.206Z + 2024-11-18T09:56:32.357Z - 在SQL查询中,比较操作符 = 通常用于检查两个值是否相等。然而,当涉及到处理缺失值(NULL)时,这种操作符就会面临挑战。为了解决这一问题,SQL 提供了 `IS DISTINCT FROM` 操作符,它用于精确比较两个值是否不同,即使这些值中有 NULL。本文将详细介绍 IS DISTINCT FROM 的语法、解决的问题以及常见的使用场景。

    语法

    IS DISTINCT FROM 的基本语法如下:

    1
    expression1 IS DISTINCT FROM expression2

    其中 expression1 和 expression2 是要进行比较的两个表达式。 该操作符返回布尔值:TRUE、FALSE。

    主要解决的问题

    在SQL中,NULL 值代表缺失或未知的数据。当两个表达式中至少有一个为 NULL 时,使用传统的比较操作符(如 = 或 <>)进行比较会导致不确定的结果。具体来说:

    • expression1 = expression2 在 expression1 或 expression2 为 NULL 时会返回 UNKNOWN。
    • expression1 <> expression2 在 expression1 或 expression2 为 NULL 时也会返回 UNKNOWN。

    比如下面的查询语句:

    1
    2
    3
    4
    5
    6
    7
    select 
    1 = NULL as A1,
    NULL <> 1 as A2,
    NULL = NULL as A3,
    NULL <> NULL as A4,
    1 = 1 as A5,
    1 <> 1 as A6

    会返回以下结果:

    A1A2A3A4A5A6
    TRUEFALSE

    可以看到,当 expression1 或 expression2 为 NULL 时,传统的比较操作符会返回 UNKNOWN空值, 如上面的A1, A2, A3, A4的结果值,这就会导致不确定性。

    IS DISTINCT FROM 操作符的出现,解决了这些问题。它能正确处理 NULL 值,会返回 TRUE 或 FALSE,确保结果的可靠性。 在以下情况下返回 TRUE:

    • expression1 和 expression2 都为 NULL。
    • expression1 和 expression2 的值不同(不论是否为 NULL)。

    而在 expression1 和 expression2 相等(包括都是 NULL)的情况下,IS DISTINCT FROM 返回 FALSE。 另外还有一个 IS NOT DISTINCT FROM 操作符,用于判断两个值是否相等。其用法一样,只是语义相反。

    下面的例子查询:

    1
    2
    3
    4
    5
    6
    7
    SELECT 
    1 IS DISTINCT FROM NULL as B1,
    NULL IS DISTINCT FROM 1 as B2,
    1 IS DISTINCT FROM 2 as B3,
    NULL IS DISTINCT FROM NULL as B4,
    1 IS DISTINCT FROM 1 as B5,
    1 IS NOT DISTINCT FROM 1 as B6

    查询结果如下:

    B1B2B3B4B5B6
    TRUETRUETRUEFALSEFALSETRUE

    可以看到,IS DISTINCT FROM 正确处理 NULL 值,返回 TRUE 或 FALSE,确保结果的可靠性。

    使用场景

    数据清洗和验证

    数据清洗数据验证过程中,经常需要检查数据库中的值是否不同,包括对 NULL 值的处理。例如,比较用户输入的数据与现有记录,以确定是否有不同的记录。使用 IS DISTINCT FROM 可以更准确地处理 NULL 值,避免出现错误或遗漏。

    1
    2
    3
    SELECT *
    FROM users
    WHERE username IS DISTINCT FROM 'andrewsy';

    这条查询会返回所有 username 与 ‘andrewsy’ 不同的记录,包括那些 username 为 NULL 的记录。

    数据更新

    在更新数据时,使用 IS DISTINCT FROM 可以确保只有在数据实际变化时才进行更新,从而避免不必要的更新操作。

    1
    2
    3
    UPDATE users
    SET email = 'new_andrewsy@email.com'
    WHERE email IS DISTINCT FROM 'new_andrewsy@email.com';

    这条查询会更新所有 email 不同于 ‘new_andrewsy@email.com‘ 的记录,包括那些 email 为 NULL 的记录。

    数据比较

    在进行复杂的数据比较时,尤其是涉及到 NULL 值时,IS DISTINCT FROM 提供了更直观的比较逻辑。例如,在合并两个数据集时,可以使用此操作符来确保唯一性。

    1
    2
    3
    4
    5
    6
    7
    8
    SELECT 
    *
    FROM
    dataset1
    FULL OUTER JOIN
    dataset2
    ON
    dataset1.id IS DISTINCT FROM dataset2.id

    这条查询会找出两个数据集中 id 不同的记录,包括 id 为 NULL 的情况。

    注意事项

    IS DISTINCT FROM 是 SQL 标准中的一部分,但并非所有数据库系统都支持。具体的支持情况需要查阅数据库的文档。在使用 IS DISTINCT FROM 时,确保数据库系统的版本和文档中对此操作符的支持及行为一致。

    总结

    IS DISTINCT FROM 是一个强大的工具,用于在 SQL 中处理包含 NULL 值的数据比较。它解决了传统比较操作符在处理 NULL 值时的不足,使得数据验证、更新和比较更加准确和可靠。在实际应用中,根据数据库系统的支持情况,合理使用 IS DISTINCT FROM 可以显著提高数据操作的精确性和健壮性。

    ]]>
    + 在SQL查询中,比较操作符 = 通常用于检查两个值是否相等。然而,当涉及到处理缺失值(NULL)时,这种操作符就会面临挑战。为了解决这一问题,SQL 提供了 `IS DISTINCT FROM` 操作符,它用于精确比较两个值是否不同,即使这些值中有 NULL。本文将详细介绍 IS DISTINCT FROM 的语法、解决的问题以及常见的使用场景。

    语法

    IS DISTINCT FROM 的基本语法如下:

    1
    expression1 IS DISTINCT FROM expression2

    其中 expression1 和 expression2 是要进行比较的两个表达式。 该操作符返回布尔值:TRUE、FALSE。

    主要解决的问题

    在SQL中,NULL 值代表缺失或未知的数据。当两个表达式中至少有一个为 NULL 时,使用传统的比较操作符(如 = 或 <>)进行比较会导致不确定的结果。具体来说:

    • expression1 = expression2 在 expression1 或 expression2 为 NULL 时会返回 UNKNOWN。
    • expression1 <> expression2 在 expression1 或 expression2 为 NULL 时也会返回 UNKNOWN。

    比如下面的查询语句:

    1
    2
    3
    4
    5
    6
    7
    select 
    1 = NULL as A1,
    NULL <> 1 as A2,
    NULL = NULL as A3,
    NULL <> NULL as A4,
    1 = 1 as A5,
    1 <> 1 as A6

    会返回以下结果:

    A1A2A3A4A5A6
    TRUEFALSE

    可以看到,当 expression1 或 expression2 为 NULL 时,传统的比较操作符会返回 UNKNOWN空值, 如上面的A1, A2, A3, A4的结果值,这就会导致不确定性。

    IS DISTINCT FROM 操作符的出现,解决了这些问题。它能正确处理 NULL 值,会返回 TRUE 或 FALSE,确保结果的可靠性。 在以下情况下返回 TRUE:

    • expression1 和 expression2 都为 NULL。
    • expression1 和 expression2 的值不同(不论是否为 NULL)。

    而在 expression1 和 expression2 相等(包括都是 NULL)的情况下,IS DISTINCT FROM 返回 FALSE。 另外还有一个 IS NOT DISTINCT FROM 操作符,用于判断两个值是否相等。其用法一样,只是语义相反。

    下面的例子查询:

    1
    2
    3
    4
    5
    6
    7
    SELECT 
    1 IS DISTINCT FROM NULL as B1,
    NULL IS DISTINCT FROM 1 as B2,
    1 IS DISTINCT FROM 2 as B3,
    NULL IS DISTINCT FROM NULL as B4,
    1 IS DISTINCT FROM 1 as B5,
    1 IS NOT DISTINCT FROM 1 as B6

    查询结果如下:

    B1B2B3B4B5B6
    TRUETRUETRUEFALSEFALSETRUE

    可以看到,IS DISTINCT FROM 正确处理 NULL 值,返回 TRUE 或 FALSE,确保结果的可靠性。

    使用场景

    数据清洗和验证

    数据清洗数据验证过程中,经常需要检查数据库中的值是否不同,包括对 NULL 值的处理。例如,比较用户输入的数据与现有记录,以确定是否有不同的记录。使用 IS DISTINCT FROM 可以更准确地处理 NULL 值,避免出现错误或遗漏。

    1
    2
    3
    SELECT *
    FROM users
    WHERE username IS DISTINCT FROM 'andrewsy';

    这条查询会返回所有 username 与 ‘andrewsy’ 不同的记录,包括那些 username 为 NULL 的记录。

    数据更新

    在更新数据时,使用 IS DISTINCT FROM 可以确保只有在数据实际变化时才进行更新,从而避免不必要的更新操作。

    1
    2
    3
    UPDATE users
    SET email = 'new_andrewsy@email.com'
    WHERE email IS DISTINCT FROM 'new_andrewsy@email.com';

    这条查询会更新所有 email 不同于 ‘new_andrewsy@email.com‘ 的记录,包括那些 email 为 NULL 的记录。

    数据比较

    在进行复杂的数据比较时,尤其是涉及到 NULL 值时,IS DISTINCT FROM 提供了更直观的比较逻辑。例如,在合并两个数据集时,可以使用此操作符来确保唯一性。

    1
    2
    3
    4
    5
    6
    7
    8
    SELECT 
    *
    FROM
    dataset1
    FULL OUTER JOIN
    dataset2
    ON
    dataset1.id IS DISTINCT FROM dataset2.id

    这条查询会找出两个数据集中 id 不同的记录,包括 id 为 NULL 的情况。

    注意事项

    IS DISTINCT FROM 是 SQL 标准中的一部分,但并非所有数据库系统都支持。具体的支持情况需要查阅数据库的文档。在使用 IS DISTINCT FROM 时,确保数据库系统的版本和文档中对此操作符的支持及行为一致。

    总结

    IS DISTINCT FROM 是一个强大的工具,用于在 SQL 中处理包含 NULL 值的数据比较。它解决了传统比较操作符在处理 NULL 值时的不足,使得数据验证、更新和比较更加准确和可靠。在实际应用中,根据数据库系统的支持情况,合理使用 IS DISTINCT FROM 可以显著提高数据操作的精确性和健壮性。

    ]]>
    @@ -342,7 +316,7 @@ https://stonefishy.github.io/2024/08/15/what-is-lag-in-sql/ 2024-08-15T09:24:21.000Z - 2024-11-15T06:39:09.206Z + 2024-11-18T09:56:32.357Z 最近在做数据分析,需要挖掘数据随时间变化的信息。所有数据物理存储在AWS S3上,通过AWS Glue Catalog和AWS Athena进行数据查询。AWS Athena支持SQL语言,可以对数据进行分析。在处理时间序列数据或分析行间变化时, SQL中的 LAG 函数和 LEAD 函数是非常有用的。下面,我们来看一下 LAG 函数的基本用法。

    什么是 LAG 函数?

    在 SQL 中,LAG 函数是一种窗口函数用于获取当前行之前某一行的值。这在处理时间序列数据或分析行间变化时非常有用。LAG 函数可以让你访问当前行之前的行数据,而不需要使用子查询或自连接。LEAD 函数则是获取当前行之后的行数据。他们的语法和用法类似,只是方向不同。

    LAG 函数的语法

    1
    LAG(expression, offset, default) OVER (PARTITION BY partition_column ORDER BY order_column)
    • expression:要返回的列或计算结果。
    • offset:向前查找的行数,默认为 1(即前一行)。
    • default:当没有前行时返回的默认值,默认为 NULL。
    • PARTITION BY:用于将数据分组。如果省略,LAG 会在整个结果集上应用。
    • ORDER BY:确定行的顺序,LAG 函数会根据这个顺序来访问前面的行。

    如何使用 LAG 函数

    LAG 函数在使用时通常与 OVER 子句一起使用。OVER 子句用于定义窗口(即应用 LAG 函数的范围)。在窗口中,ORDER BY 确定了行的顺序,PARTITION BY 则可以用来将数据分组,使每个分组内的计算互相独立。

    基本使用示例:

    假设我们有一个名为 sales 的表,包含 sale_date 和 amount 列。我们想要比较每笔销售金额与前一笔销售金额的变化。

    1
    2
    3
    4
    5
    SELECT
    sale_date,
    amount,
    LAG(amount, 1) OVER (ORDER BY sale_date) AS previous_amount
    FROM sales;

    在这个查询中:

    amount 是当前销售金额。
    LAG(amount, 1) 获取当前销售的前一笔销售金额(根据 sale_date 排序)。

    使用场景

    1. 时间序列分析:
      LAG 函数非常适合分析时间序列数据,帮助用户了解数据变化趋势。例如,分析每月的销售数据,找出增长或下降的趋势。

    2. 计算变化量:
      可以计算当前值与前一值之间的变化量,例如销售额变化、温度变化等。

    3. 生成滚动报告:
      LAG 可以用来生成带有前值的滚动报告,例如计算累计销售额,或者生成滞后数据用于报表。

    举个例子

    假设我们有一个销售记录表 monthly_sales,结构如下:

    1
    2
    3
    4
    5
    6
    7
    8
    9
    10
    CREATE TABLE monthly_sales (
    month DATE,
    sales_amount DECIMAL(10, 2)
    );

    INSERT INTO monthly_sales (month, sales_amount) VALUES
    ('2024-01-01', 1000.00),
    ('2024-02-01', 1500.00),
    ('2024-03-01', 1200.00),
    ('2024-04-01', 1700.00);

    我们可以使用 LAG 函数来比较每个月的销售额与前一个月的销售额:

    1
    2
    3
    4
    5
    6
    SELECT
    month,
    sales_amount,
    LAG(sales_amount, 1) OVER (ORDER BY month) AS previous_month_sales,
    sales_amount - LAG(sales_amount, 1) OVER (ORDER BY month) AS sales_difference
    FROM monthly_sales;

    查询结果:

    monthsales_amountprevious_month_salessales_difference
    2024-01-011000.00NULLNULL
    2024-02-011500.001000.00500.00
    2024-03-011200.001500.00-300.00
    2024-04-011700.001200.00500.00

    在这个查询中:
    previous_month_sales 显示了前一个月的销售额。
    sales_difference 显示了当前月与前一个月的销售额差异。
    通过上述查询,我们可以方便地分析销售数据的变化情况。

    总结

    LAG 函数是一个强大的工具,可以帮助你在数据分析中处理行间的比较和变化。无论是用于时间序列数据、计算变化量,还是生成滚动报告,LAG 函数都能提供有价值的信息。它通常与 OVER 子句一起使用, 在OVER子句中,我们可以指定分组条件、排序条件等。ORDER BY 确定行的顺序,PARTITION BY 则可以用来将数据分组,使每个分组内的计算互相独立。

    ]]>
    @@ -370,7 +344,7 @@ https://stonefishy.github.io/2024/08/02/how-to-query-tree-structured-relation-data-in-mysql/ 2024-08-02T10:08:52.000Z - 2024-11-15T06:39:09.206Z + 2024-11-18T09:56:32.357Z To query hierarchical relational data in MySQL, recursive Common Table Expressions (CTEs) are typically used. However, MySQL did not support recursive CTEs before version 8.0, so in earlier versions, self-joins are commonly used to handle such queries. Below is an example using a self-join, assuming we have a table employees that contains information about employees and their manager IDs (manager_id).

    Create Table and Insert Data

    Creating a table named employees with the following columns:

    1
    2
    3
    4
    5
    6
    7
    8
    9
    10
    11
    12
    13
    14
    15
    16
    17
    CREATE TABLE employees (
    id INT PRIMARY KEY,
    name VARCHAR(100),
    manager_id INT,
    FOREIGN KEY (manager_id) REFERENCES employees(id)
    );

    INSERT INTO employees (id, name, manager_id) VALUES
    (1, 'CEO', NULL),
    (2, 'CTO', 1),
    (3, 'CFO', 1),
    (4, 'Developer Lead', 2),
    (5, 'Accountant Lead', 3),
    (6, 'Developer', 4),
    (7, 'Junior Developer', 4),
    (8, 'Senior Accountant', 5),
    (9, 'Junior Accountant', 5);

    Self-Join

    We can search for all employees and their direct reports (subordinates) using a self-join. The following SQL statement will list all employees and their direct manager’s name.

    1
    2
    3
    4
    SELECT e1.name AS employee_name, e2.name AS manager_name
    FROM employees e1
    LEFT JOIN employees e2 ON e1.manager_id = e2.id
    ORDER BY e1.manager_id, e1.id;

    We can also use a self-join to count the number of direct reports for each manager. The following SQL statement will list all managers and the number of their direct reports.

    1
    2
    3
    4
    5
    SELECT e1.name AS manager_name, COUNT(e2.id) AS subordinate_count
    FROM employees e1
    LEFT JOIN employees e2 ON e1.id = e2.manager_id
    GROUP BY e1.id
    ORDER BY subordinate_count DESC;

    Recursive Common Table Expressions (CTEs)

    MySQL 8.0 introduced support for recursive CTEs, which allows us to query hierarchical relational data more efficiently. The following SQL statement will list all employees and their direct reports (subordinates) using a recursive CTE.

    1
    2
    3
    4
    5
    6
    7
    8
    9
    10
    WITH RECURSIVE subordinates AS (
    SELECT id, name, manager_id
    FROM employees
    WHERE id = 1 -- root node CEO, we can replace with any other root node ID, for example 2 which is CTO
    UNION ALL
    SELECT e.id, e.name, e.manager_id
    FROM employees e
    INNER JOIN subordinates s ON e.manager_id = s.id
    )
    SELECT * FROM subordinates;

    But please note that this method only works for MySQL 8.0 and above, as these versions support recursive CTEs.

    ]]>
    @@ -394,7 +368,7 @@ https://stonefishy.github.io/2024/07/26/mysql-8-x-ctes-feature-with-clause/ 2024-07-26T09:19:59.000Z - 2024-11-15T06:39:09.206Z + 2024-11-18T09:56:32.357Z MySQL Common Table Expressions (CTEs) are a powerful feature introduced in MySQL 8.0. CTEs are a type of MySQL 8.0 that provide a way to create temporary result sets that can be referenced within a SELECT, INSERT, UPDATE, or DELETE statement. The primary purpose of `CTEs` is to make complex queries more readable and manageable by breaking them down into simpler.

    MYSQL CTEs feature - WITH clause
    MYSQL CTEs feature - WITH clause

    Purpose of CTEs

    • Readability: CTEs can make SQL queries more readable, especially for complex queries involving multiple subqueries or recursive operations.
    • Modularity: They allow you to define a temporary result set that can be reused within the same query, promoting code reuse and reducing redundancy.
    • Recursive Queries: CTEs support recursive queries, which are useful for querying hierarchical data like organizational charts, bill of materials, or tree structures.

    How to Use CTEs

    CTEs are defined using the WITH clause and can be referenced within the main query. Here’s the basic syntax:

    1
    2
    3
    4
    5
    WITH cte_name AS (
    SELECT ...
    )

    SELECT ... FROM cte_name;

    Basic CTE

    Suppose you have a table employees and you want to find the average salary of employees in each department.

    1
    2
    3
    4
    5
    6
    7
    WITH DepartmentSalaries AS (
    SELECT department_id, AVG(salary) AS avg_salary
    FROM employees
    GROUP BY department_id
    )

    SELECT * FROM DepartmentSalaries;

    In this example, DepartmentSalaries is a CTE that calculates the average salary for each department. The main query then selects from this CTE.

    CTEs feature also supports multiple temporary result sets in the same query, see below example:

    1
    2
    3
    4
    5
    WITH
    cte1 AS (SELECT a, b FROM table1),
    cte2 AS (SELECT c, d FROM table2)

    SELECT b, d FROM cte1 JOIN cte2 WHERE cte1.a = cte2.c;

    The SQL above defines two CTEs cte1 and cte2 and then joins them using a WHERE clause.

    Recursive CTE

    A CTE can refer to itself to define a recursive CTE. Common applications of recursive CTEs include series generation and traversal of hierarchical or tree-structured data.

    Suppose you have a table employees with a self-referencing column manager_id to represent a hierarchy. You want to find all subordinates of a given manager.

    1
    2
    3
    4
    5
    6
    7
    8
    9
    10
    11
    WITH RECURSIVE Subordinates AS (
    SELECT employee_id, manager_id, name
    FROM employees
    WHERE manager_id = 1 -- Starting with the manager ID 1
    UNION ALL
    SELECT e.employee_id, e.manager_id, e.name
    FROM employees e
    INNER JOIN Subordinates s ON e.manager_id = s.employee_id
    )

    SELECT * FROM Subordinates;

    In this example, Subordinates is a recursive CTE that starts with employees directly reporting to manager ID 1 and then recursively includes all their subordinates.

    Key Points

    • Non-Recursive CTEs: These are straightforward and do not involve recursion. They are simply a way to define a temporary result set for use within the query.
    • Recursive CTEs: These involve recursion and are useful for hierarchical or tree-structured data. They must include a UNION ALL clause to combine the initial result set with the recursive result set.
    • Scope: CTEs are scoped to the query they are defined in and cannot be referenced outside of that query.

    CTEs are a powerful tool in MySQL that can significantly improve the readability and maintainability of complex SQL queries.

    For other scenarios, like use WITH clause in UPDATE or DELETE statements, please refer to the following links:

    ]]>
    @@ -418,7 +392,7 @@ https://stonefishy.github.io/2024/07/04/using-pulumi-to-import-the-aws-resources-of-the-other-region/ 2024-07-04T10:27:55.000Z - 2024-11-15T06:39:09.206Z + 2024-11-18T09:56:32.357Z Context

    By default, Pulumi imports resources in the region which is specified in the Pulumi.yaml or Pulumi.<stack-name>.yaml file. If we try to import resources that are located in other regions, the pulumi import command will fail with an error.

    For example, we have quicksight resources such like DataSource, DataSet located in the eu-west-1 region, we already manage these resources in the pulumi by using pulumi import CLI command. All resources are located in eu-west-1 region. It is specified in the Pulumi.yaml or Pulumi.<stack-name>.yaml file like below.

    1
    2
    config:
    aws:region: eu-west-1

    Now we also want to import the existing resources such as QuickSight user Groups into Pulumi. But the AWS Quicksight user Groups resources are all located in the us-east-1 region. Pulumi will give us an error if we try to import a resource from another region directly.

    This is because the Pulumi is using default provider for the AWS resources. The default provider is set to the region which is specified in the Pulumi.yaml or Pulumi.<stack-name>.yaml file. So, if we want to import the resources from other region, we need to specify the provider for that region.

    Pulumi Provider

    A Pulumi provider is a plugin that enables Pulumi to interact with a specific cloud provider or service. These providers are responsible for translating the Pulumi code into the appropriate API calls for the target cloud platform.

    By default, each provider uses its package’s global configuration settings, which are controlled by your stack’s configuration. You can set information such as your cloud provider credentials with environment variables and configuration files. If you store this data in standard locations, Pulumi knows how to retrieve them. For example, you can run below command to set the AWS region to eu-west-1 region for the AWS provider configuration.

    1
    pulumi config set aws:region eu-west-1

    This command actually will set the aws:region configuration value for the AWS provider in your Pulumi stack yaml file. You can also define the provider in your pulumi code, and create related resources in the specified region.

    1
    2
    3
    4
    5
    6
    7
    8
    9
    10
    11
    12
    13
    14
    import pulumi
    import pulumi_aws as aws

    # Create a new provider for the us-east-1 region
    us_east_1_provider = aws.Provider('us-east-1', region='us-east-1')
    # Create the Quicksight Groups resources in the us-east-1 region
    quicksight_group = aws.quicksight.Group(
    "dev",
    aws_account_id="<aws-account-id>",
    group_name="dev",
    opts=pulumi.ResourceOptions(
    provider=us_east_1_provider
    )
    )

    In the above code, we create a new provider for the us-east-1 region and then create the Quicksight user Groups resources in the us-east-1 region. The provider option is used to specify the provider to use for the resource. Even though we have a global configuration for the eu-west-1 region, we can still create the resources in the us-east-1 region by specifying the provider.

    Importing the AWS Resources of the Other Region

    Back to previous topic, if we want to import the AWS Quicksight Users and Groups resources from the us-east-1 region in current pulumi stack from the command line, we need to specify the provider for the pulumi command line. The Pulumi CLI import command takes an additional --provider option to specify the provider to use for the import.

    1
    pulumi import aws:quicksight/group:Group dev xxxxxx/default/dev --provider name=urn

    In the above command, we are importing the aws:quicksight/group:Group resource named dev using the provider. For the --provider option, the name is the name of the provider to use for the import, and urn is the URN of the provider to use for the import. Typically, resource URNs in Pulumi follow the format below.

    1
    2
    3
    urn:pulumi:production::acmecorp-website::custom:resources:Resource$aws:s3/bucket:Bucket::my-bucket
    ^^^^^^^^^^ ^^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^^ ^^^^^^^^^
    <stack-name> <project-name> <parent-type> <resource-type> <resource-name>

    If there is no parent-type in the resource urn, the urns will be like below format.

    1
    2
    3
    urn:pulumi:production::acmecorp-website::aws:s3/bucket:Bucket::my-bucket
    ^^^^^^^^^^ ^^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^^ ^^^^^^^^^
    <stack-name> <project-name> <resource-type> <resource-name>

    For the details of Pulumi Resources URNs, please refer to the Pulumi URNs.

    In our scenario, we can import the Quicksight Groups resources from the us-east-1 region by using the provider. There is one important thing to note. For example, if we don’t have any Provider resources for the us-east-1 region in our current stack and we run the command below to import the Quicksight Groups resources from the us-east-1 region, it will fail. Below is an example of the full import command with the --provider option

    1
    pulumi import aws:quicksight/group:Group dev <aws-account-id>/default/dev --provider us_east_1_provider=urn:pulumi:<pulumi-project-name>::quicksight::pulumi:providers:aws::us_east_1_provider

    The <aws-account-id> and <pulumi-project-name> are placeholder just for example. Without the Provider resource for the us-east-1 region, the import command will fail as below error message.

    1
    Preview failed: bad provider reference 'us_east_1_provider=urn:pulumi:<pulumi-project-name>::quicksight::pulumi:providers:aws::us_east_1_provider' is not valid URN'

    The error full screenshot is below.

    To fix this issue, we need to create the Provider resource for the us-east-1 region in our current stack. We can do this by adding the Provider resource in our Pulumi code and using the pulumi up command to create the resource.

    1
    2
    3
    4
    5
    import pulumi
    import pulumi_aws as aws

    # Create a new provider for the us-east-1 region
    us_east_1_provider = aws.Provider('us-east-1', region='us-east-1')

    After that, we can run the import command again to import the Quicksight Groups resources from the us-east-1 region. And now you will see the Quicksight Groups resources of the us-east-1 region in your pulumi stack.

    Summary

    To import AWS resources of another region, we need to specify the provider for the pulumi command line. The Pulumi CLI import command takes an additional --provider option to specify the provider to use for the import. The provider resource should be created in Pulumi before importing resources from the other region.

    ]]>
    @@ -433,14 +407,14 @@ + + - -
    @@ -450,7 +424,7 @@ https://stonefishy.github.io/2024/06/25/keras3-0-a-multi-framework-machine-learning-library/ 2024-06-25T10:12:36.000Z - 2024-11-15T06:39:09.206Z + 2024-11-18T09:56:32.357Z Keras3 is a full rewrite of Keras that enables you to run your Keras workflows on top of either JAX, TensorFlow, or PyTorch, and that unlocks brand new large-scale model training and deployment capabilities. It’s multi-framework machine learning, meaning that you can use Keras to train models on top of different backends, and deploy them to different platforms. You can also use Keras as a low-level cross-framework language to develop custom components such as layers, models, or metrics that can be used in native workflows in JAX, TensorFlow, or PyTorch — with one codebase.

    Keras 3 Multi-framework Machine Learning
    Keras 3 Multi-framework Machine Learning

    What’s New in Keras 3?

    Keras 3 introduces several exciting features that enhance its usability, performance, and flexibility:

    Unified API

    Keras 3 continues to build on its legacy of a user-friendly and intuitive API. It aims to unify the high-level and low-level APIs more seamlessly, providing a consistent experience across different backends such as TensorFlow, PyTorch, and others.

    Multi-backend Support

    While Keras has traditionally been closely associated with TensorFlow, Keras 3 expands its compatibility to other popular deep learning frameworks. This means you can now use Keras with PyTorch and other backends, leveraging Keras’ high-level abstractions and ease of use across different environments.

    Improved Performance

    Efforts have been made in Keras 3 to optimize performance across various operations, ensuring faster execution times and better utilization of hardware resources. This improvement is crucial for handling larger datasets and complex models efficiently.

    Enhanced Model Deployment

    Keras 3 simplifies the process of deploying trained models to production environments. With streamlined APIs for model serialization and deployment tools, it becomes easier to integrate Keras models into real-world applications.

    Expanded Model Zoo

    Keras 3 comes with an expanded model zoo, offering pre-trained models for a wider range of tasks and domains. This includes vision models (e.g., ResNet, EfficientNet), NLP models (e.g., BERT, GPT), and other specialized architectures, all accessible through a unified interface.

    Advanced AutoML Capabilities

    The new release includes improved AutoML capabilities, allowing developers to automate model selection, hyperparameter tuning, and architecture search. This feature can significantly accelerate the model development process, especially for beginners and researchers exploring new domains.

    Pre-trained Models

    There’s a wide range of pretrained models that you can start using today with Keras 3. About 40 Keras Applications models (the keras.applications namespace) are available in all backends. These models are pre-trained on large datasets and can be used for transfer learning or fine-tuning. It includes:

    Pre-trained Models for Natural Language Processing

    • Albert
    • Bart
    • Bert
    • Bloom
    • DebertaV3
    • DistilBert
    • Gemma
    • Electra
    • Falcon
    • FNet
    • GPT2
    • Llama
    • Llama3
    • Mistral
    • OPT
    • PaliGemma
    • Phi3
    • Roberta
    • XLMRoberta

    Pre-trained Models for Computer Vision

    • CSPDarkNet
    • EfficientNetV2
    • MiT
    • MobileNetV3
    • ResNetV1
    • ResNetV2
    • VideoSwinB
    • VideoSwinS
    • VideoSwinT
    • VitDet
    • YOLOV8
    • ImageClassifier
    • VideoClassifier
    • CLIP
    • RetinaNet

    How to Get Started with Keras 3?

    1.Install Keras 3

    Ensure you have the latest version of Keras installed. You can install Keras via pip if you haven’t already:

    1
    pip install --upgrade keras

    2.Define Model

    Use Keras’ high-level API to define your deep learning model. Here’s a simple example of a convolutional neural network (CNN) for image classification:

    1
    2
    3
    4
    5
    6
    7
    8
    9
    10
    11
    12
    from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense
    from keras.models import Sequential

    model = Sequential([
    Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3)),
    MaxPooling2D((2, 2)),
    Conv2D(64, (3, 3), activation='relu'),
    MaxPooling2D((2, 2)),
    Flatten(),
    Dense(64, activation='relu'),
    Dense(10, activation='softmax')
    ])

    3.Compile and Train Model

    Compile the model with a loss function, optimizer, and metrics, then train it on your data:

    1
    2
    3
    4
    5
    model.compile(optimizer='adam',
    loss='sparse_categorical_crossentropy',
    metrics=['accuracy'])

    model.fit(train_images, train_labels, epochs=10, batch_size=32, validation_data=(val_images, val_labels))

    4.Deploy models

    Keras 3 provides a simple and unified interface for deploying trained models to production environments. You can serialize your models and deploy them using tools such as TensorFlow Serving, PyTorch Hub, or JAX Hub.

    Summary

    Keras 3 brings a lot of exciting features to the table, including multi-backend support, improved performance, and enhanced model deployment. It also includes a wide range of pre-trained models for natural language processing and computer vision, making it easy to get started with deep learning. With these features, Keras 3 is a powerful and flexible tool for building and deploying deep learning models.

    ]]>
    @@ -484,7 +458,7 @@ https://stonefishy.github.io/2024/06/14/understanding-the-x-frame-options-http-header/ 2024-06-14T14:36:28.000Z - 2024-11-15T06:39:09.206Z + 2024-11-18T09:56:32.357Z Recently, we built a frontend website as an nginx docker image. Before going live on production, we asked the security team to run a security scan of the website on the stage environment. One of the security issues indicates that the X-Frame-Options HTTP header is not set properly, which makes the website vulnerable to clickjacking attacks.

    Clickjacking Attack

    Clickjacking is a type of security vulnerability that allows an attacker to trick a user into clicking on a link or button on a malicious website that is designed to look like the legitimate website. This can happen when the attacker embeds the malicious website within a frame on the legitimate website, which can trick the user into clicking on the malicious link or button.

    To prevent clickjacking attacks, we can use the X-Frame-Options HTTP header to specify whether a web page can be displayed within a frame or iframe. This header can have three possible values: DENY, SAMEORIGIN, and ALLOW-FROM uri.

    What is X-Frame-Options?

    The X-Frame-Options is an HTTP response header used to control whether a web page can be displayed within a frame or iframe. It helps to mitigate clickjacking attacks by preventing malicious websites from embedding a vulnerable site within a frame and tricking users into taking unintended actions.

    How it works

    The X-Frame-Options header can have three possible values: DENY, SAMEORIGIN, and ALLOW-FROM uri.

    DENY: This value prevents the page from being displayed in a frame, regardless of the site attempting to do so.

    SAMEORIGIN: With this value, the page can be displayed in a frame on the same origin as the page itself. This restricts the frame to the same origin as the parent page.

    ALLOW-FROM uri: Here, the page can only be displayed in a frame on the specified origin.

    Implementation

    To implement the X-Frame-Options header, simply include the header in the server’s HTTP response. It can be implemented on code programming, server configuration, or web server configuration.

    Code Programming

    Below is an example of how to set the header using different programming languages:

    Using Node.js (Express)

    1
    2
    3
    4
    5
    // Set X-Frame-Options header to DENY
    app.use((req, res, next) => {
    res.setHeader('X-Frame-Options', 'DENY');
    next();
    });

    Using Django (Python)

    1
    2
    # Set X-Frame-Options header to SAMEORIGIN
    response['X-Frame-Options'] = 'SAMEORIGIN'

    Using ASP.NET (C#)

    1
    2
    // Set X-Frame-Options header to ALLOW-FROM
    Response.AddHeader("X-Frame-Options", "ALLOW-FROM https://example.com");

    Server Configuration

    Nginx

    To configure Nginx to send the X-Frame-Options header, add this either to your http, server or location configuration:

    1
    add_header X-Frame-Options SAMEORIGIN always;

    Apache

    To configure Apache to send the X-Frame-Options header for all pages, add this to your site’s configuration:

    1
    Header always set X-Frame-Options "DENY"

    IIS

    To configure IIS to send the X-Frame-Options header for all pages, add this to your web.config file:

    1
    2
    3
    4
    5
    6
    7
    <system.webServer>
    <httpProtocol>
    <customHeaders>
    <add name="X-Frame-Options" value="DENY" />
    </customHeaders>
    </httpProtocol>
    </system.webServer>

    Demo

    To demonstrate the effectiveness of the X-Frame-Options header, we can create a parent html page, and a child html page that is embedded within a frame in the parent page.

    Parent HTML Page

    Parent HTML page includes the iframe of the child page. The child page is hosted on a different domain (http://localhost:3333/child.html) to demonstrate the effectiveness of the X-Frame-Options header.

    1
    2
    3
    4
    5
    6
    7
    8
    9
    10
    11
    <!DOCTYPE html>
    <html>
    <head>
    <title>Parent Page</title>
    </head>
    <body>
    <h1>Parent Page</h1>
    <p>This is parent page. below is the iframe of child page.</p>
    <iframe src="http://localhost:3333/child.html" frameborder="0" sandbox="allow-scripts" style="width: 100%; height: 200px;"></iframe>
    </body>
    </html>

    Child HTML Page

    Child HTML page is a simple page that displays a message. It is hosted on the domain (http://localhost:3333/child.html) by using httpster tool.

    1
    2
    3
    4
    5
    6
    7
    8
    9
    10
    <!DOCTYPE html>
    <html>
    <head>
    <title>Child Page</title>
    </head>
    <body>
    <h1>Child Page</h1>
    <p>This is a child page.</p>
    </body>
    </html>

    Testing

    To test the effectiveness of the X-Frame-Options header, we can open the parent page in a browser and observe the behavior.

    Without X-Frame-Options Header

    By default, the httpster does not add the X-Frame-Options header to the response. Therefore, the child page can be embedded within a frame on the parent page. See the screenshot below: there is no X-Frame-Options header in the response.

    Without X-Frame-Options Header
    Without X-Frame-Options Header

    With X-Frame-Options Header

    With the X-Frame-Options header set to DENY, the child page cannot be embedded within a frame on the parent page.

    To test the X-Frame-Options header, we need to modify the httpster server source code to add the X-Frame-Options header to the response. Actually, the httpster tool is a simple HTTP server base on node express. We can modify the app.use function to set the X-Frame-Options header in the httpster source code.

    Here is the modified app.use function with the X-Frame-Options header set to DENY:

    1
    2
    3
    4
    app.use((req, res, next) => {
    res.setHeader('X-Frame-Options', 'DENY');
    next();
    });

    We can then open the parent page in a browser and observe the behavior. See the screenshot below: the X-Frame-Options header value is set to DENY in the response. And the child page is blocked from being embedded within a frame on the parent page.

    With X-Frame-Options Header Value DENY
    With X-Frame-Options Header Value DENY

    And also, there is error message in the console of the browser, which indicates that the child page is blocked from being embedded within a frame on the parent page.

    With X-Frame-Options Header Value DENY Console Error
    With X-Frame-Options Header Value DENY Console Error

    You can also test the X-Frame-Options header with different values such as SAMEORIGIN and ALLOW-FROM uri to see how it affects the behavior of the website.

    Conclusion

    By implementing the X-Frame-Options header, web developers can enhance the security of their websites and protect users from potential clickjacking attacks. It is recommended to set this header appropriately based on the specific requirements of the web application.

    Remember to test the effectiveness of the header using browser developer tools and security testing tools to ensure that it is properly configured.

    ]]>
    @@ -508,7 +482,7 @@ https://stonefishy.github.io/2024/04/18/the-points-of-aws-china-cloudfront-you-need-to-notice/ 2024-04-18T10:16:54.000Z - 2024-11-15T06:39:09.206Z + 2024-11-18T09:56:32.357Z There are many differences between AWS Global and AWS China. The background of this blog is that I’m responsible for migrating the AWS Global application to AWS China. The application has already gone live on AWS Global. The application collects user information for business logic. The business wants this application to serve China customers. Due to data regulations, the application needs to be deployed in AWS China and store the user information in AWS China.

    The application is using below AWS services:

    • AWS S3: store the static website assets and user information.
    • AWS ALB: the load balancer for the application.
    • AWS ASG: auto scaling group for the application.
    • AWS ECR: store the application container image.
    • AWS ECS: run the application container.
    • AWS ACM: manage the SSL certificate.
    • AWS WAF: web application firewall.
    • AWS VPC: virtual private cloud.
    • AWS S3 VPC Gateway: access the S3 bucket from the VPC.
    • AWS CloudWatch: monitor the application logs, performance and alarms.
    • AWS SNS: notify the stakeholders when application performance is abnormal.
    • AWS CloudFront: serve the static website and user information.

    AWS China

    AWS China is a separate entity operated by local partners in compliance with Chinese regulations. Data centers are located in Beijing and Ningxia. The operator is different between Beijing and Ningxia: the Beijing region is operated by Sinnet(光环新网), and the Ningxia region is operated by NWCD(西云数据). Basically, the service price of the Ningxia region is cheaper than the Beijing region. You can find the detailed pricing in the AWS China link https://calculator.amazonaws.cn/#/. AWS Fargate pricing is here https://www.amazonaws.cn/en/fargate/pricing

    Difference between AWS Global and AWS China

    AWS China has many limitations and differences compared with AWS Global, and some new services are not available in AWS China. When you migrate the application to AWS China, you need to consider the points below:

    1. AWS China has different pricing policy. The pricing policy is different between Beijing and Ningxia.
    2. The Infrastructure code is different between AWS Global and AWS China. The code need to be modified to adapt to AWS China.
    3. The website should do the ICP filing and Government filing. (域名备案,网安备案)

    Infrastructure as Code

    We’re using Pulumi to manage the infrastructure as code. Pulumi is a tool for developing, building, and deploying cloud applications and infrastructure. It supports multiple cloud providers including AWS, Azure, GCP, and Kubernetes.
    Some AWS service resource definitions are different between AWS Global and AWS China. In AWS China there is an amazonaws.com.cn suffix for endpoints, and an aws-cn ARN prefix. The code needs to be modified to adapt to AWS China.

    AWS China

    • AWS EndPoint: xxxxxxx.s3.cn-northwest-1.amazonaws.com.cn/example.txt
    • AWS ARNs: arn:aws-cn:s3:::xxxxxxx/example.txt

    AWS Global

    • AWS EndPoint: xxxxxxx.s3.us-east-1.amazonaws.com/example.txt
    • AWS ARNs: arn:aws:s3:::xxxxxxx/example.txt

    CloudFront

    In our application is much difference between AWS Global and AWS China, especially the CloudFront.

    • Requires ICP filing and domain name filing in AWS China.
    • The CloudFront provides domain name like “*.cloudfront.cn” which cannot be used in for website serving in AWS China. You can not access the website through the CloudFront domain name. It returns 403 Forbidden error.
    • The SSL/TLS certificates for CloudFront do not support Amazon Certificate Manager in AWS China. It requires using an SSL/TLS certificate from a third party, and then importing the certificate into IAM. Only IAM is supported to store the certificates for CloudFront in AWS China.
    • The CloudFront does not support Amazon WAF in AWS China.
    • The Cache policies and Origin request policies are not supported in AWS China
    • The Lambda@Edge is not available in AWS China.
    • CloudFront origin access only supports legacy access identities (OAI) for S3 buckets; it does not support OAC in AWS China
    • The CloudFront origin for S3 bucket which is not a website endpoint, the following format: bucket-name.s3.region.amazonaws.com.cn, remember region after s3
    • The CloudFront origin for S3 bucket which is a website endpoint, use the following format: bucket-name.s3-website.region.amazonaws.com.cn, remember region after s3-website

    For more information, please refer to the AWS China CloudFront https://docs.amazonaws.cn/en_us/aws/latest/userguide/cloudfront.html#feature-diff

    Summary

    In this blog, we have discussed the important points when migrating the AWS Global application to AWS China, especially for AWS CloudFront. We have listed the differences between AWS Global and AWS China, and also the CloudFront differences between AWS Global and AWS China.

    If you want to know more about AWS China service difference with AWS Global, you can refer to this official link https://docs.amazonaws.cn/en_us/aws/latest/userguide/services.html

    Hope this blog can help you to migrate the application to AWS China.

    ]]>
    @@ -534,7 +508,7 @@ https://stonefishy.github.io/2024/03/13/migrate-a-legacy-application-to-aws-cloud/ 2024-03-13T14:20:44.000Z - 2024-11-15T06:39:09.206Z + 2024-11-18T09:56:32.357Z Background

    Recently, we got a requirement from the company to move the application to AWS Cloud. The company has a strong focus on security and compliance, and stakeholders also want the application to be more reliable and scalable. The migration also needs to be done as soon as possible.

    The application is running in a local data center. The application consists of two parts: the frontend is a static website built with React that provides the user interface, and the backend is a Python Flask application that provides the API to interact with the frontend. The backend server also contains a machine learning model algorithm that is used to process the user’s ears photo.

    The application’s main logic is that the user answers some questions and scans and uploads their ears photo to the backend server from the website; the backend server will process the photo and return the suggestion result to the user, recommending which headset or earphone is the best fit for them.

    Architecture

    After analysis application technologies and architecture, base on the requirements, we did some architecture design. Below is the architecture of the application on AWS Cloud.

    Application Architecture on AWS Cloud
    Application Architecture on AWS Cloud

    The application is hosted on AWS Cloud, major is that the frontend is served by CloudFront, the backend is served by ECS, and the user’s ears photo is stored in S3 bucket. The application is using the following AWS services:

    AWS S3 Bucket

    Set up two S3 buckets: one is for storing the user’s ears photos, configured so that objects expire after 90 days; the second bucket is for storing the static website files. All S3 buckets have public access blocked.

    AWS VPC

    Create a dedicate VPC for the application, and configure the subnets, route tables, and security groups. Two public subnets and two private subnets are used.

    AWS ECR

    Use ECR to store the Docker image of the backend application. The image will be built and pushed to ECR by CI/CD pipeline.

    AWS ECS

    Use ECS to run the backend application as a container in private subnets, and configure the auto scaling group and load balancer. Autoscaling minimum size is 2 and maximum size is 20.

    AWS ALB

    Create a ALB to serve the backend ECS service, and configure the listener rules to forward the traffic to the ECS service. The ALB attached the SSL certificate from ACM.

    AWS S3 VPC Gateway Endpoint

    Use the S3 VPC Gateway Endpoint to access the s3 bucket from the backend ECS container.

    AWS Internet Gateway

    The Internet Gateway to connect the VPC to the internet. Put the ALB on the two public subnets across two AZs

    AWS CloudWatch

    Use CloudWatch to monitor the application performance, and create alarms to notify the team when the application is not running as expected.

    AWS SNS

    Use SNS to notify the team when the application performance is not good, and the team can take action to improve the application performance.

    AWS ACM

    Use ACM to manage the SSL certificate for the ALB and CloudFront, the certificate is issued by the IT team. The application is served over HTTPS.

    AWS CloudFront

    Use CloudFront to serve the static website files, and cache the files to improve the website loading speed. Config CloudFront to access s3 bucket by OAC. Create a another origin for the ALB.

    AWS Security Group

    Create a security group for the ECS container, and allow the traffic from the ALB to the ECS container. And one more security group for the ALB to allow the traffic only from AWS CloudFront prefix list.

    AWS IAM

    Create an IAM role for the ECS container, and attach the necessary policies to the role to access the s3 bucket, ECR, and CloudWatch.

    AWS WAF

    Use WAF to protect the application from common web exploits and attacks. This is mandatory for the company’s security policy. The security team will also review the infrastructure and do the security scan of the application. The application won’t be deployed to production if the security scan fails.

    IaC with Pulumi

    Use Pulumi to manage the AWS resources, and create the infrastructure as code. The code will be checked into source control, and for the pipeline, we’re using a Bamboo pipeline as the company is already using Bamboo for CI/CD. The pipeline will do the following major things.

    1. Build the Docker image and push to ECR.
    2. Deploy the frontend static website to CloudFront, and invalidate the cache to make the content updated for end user.
    3. Update the infrastructure by creating or updating the AWS resources by using pulumi.

    Rationale

    The migration of the legacy application to AWS Cloud is a complex task, and we need to follow the best practices to make the migration successful.

    1. Using CloudFront and S3 bucket to host the static website and user’s ears photo is scalable and cost-effective.
    2. Using the ECS and ALB to serve the backend application is also a good choice to improve the application performance and scalability. We’re not using AWS API Gateway and AWS Lambda to serve as backend because we are requested to migrate the application to Cloud as soon as possible. Build the python Flask application to a docker image and push to ECR is a good practice to deploy the application to AWS Cloud in this situation.
    3. Using the VPC and security group to isolate the application and improve the security is a must. The ECS is located in private subnets, and the ALB is in public subnets, and the traffic is only allowed from AWS CloudFront prefix list to ALB, then forward traffic to ECS container.
    4. Using the ACM to manage the SSL certificate for the ALB and CloudFront is a good practice to improve the security and compliance.
    5. Using the CloudWatch to monitor the application performance and create alarms to notify the team when the application is not running as expected is a good practice to improve the application reliability.
    6. Using the IAM role to access the s3 bucket, ECR, and CloudWatch is a good practice to improve the security and control.
    7. Using the WAF to protect the application from common web exploits and attacks is a mandatory requirement for the company’s security policy.
    8. Using Pulumi to manage the AWS resources as code is a good practice to improve the automation and reliability of the migration process.

    Summary

    This is just a sample of how to migrate a legacy application to AWS Cloud, and there are many other factors to consider when migrating a legacy application to AWS Cloud. The key is to follow the best practices and use the right tools to make the migration successful based on the requirements.

    ]]>
    @@ -549,14 +523,14 @@ + + - - @@ -568,7 +542,7 @@ https://stonefishy.github.io/2024/02/27/importing-existing-cloud-resources-with-pulumi/ 2024-02-27T14:26:24.000Z - 2024-11-15T06:39:09.206Z + 2024-11-18T09:56:32.357Z In many real-world scenarios, cloud infrastructure is already in place before adopting infrastructure as code (IaC) solutions like Pulumi. Pulumi provides a feature called import to help manage existing cloud resources within its IaC framework. This feature allows users to import the current state of resources into their Pulumi codebase, making it easier to adopt Pulumi for managing existing infrastructure.

    Pulumi Import

    Pulumi’s import feature provides a way to bring existing cloud resources under Pulumi’s management. By creating a Pulumi program and using the pulumi import command, users can declare and manage existing infrastructure resources using Pulumi. The pulumi supports both importing existing resources with the CLI and importing existing resources in the code. Here we’re talking about the CLI import to generate the code for the imported resources.

    Usage and Syntax

    To import an existing cloud resource into Pulumi, you need to follow these steps:

    1. Create a Pulumi Project
      Create a new Pulumi project or use an existing Pulumi project where you want to manage the imported resources. For creating a pulumi project, you can check the previous blog post on how to create a new Pulumi project.

    2. Identify the Resource to Import
      Identify the existing resource in your cloud provider environment that you want to import into Pulumi. This could be a virtual machine, database, storage bucket, or any other supported resource.

    3. Apply the Import
      Apply the import operation to bring the existing resource under Pulumi’s management. Pulumi will generate the appropriate code for the resource based on its current state in the cloud provider environment.

    The syntax for the import command is as follows:

    1
    pulumi import <type> <name> <id>
    • <type> is the Pulumi type token to use for the imported resource.
    • <name> is the resource name to apply to the resource once it’s imported.
    • <id> is the value to use for the resource lookup in the cloud provider.

    Managing Imported Resources

    Once the resources are imported, they can be managed just like any other Pulumi-managed resources. The imported resources can be updated, deleted, and included in stacks alongside other Pulumi-declared infrastructure.

    Example

    I created a S3 bucket name my-s3-bucket from AWS Console manually. But now I want to manage this S3 bucket by Pulumi. After identifying the bucket to be imported, the import command:

    1
    pulumi import aws:s3/bucket:Bucket my-bucket my-s3-bucket
    • aws:s3/bucket:Bucket is the Pulumi type token for the S3 bucket resource.
    • my-bucket is the resource name to apply to the imported resource.
    • my-s3-bucket is the value to use for the resource lookup in the AWS provider, here it’s bucket name.

    After running the import command, Pulumi will generate the appropriate code for the S3 bucket resource based on its current state in the AWS provider. Below is screenshot of the output of the import command:

    Pulumi Import
    Pulumi Import

    And generated code:

    1
    2
    3
    4
    5
    6
    7
    8
    9
    10
    11
    12
    13
    14
    15
    16
    17
    import pulumi
    import pulumi_aws as aws

    my_bucket = aws.s3.Bucket("my-bucket",
    arn="arn:aws-cn:s3:::my-s3-bucket",
    bucket="my-s3-bucket",
    hosted_zone_id="Z282HJ1KT0DH03",
    request_payer="BucketOwner",
    server_side_encryption_configuration=aws.s3.BucketServerSideEncryptionConfigurationArgs(
    rule=aws.s3.BucketServerSideEncryptionConfigurationRuleArgs(
    apply_server_side_encryption_by_default=aws.s3.BucketServerSideEncryptionConfigurationRuleApplyServerSideEncryptionByDefaultArgs(
    sse_algorithm="AES256",
    ),
    bucket_key_enabled=True,
    ),
    ),
    opts=pulumi.ResourceOptions(protect=True))

    In above code, you will notice there is a protect=True option set for the imported resource. This is to prevent any accidental deletion of the imported resource.

    So when you try to delete the imported resource, Pulumi will give the errors to you. Let’s try to delete the imported S3 bucket:

    1
    pulumi destroy

    You see, it displays the error message that the S3 bucket is protected and cannot be deleted.

    Pulumi Destroy Import Protected Resource
    Pulumi Destroy Import Protected Resource

    If you want to delete the resource in the cloud provider environment, you can remove the protect=True option from the code or change the protect option to False.

    In above we’re using pulumi import to import the s3 bucket resource and code is generated on console. We can also generate the code into python file directly by using below command:

    1
    pulumi import aws:s3/bucket:Bucket my-bucket my-s3-bucket -o my-s3-bucket.py

    Pulumi State

    Pulumi maintains a state file that tracks the current state of all resources in the cloud provider environment. When a resource is imported, Pulumi updates the state file to reflect the imported resource. This allows Pulumi to manage the imported resource as if it were created in the cloud provider environment.

    Sometimes, we want to delete the state that was imported into Pulumi, but keep the existing cloud resources. In such cases, we can use the below command to delete only the state and keep the existing cloud resources.

    1
    pulumi state delete <urn>
    • <urn> is the unique resource identifier of the imported resource.

    To check the <urn> of the resource, we can use pulumi stack --show-urns to see the list urns of all resources in the current stack.

    1
    pulumi stack --show-urns
    Pulumi Stack Show Urns
    Pulumi Stack Show Urns

    In the above screenshot, we can see the <urn> of the imported S3 bucket resource. To delete the state of the imported resource, we can use the following command:

    1
    pulumi state delete urn:pulumi:dev::pulumi-test::aws:s3/bucket:Bucket::my-bucket --force -y
    Pulumi State Delete Imported Resource
    Pulumi State Delete Imported Resource

    After deleting the state, the imported S3 bucket will still exist in the cloud provider environment.

    Conclusion

    Pulumi’s import feature allows users to seamlessly integrate existing cloud resources into their Pulumi programs. By following the import process and syntax, users can effectively manage their entire infrastructure, including existing resources, through Pulumi’s IaC approach.

    This feature simplifies the transition to Pulumi for managing infrastructure and enables teams to leverage the benefits of IaC without having to recreate their entire cloud environment from scratch.

    ]]>
    @@ -583,14 +557,46 @@ + + + + + + + + Pulumi - A Powerful IaC to manage the cloud infrastructure + + https://stonefishy.github.io/2024/02/11/pulumi-a-powerful-iac-to-manage-the-cloud-infrastructure/ + 2024-02-11T15:12:13.000Z + 2024-11-18T09:56:32.357Z + + To manage the application cloud infrastructure more efficiently, we can use the Terraform for IaC(Infrastructure as Code). But today, we’re not going to talk about the Terraform, we’re going to talk about the Pulumi. A powerful IaC tool that manages the cloud infrastructure.

    Pulumi Platform
    Pulumi Platform

    Pulumi is an open-source infrastructure as code (IaC) tool that provides a powerful way to create, deploy, and manage cloud infrastructure. It is the easiest way to build and deploy infrastructure, of any architecture and on any cloud, using programming languages that you already know and love, such as TypeScript, Python, Go, C#, Java etc.

    It is a cross-platform tool that runs on Windows, Linux, and macOS, and supports a wide range of cloud providers, including AWS, Azure, GCP, Kubernetes, Docker, and more. It is also easy to use and has a simple and intuitive interface.

    CI/CD integration is also supported, which means you can use Pulumi to deploy your infrastructure as part of your CI/CD pipeline. This makes it easier to manage and update your infrastructure as your application evolves.

    Install Pulumi

    The pulumi is a cross-platform tool that runs on Windows, Linux, and macOS. You can find and download the latest version from the official website: https://www.pulumi.com/docs/install/versions/. Follow this link to https://www.pulumi.com/docs/install/ to install the Pulumi CLI. It’s very simple to set up the Pulumi CLI on your machine.

    Once you installed pulumi, simply run the below command to check the version:

    1
    pulumi version

    Create a new Pulumi project

    To create a new Pulumi project, you can use the pulumi new command. Below command is creating a new project with AWS Python template.

    1
    pulumi new aws-python

    This will create a new project with a simple AWS Python template. The project will have a Pulumi.yaml file, which is the configuration file for the project.

    Configure the Pulumi project

    The Pulumi.yaml file is the configuration file for the project. It contains the project name and some configuration.

    1
    2
    3
    4
    5
    6
    name: my-project
    runtime:
    name: python
    options:
    virtualenv: venv
    description: A minimal AWS Python Pulumi program

    In the above configuration, we have set the project name as my-project, the runtime as python and the virtualenv as venv. The description is a brief description of the project.

    Create a new stack

    To create a new stack, you can use the pulumi stack init command. Below command is creating a new stack with the name dev.

    1
    pulumi stack init dev

    Once you have created the stack, Pulumi will generate a file named Pulumi.dev.yaml in your project folder. You can select it using the pulumi stack select command.

    1
    pulumi stack select dev

    Configure the stack

    The Pulumi.dev.yaml file is the configuration file for the stack.

    1
    2
    3
    4
    encryptionsalt: v1:6YTR30z2X9tM=:v1:+fJN/nMOdJM+XjeZ:P7V9XPB9GHKE/dBuXX1uOCNGwgQztre==
    config:
    aws:region: us-west-2
    aws:profile: profile-account-1

    In above configuration, we have set the encryption salt (this is generated), and the AWS region and profile. You can add more configuration as per your requirement.

    Create a new resource

    To create a new resource, such as s3 bucket, you can write python code in the main.py file. Below is the code to create a new s3 bucket.

    1
    2
    3
    4
    import pulumi
    import pulumi_aws as aws

    bucket = aws.s3.Bucket("my-bucket")

    In the above code, we have imported the aws module and created a new s3 bucket resource. The Bucket function creates a new s3 bucket with the name my-bucket.

    Preview the changes

    To preview the changes, you can use the pulumi preview command. This command will show the changes that will be applied to the infrastructure.

    1
    pulumi preview

    Below is the project I’m currently working on, shown as a pulumi preview showcase.

    Pulumi Preview
    Pulumi Preview

    In above screenshot, you can see the changes that will be applied to the infrastructure. Including update, create and delete resources listed. You can also use the --diff option to show the difference between the current state and the desired state.

    1
    pulumi preview --diff

    Deploy the infrastructure

    To deploy the infrastructure, you can use the pulumi up command. This command will deploy the infrastructure as per the configuration in the Pulumi.yaml file.

    1
    pulumi up

    This will deploy the infrastructure and show the output.

    Check the status of the infrastructure

    To check the status of the infrastructure, you can use the pulumi stack command. This command will show the status of the infrastructure.

    1
    pulumi stack

    This will show the status of the infrastructure. Below is the output of the pulumi stack command for one project I’m working on.

    Pulumi Stack
    Pulumi Stack

    Destroy the infrastructure

    To destroy the infrastructure, you can use the pulumi destroy command. This command will destroy the infrastructure as per the configuration in the Pulumi.yaml file.

    1
    pulumi destroy

    This will destroy the infrastructure. Please be aware that this command will destroy all the resources in the stack. It’s dangerous to use this command, so use it with caution. You should know what you’re doing before using this command.

    There are much more features of Pulumi, but I hope this article will give you a good idea about Pulumi.

    Conclusion

    Pulumi is a powerful IaC tool that manages the cloud infrastructure. It is easy to use and has a simple and intuitive interface. It supports a wide range of cloud providers, including AWS, Azure, GCP, Kubernetes, Docker, and more. It is also easy to integrate with CI/CD pipeline.

    ]]>
    + + + + + <p>To manage the application cloud infrastructure more efficiently, we can use the <code>Terraform</code> for <code>IaC(Infrastructure as Co + + + + + + + + + + + + +
    diff --git a/categories/Data-Analysis/index.html b/categories/Data-Analysis/index.html index 265c958b..8eb61a8d 100644 --- a/categories/Data-Analysis/index.html +++ b/categories/Data-Analysis/index.html @@ -537,17 +537,17 @@

    +
    + + + +
    + +
    + + + +
    + + + + -
    +
    diff --git a/categories/index.html b/categories/index.html index aff55330..f70ca4f5 100644 --- a/categories/index.html +++ b/categories/index.html @@ -651,10 +651,10 @@

    - + Data Analysis - 2 + 3 diff --git a/index.html b/index.html index 0a598016..4a1e91d4 100644 --- a/index.html +++ b/index.html @@ -514,9 +514,6 @@

    - - -

    - -

    + + + + @@ -2245,60 +2250,6 @@

    Recommend Articles

    - - -
    @@ -2349,7 +2300,7 @@

    Recommend Articles

    -
    +
    @@ -2403,7 +2354,7 @@

    Recommend Articles

    -
    +
    @@ -2457,7 +2408,7 @@

    Recommend Articles

    -
    +
    @@ -2511,7 +2462,7 @@

    Recommend Articles

    -
    +
    @@ -2565,7 +2516,7 @@

    Recommend Articles

    -
    +
    @@ -2619,7 +2570,7 @@

    Recommend Articles

    -
    +
    @@ -2673,7 +2624,7 @@

    Recommend Articles

    -
    +
    @@ -2727,7 +2678,7 @@

    Recommend Articles

    -
    +
    @@ -2780,6 +2731,60 @@

    Recommend Articles

    + +
    +
    + +
    + + + +
    +
    +
    + + + +
    +
    + @@ -2916,7 +2921,7 @@

    Andrewsy

  • Data Analysis
    (2)
  • + >
    Data Analysis
    (3)
  • Andrewsy
    - AI AWS AWS Lambda AWS S3 Android AngularJs Apache Avro Apache ORC Apache Parquet Azure BI Big Data Bixby Bracket CSS Cloud Cloud Native CoffeeScript Collaboration Container Cucumber Data Analysis Data Lake Data Warehouse DataBase Dataset Docker DuckDB Flink GPT-3 GPT-4 Git Guard Guava HTML HTML5 IE IETester IaC Iceberg Installer JAX JSP Java JavaScript Kafka Keras Kubernetes LangChain LiveReload MQ MVC MVP MVVM MacOS Machine Learning Markdown Math Microservice Msi MySQL NLP Nginx OpenAI Parquet Prompt Engineering Pulumi PyTorch Python RAG React Redis Ruby SOLID SQL Sentiment Analysis Service Mesh SoftSkill Spark Spring Spring AI Sublime Text TensorFlow Tensorflow Terraform Tool Tools TypeScript VSCode Virtual Machine Web Web Design Wix iTerm2 单例模式 数据结构 算法 资源 高清图片 + AI AWS AWS Lambda AWS S3 Android AngularJs Apache Avro Apache ORC Apache Parquet Azure BI Big Data Bixby Bracket CSS Cloud Cloud Native CoffeeScript Collaboration Container Cucumber Data Analysis Data Lake Data Warehouse DataBase Dataset Docker DuckDB Flink GPT-3 GPT-4 Git Guard Guava HTML HTML5 IE IETester IaC Iceberg Installer JAX JSP Java JavaScript Kafka Keras Kubernetes LangChain LiveReload MQ MVC MVP MVVM MacOS Machine Learning Markdown Math Microservice Msi MySQL NLP Nginx OpenAI Parquet Prompt Engineering Pulumi PyTorch Python RAG React Redis Ruby SOLID SQL Sentiment Analysis Service Mesh SoftSkill Spark Spring Spring AI Sublime Text TensorFlow Tensorflow Terraform Tool Tools TypeScript VSCode Virtual Machine Web Web Design Wix iTerm2 单例模式 数据结构 算法 资源 高清图片
    @@ -3024,18 +3029,18 @@

    Andrewsy

    - - +
    @@ -3045,18 +3050,18 @@

    Andrewsy

    - - +
    @@ -3066,18 +3071,18 @@

    Andrewsy

    - - +
    @@ -3087,18 +3092,18 @@

    Andrewsy

    - - +
    @@ -3108,18 +3113,18 @@

    Andrewsy

    - - +
    @@ -3130,7 +3135,7 @@

    Andrewsy

    -
    Archives
    +
    Archives
    @@ -3147,7 +3152,7 @@

    Andrewsy

    Articles:
    -
    168 Chapter
    +
    169 Chapter
    @@ -3169,7 +3174,7 @@

    Andrewsy

    Total words:
    -
    133.7k Count
    +
    134.6k Count
    @@ -3195,8 +3200,8 @@

    Andrewsy

    Last activity:
    - -
    2024-11-15
    + +
    2024-11-18
    @@ -3246,7 +3251,7 @@

    Andrewsy

    try { document.getElementById('last-update-show').innerHTML = timeago(new Date(lastUpDate)); } catch (error) { - document.getElementById('last-update-show').innerHTML = '2024-11-15 '; + document.getElementById('last-update-show').innerHTML = '2024-11-18 '; } diff --git a/page/10/index.html b/page/10/index.html index 18173f91..c4ac4dae 100644 --- a/page/10/index.html +++ b/page/10/index.html @@ -514,9 +514,6 @@

    - - -
    +
    + +
    + + + +
    +
    +
    + + + +
    +

  • + + +
    @@ -1838,7 +1896,7 @@

    -
    +
    @@ -1895,7 +1953,7 @@

    +
    @@ -1951,7 +2009,7 @@

    +
    @@ -2006,7 +2064,7 @@

    +
    @@ -2061,7 +2119,7 @@

    + -
    +
    @@ -2192,7 +2250,7 @@

    +
    @@ -2641,18 +2642,18 @@

    Andrewsy

    - - +
    @@ -2662,18 +2663,18 @@

    Andrewsy

    - - +

    @@ -2683,18 +2684,18 @@

    Andrewsy

    - - +

    @@ -2704,18 +2705,18 @@

    Andrewsy

    - - +

    @@ -2726,7 +2727,7 @@

    Andrewsy

    -
    Archives
    +
    Archives
    @@ -2743,7 +2744,7 @@

    Andrewsy

    Articles:
    -
    168 Chapter
    +
    169 Chapter
    @@ -2765,7 +2766,7 @@

    Andrewsy

    Total words:
    -
    133.7k Count
    +
    134.6k Count
    @@ -2791,8 +2792,8 @@

    Andrewsy

    Last activity:
    - -
    2024-11-15
    + +
    2024-11-18
    @@ -2842,7 +2843,7 @@

    Andrewsy

    try { document.getElementById('last-update-show').innerHTML = timeago(new Date(lastUpDate)); } catch (error) { - document.getElementById('last-update-show').innerHTML = '2024-11-15 '; + document.getElementById('last-update-show').innerHTML = '2024-11-18 '; } diff --git a/page/11/index.html b/page/11/index.html index f2be5629..db96e393 100644 --- a/page/11/index.html +++ b/page/11/index.html @@ -514,9 +514,6 @@

    - - -
    +
    + +
    + + + +
    +
    +
    + + + +
    +

    + + +
    @@ -2613,18 +2617,18 @@

    Andrewsy

    - - +
    @@ -2634,18 +2638,18 @@

    Andrewsy

    - - +

    @@ -2655,18 +2659,18 @@

    Andrewsy

    - - +

    @@ -2676,18 +2680,18 @@

    Andrewsy

    - - +
    @@ -2698,7 +2702,7 @@

    Andrewsy

    -
    Archives
    +
    Archives
    @@ -2715,7 +2719,7 @@

    Andrewsy

    Articles:
    -
    168 Chapter
    +
    169 Chapter
    @@ -2737,7 +2741,7 @@

    Andrewsy

    Total words:
    -
    133.7k Count
    +
    134.6k Count
    @@ -2763,8 +2767,8 @@

    Andrewsy

    Last activity:
    - -
    2024-11-15
    + +
    2024-11-18
    @@ -2814,7 +2818,7 @@

    Andrewsy

    try { document.getElementById('last-update-show').innerHTML = timeago(new Date(lastUpDate)); } catch (error) { - document.getElementById('last-update-show').innerHTML = '2024-11-15 '; + document.getElementById('last-update-show').innerHTML = '2024-11-18 '; } diff --git a/page/16/index.html b/page/16/index.html index 3ec657d9..77552162 100644 --- a/page/16/index.html +++ b/page/16/index.html @@ -514,9 +514,6 @@

    - - -
    +
    + +
    + + + +
    +
    +
    + + + +
    +

    + + +
    @@ -1830,7 +1889,7 @@

    +
    @@ -1885,7 +1944,7 @@

    +
    @@ -1940,7 +1999,7 @@

    +
    @@ -1995,7 +2054,7 @@

    +
    @@ -2050,7 +2109,7 @@

    + -
    +
    @@ -2162,7 +2221,7 @@

    +
    @@ -2218,7 +2277,7 @@

    +
    @@ -2276,71 +2335,6 @@

    -
    - -
    - - - -
    -
    -
    - - - -
    -

    - @@ -2482,7 +2476,7 @@

    Andrewsy

  • Data Analysis
    (2)
  • + >
    Data Analysis
    (3)
  • Andrewsy
  • - AI AWS AWS Lambda AWS S3 Android AngularJs Apache Avro Apache ORC Apache Parquet Azure BI Big Data Bixby Bracket CSS Cloud Cloud Native CoffeeScript Collaboration Container Cucumber Data Analysis Data Lake Data Warehouse DataBase Dataset Docker DuckDB Flink GPT-3 GPT-4 Git Guard Guava HTML HTML5 IE IETester IaC Iceberg Installer JAX JSP Java JavaScript Kafka Keras Kubernetes LangChain LiveReload MQ MVC MVP MVVM MacOS Machine Learning Markdown Math Microservice Msi MySQL NLP Nginx OpenAI Parquet Prompt Engineering Pulumi PyTorch Python RAG React Redis Ruby SOLID SQL Sentiment Analysis Service Mesh SoftSkill Spark Spring Spring AI Sublime Text TensorFlow Tensorflow Terraform Tool Tools TypeScript VSCode Virtual Machine Web Web Design Wix iTerm2 单例模式 数据结构 算法 资源 高清图片 + AI AWS AWS Lambda AWS S3 Android AngularJs Apache Avro Apache ORC Apache Parquet Azure BI Big Data Bixby Bracket CSS Cloud Cloud Native CoffeeScript Collaboration Container Cucumber Data Analysis Data Lake Data Warehouse DataBase Dataset Docker DuckDB Flink GPT-3 GPT-4 Git Guard Guava HTML HTML5 IE IETester IaC Iceberg Installer JAX JSP Java JavaScript Kafka Keras Kubernetes LangChain LiveReload MQ MVC MVP MVVM MacOS Machine Learning Markdown Math Microservice Msi MySQL NLP Nginx OpenAI Parquet Prompt Engineering Pulumi PyTorch Python RAG React Redis Ruby SOLID SQL Sentiment Analysis Service Mesh SoftSkill Spark Spring Spring AI Sublime Text TensorFlow Tensorflow Terraform Tool Tools TypeScript VSCode Virtual Machine Web Web Design Wix iTerm2 单例模式 数据结构 算法 资源 高清图片
    @@ -2590,18 +2584,18 @@

    Andrewsy

    - - +
    @@ -2611,18 +2605,18 @@

    Andrewsy

    - - +

    @@ -2632,18 +2626,18 @@

    Andrewsy

    - - +

    @@ -2653,18 +2647,18 @@

    Andrewsy

    - - +

    @@ -2674,18 +2668,18 @@

    Andrewsy

    - - +

    @@ -2696,7 +2690,7 @@

    Andrewsy

    -
    Archives
    +
    Archives
    @@ -2713,7 +2707,7 @@

    Andrewsy

    Articles:
    -
    168 Chapter
    +
    169 Chapter
    @@ -2735,7 +2729,7 @@

    Andrewsy

    Total words:
    -
    133.7k Count
    +
    134.6k Count
    @@ -2761,8 +2755,8 @@

    Andrewsy

    Last activity:
    - -
    2024-11-15
    + +
    2024-11-18
    @@ -2812,7 +2806,7 @@

    Andrewsy

    try { document.getElementById('last-update-show').innerHTML = timeago(new Date(lastUpDate)); } catch (error) { - document.getElementById('last-update-show').innerHTML = '2024-11-15 '; + document.getElementById('last-update-show').innerHTML = '2024-11-18 '; } diff --git a/page/17/index.html b/page/17/index.html index 2536419b..a0b464f8 100644 --- a/page/17/index.html +++ b/page/17/index.html @@ -514,9 +514,6 @@

    - - -
    +
    + +
    + + + +
    +
    +
    + + + +
    +

    + + +
    @@ -2584,18 +2587,18 @@

    Andrewsy

    - - +
    @@ -2605,18 +2608,18 @@

    Andrewsy

    - - +

    @@ -2626,18 +2629,18 @@

    Andrewsy

    - - +
    @@ -2647,18 +2650,18 @@

    Andrewsy

    - - +
    @@ -2669,7 +2672,7 @@

    Andrewsy

    -
    Archives
    +
    Archives
    @@ -2686,7 +2689,7 @@

    Andrewsy

    Articles:
    -
    168 Chapter
    +
    169 Chapter
    @@ -2708,7 +2711,7 @@

    Andrewsy

    Total words:
    -
    133.7k Count
    +
    134.6k Count
    @@ -2734,8 +2737,8 @@

    Andrewsy

    Last activity:
    - -
    2024-11-15
    + +
    2024-11-18
    @@ -2785,7 +2788,7 @@

    Andrewsy

    try { document.getElementById('last-update-show').innerHTML = timeago(new Date(lastUpDate)); } catch (error) { - document.getElementById('last-update-show').innerHTML = '2024-11-15 '; + document.getElementById('last-update-show').innerHTML = '2024-11-18 '; } diff --git a/page/3/index.html b/page/3/index.html index 9295f6a7..3aaa2b72 100644 --- a/page/3/index.html +++ b/page/3/index.html @@ -514,9 +514,6 @@

    - - -
    -
    - -
    - - - -
    -
    -
    - - - -
    -

    - - -
    @@ -1883,7 +1832,7 @@

    -
    +
    @@ -1938,7 +1887,7 @@

    -
    +
    @@ -1992,7 +1941,7 @@

    -
    +
    @@ -2047,7 +1996,7 @@

    -
    +
    @@ -2101,7 +2050,7 @@

    -
    +
    @@ -2155,7 +2104,7 @@

    -
    +
    @@ -2212,7 +2161,7 @@

    -
    +
    @@ -2269,7 +2218,7 @@

    -
    +
    @@ -2322,6 +2271,55 @@

    + + + @@ -2463,7 +2461,7 @@

    Andrewsy

  • Data Analysis
    (2)
  • + >
    Data Analysis
    (3)
  • Andrewsy
    - AI AWS AWS Lambda AWS S3 Android AngularJs Apache Avro Apache ORC Apache Parquet Azure BI Big Data Bixby Bracket CSS Cloud Cloud Native CoffeeScript Collaboration Container Cucumber Data Analysis Data Lake Data Warehouse DataBase Dataset Docker DuckDB Flink GPT-3 GPT-4 Git Guard Guava HTML HTML5 IE IETester IaC Iceberg Installer JAX JSP Java JavaScript Kafka Keras Kubernetes LangChain LiveReload MQ MVC MVP MVVM MacOS Machine Learning Markdown Math Microservice Msi MySQL NLP Nginx OpenAI Parquet Prompt Engineering Pulumi PyTorch Python RAG React Redis Ruby SOLID SQL Sentiment Analysis Service Mesh SoftSkill Spark Spring Spring AI Sublime Text TensorFlow Tensorflow Terraform Tool Tools TypeScript VSCode Virtual Machine Web Web Design Wix iTerm2 单例模式 数据结构 算法 资源 高清图片 + AI AWS AWS Lambda AWS S3 Android AngularJs Apache Avro Apache ORC Apache Parquet Azure BI Big Data Bixby Bracket CSS Cloud Cloud Native CoffeeScript Collaboration Container Cucumber Data Analysis Data Lake Data Warehouse DataBase Dataset Docker DuckDB Flink GPT-3 GPT-4 Git Guard Guava HTML HTML5 IE IETester IaC Iceberg Installer JAX JSP Java JavaScript Kafka Keras Kubernetes LangChain LiveReload MQ MVC MVP MVVM MacOS Machine Learning Markdown Math Microservice Msi MySQL NLP Nginx OpenAI Parquet Prompt Engineering Pulumi PyTorch Python RAG React Redis Ruby SOLID SQL Sentiment Analysis Service Mesh SoftSkill Spark Spring Spring AI Sublime Text TensorFlow Tensorflow Terraform Tool Tools TypeScript VSCode Virtual Machine Web Web Design Wix iTerm2 单例模式 数据结构 算法 资源 高清图片
    @@ -2571,18 +2569,18 @@

    Andrewsy

    - - +
    @@ -2592,18 +2590,18 @@

    Andrewsy

    - - +
    @@ -2613,18 +2611,18 @@

    Andrewsy

    - - +
    @@ -2634,18 +2632,18 @@

    Andrewsy

    - - +
    @@ -2655,18 +2653,18 @@

    Andrewsy

    - - +
    @@ -2677,7 +2675,7 @@

    Andrewsy

    -
    Archives
    +
    Archives
    @@ -2694,7 +2692,7 @@

    Andrewsy

    Articles:
    -
    168 Chapter
    +
    169 Chapter
    @@ -2716,7 +2714,7 @@

    Andrewsy

    Total words:
    -
    133.7k Count
    +
    134.6k Count
    @@ -2742,8 +2740,8 @@

    Andrewsy

    Last activity:
    - -
    2024-11-15
    + +
    2024-11-18
    @@ -2793,7 +2791,7 @@

    Andrewsy

    try { document.getElementById('last-update-show').innerHTML = timeago(new Date(lastUpDate)); } catch (error) { - document.getElementById('last-update-show').innerHTML = '2024-11-15 '; + document.getElementById('last-update-show').innerHTML = '2024-11-18 '; } diff --git a/page/4/index.html b/page/4/index.html index d1cea80a..d93dff50 100644 --- a/page/4/index.html +++ b/page/4/index.html @@ -514,9 +514,6 @@

    - - -
    -
    - -
    - - - -
    -
    -
    - - - -
    -

  • - - -
    @@ -1878,7 +1832,7 @@

    -
    +
    @@ -1932,7 +1886,7 @@

    -
    +
    @@ -1986,7 +1940,7 @@

    -
    +
    @@ -2040,7 +1994,7 @@

    -
    +
    @@ -2094,7 +2048,7 @@

    -
    +
    @@ -2148,7 +2102,7 @@

    -
    +
    @@ -2202,7 +2156,7 @@

    -
    +
    @@ -2256,7 +2210,7 @@

    -
    +
    @@ -2309,6 +2263,60 @@

    + + + @@ -2450,7 +2458,7 @@

    Andrewsy

  • Data Analysis
    (2)
  • + >
    Data Analysis
    (3)
  • Andrewsy
    - AI AWS AWS Lambda AWS S3 Android AngularJs Apache Avro Apache ORC Apache Parquet Azure BI Big Data Bixby Bracket CSS Cloud Cloud Native CoffeeScript Collaboration Container Cucumber Data Analysis Data Lake Data Warehouse DataBase Dataset Docker DuckDB Flink GPT-3 GPT-4 Git Guard Guava HTML HTML5 IE IETester IaC Iceberg Installer JAX JSP Java JavaScript Kafka Keras Kubernetes LangChain LiveReload MQ MVC MVP MVVM MacOS Machine Learning Markdown Math Microservice Msi MySQL NLP Nginx OpenAI Parquet Prompt Engineering Pulumi PyTorch Python RAG React Redis Ruby SOLID SQL Sentiment Analysis Service Mesh SoftSkill Spark Spring Spring AI Sublime Text TensorFlow Tensorflow Terraform Tool Tools TypeScript VSCode Virtual Machine Web Web Design Wix iTerm2 单例模式 数据结构 算法 资源 高清图片 + AI AWS AWS Lambda AWS S3 Android AngularJs Apache Avro Apache ORC Apache Parquet Azure BI Big Data Bixby Bracket CSS Cloud Cloud Native CoffeeScript Collaboration Container Cucumber Data Analysis Data Lake Data Warehouse DataBase Dataset Docker DuckDB Flink GPT-3 GPT-4 Git Guard Guava HTML HTML5 IE IETester IaC Iceberg Installer JAX JSP Java JavaScript Kafka Keras Kubernetes LangChain LiveReload MQ MVC MVP MVVM MacOS Machine Learning Markdown Math Microservice Msi MySQL NLP Nginx OpenAI Parquet Prompt Engineering Pulumi PyTorch Python RAG React Redis Ruby SOLID SQL Sentiment Analysis Service Mesh SoftSkill Spark Spring Spring AI Sublime Text TensorFlow Tensorflow Terraform Tool Tools TypeScript VSCode Virtual Machine Web Web Design Wix iTerm2 单例模式 数据结构 算法 资源 高清图片
    @@ -2558,18 +2566,18 @@

    Andrewsy

    - - +
    @@ -2579,18 +2587,18 @@

    Andrewsy

    - - +
    @@ -2600,18 +2608,18 @@

    Andrewsy

    - - +
    @@ -2621,18 +2629,18 @@

    Andrewsy

    - - +
    @@ -2642,18 +2650,18 @@

    Andrewsy

    - - +
    @@ -2664,7 +2672,7 @@

    Andrewsy

    -
    Archives
    +
    Archives
    @@ -2681,7 +2689,7 @@

    Andrewsy

    Articles:
    -
    168 Chapter
    +
    169 Chapter
    @@ -2703,7 +2711,7 @@

    Andrewsy

    Total words:
    -
    133.7k Count
    +
    134.6k Count
    @@ -2729,8 +2737,8 @@

    Andrewsy

    Last activity:
    - -
    2024-11-15
    + +
    2024-11-18
    @@ -2780,7 +2788,7 @@

    Andrewsy

    try { document.getElementById('last-update-show').innerHTML = timeago(new Date(lastUpDate)); } catch (error) { - document.getElementById('last-update-show').innerHTML = '2024-11-15 '; + document.getElementById('last-update-show').innerHTML = '2024-11-18 '; } diff --git a/page/5/index.html b/page/5/index.html index cd74fe37..d52b6e76 100644 --- a/page/5/index.html +++ b/page/5/index.html @@ -514,9 +514,6 @@

    - - -
    -
    - -
    - - - -
    -
    -
    - - - -
    -

  • - - -
    @@ -1883,7 +1832,7 @@

    -
    +
    @@ -1939,7 +1888,7 @@

    -
    +
    @@ -1995,7 +1944,7 @@

    -
    +
    @@ -2051,7 +2000,7 @@

    -
    +
    @@ -2105,7 +2054,7 @@

    -
    +
    @@ -2162,7 +2111,7 @@

    -
    +
    @@ -2216,7 +2165,7 @@

    -
    +
    @@ -2270,7 +2219,7 @@

    -
    +
    @@ -2323,6 +2272,60 @@

    + +
    +
    + +
    + + + +
    +
    +
    + + + +
    +
    + @@ -2464,7 +2467,7 @@

    Andrewsy

  • Data Analysis
    (2)
  • + >
    Data Analysis
    (3)
  • Andrewsy
    - AI AWS AWS Lambda AWS S3 Android AngularJs Apache Avro Apache ORC Apache Parquet Azure BI Big Data Bixby Bracket CSS Cloud Cloud Native CoffeeScript Collaboration Container Cucumber Data Analysis Data Lake Data Warehouse DataBase Dataset Docker DuckDB Flink GPT-3 GPT-4 Git Guard Guava HTML HTML5 IE IETester IaC Iceberg Installer JAX JSP Java JavaScript Kafka Keras Kubernetes LangChain LiveReload MQ MVC MVP MVVM MacOS Machine Learning Markdown Math Microservice Msi MySQL NLP Nginx OpenAI Parquet Prompt Engineering Pulumi PyTorch Python RAG React Redis Ruby SOLID SQL Sentiment Analysis Service Mesh SoftSkill Spark Spring Spring AI Sublime Text TensorFlow Tensorflow Terraform Tool Tools TypeScript VSCode Virtual Machine Web Web Design Wix iTerm2 单例模式 数据结构 算法 资源 高清图片 + AI AWS AWS Lambda AWS S3 Android AngularJs Apache Avro Apache ORC Apache Parquet Azure BI Big Data Bixby Bracket CSS Cloud Cloud Native CoffeeScript Collaboration Container Cucumber Data Analysis Data Lake Data Warehouse DataBase Dataset Docker DuckDB Flink GPT-3 GPT-4 Git Guard Guava HTML HTML5 IE IETester IaC Iceberg Installer JAX JSP Java JavaScript Kafka Keras Kubernetes LangChain LiveReload MQ MVC MVP MVVM MacOS Machine Learning Markdown Math Microservice Msi MySQL NLP Nginx OpenAI Parquet Prompt Engineering Pulumi PyTorch Python RAG React Redis Ruby SOLID SQL Sentiment Analysis Service Mesh SoftSkill Spark Spring Spring AI Sublime Text TensorFlow Tensorflow Terraform Tool Tools TypeScript VSCode Virtual Machine Web Web Design Wix iTerm2 单例模式 数据结构 算法 资源 高清图片
    @@ -2572,18 +2575,18 @@

    Andrewsy

    - - +
    @@ -2593,18 +2596,18 @@

    Andrewsy

    - - +
    @@ -2614,18 +2617,18 @@

    Andrewsy

    - - +
    @@ -2635,18 +2638,18 @@

    Andrewsy

    - - +
    @@ -2656,18 +2659,18 @@

    Andrewsy

    - - +
    @@ -2678,7 +2681,7 @@

    Andrewsy

    -
    Archives
    +
    Archives
    @@ -2695,7 +2698,7 @@

    Andrewsy

    Articles:
    -
    168 Chapter
    +
    169 Chapter
    @@ -2717,7 +2720,7 @@

    Andrewsy

    Total words:
    -
    133.7k Count
    +
    134.6k Count
    @@ -2743,8 +2746,8 @@

    Andrewsy

    Last activity:
    - -
    2024-11-15
    + +
    2024-11-18
    @@ -2794,7 +2797,7 @@

    Andrewsy

    try { document.getElementById('last-update-show').innerHTML = timeago(new Date(lastUpDate)); } catch (error) { - document.getElementById('last-update-show').innerHTML = '2024-11-15 '; + document.getElementById('last-update-show').innerHTML = '2024-11-18 '; } diff --git a/page/6/index.html b/page/6/index.html index 9db2773e..d52639da 100644 --- a/page/6/index.html +++ b/page/6/index.html @@ -514,9 +514,6 @@

    - - -
    -
    - -
    - - - -
    -
    -
    - - - -
    -

  • - - -
    @@ -1885,7 +1834,7 @@

    +
    @@ -1939,7 +1888,7 @@

    +
    @@ -1996,7 +1945,7 @@

    +
    @@ -2050,7 +1999,7 @@

    +
    @@ -2104,7 +2053,7 @@

    +
    @@ -2164,7 +2113,7 @@

    +
    @@ -2218,7 +2167,7 @@

    +
    @@ -2273,7 +2222,7 @@

    + + @@ -2467,7 +2470,7 @@

    Andrewsy

  • Data Analysis
    (2)
  • + >
    Data Analysis
    (3)
  • Andrewsy
  • - AI AWS AWS Lambda AWS S3 Android AngularJs Apache Avro Apache ORC Apache Parquet Azure BI Big Data Bixby Bracket CSS Cloud Cloud Native CoffeeScript Collaboration Container Cucumber Data Analysis Data Lake Data Warehouse DataBase Dataset Docker DuckDB Flink GPT-3 GPT-4 Git Guard Guava HTML HTML5 IE IETester IaC Iceberg Installer JAX JSP Java JavaScript Kafka Keras Kubernetes LangChain LiveReload MQ MVC MVP MVVM MacOS Machine Learning Markdown Math Microservice Msi MySQL NLP Nginx OpenAI Parquet Prompt Engineering Pulumi PyTorch Python RAG React Redis Ruby SOLID SQL Sentiment Analysis Service Mesh SoftSkill Spark Spring Spring AI Sublime Text TensorFlow Tensorflow Terraform Tool Tools TypeScript VSCode Virtual Machine Web Web Design Wix iTerm2 单例模式 数据结构 算法 资源 高清图片 + AI AWS AWS Lambda AWS S3 Android AngularJs Apache Avro Apache ORC Apache Parquet Azure BI Big Data Bixby Bracket CSS Cloud Cloud Native CoffeeScript Collaboration Container Cucumber Data Analysis Data Lake Data Warehouse DataBase Dataset Docker DuckDB Flink GPT-3 GPT-4 Git Guard Guava HTML HTML5 IE IETester IaC Iceberg Installer JAX JSP Java JavaScript Kafka Keras Kubernetes LangChain LiveReload MQ MVC MVP MVVM MacOS Machine Learning Markdown Math Microservice Msi MySQL NLP Nginx OpenAI Parquet Prompt Engineering Pulumi PyTorch Python RAG React Redis Ruby SOLID SQL Sentiment Analysis Service Mesh SoftSkill Spark Spring Spring AI Sublime Text TensorFlow Tensorflow Terraform Tool Tools TypeScript VSCode Virtual Machine Web Web Design Wix iTerm2 单例模式 数据结构 算法 资源 高清图片
    @@ -2575,18 +2578,18 @@

    Andrewsy

    - - +
    @@ -2596,18 +2599,18 @@

    Andrewsy

    - - +

    @@ -2617,18 +2620,18 @@

    Andrewsy

    - - +

    @@ -2638,18 +2641,18 @@

    Andrewsy

    - - +

    @@ -2659,18 +2662,18 @@

    Andrewsy

    - - +

    @@ -2681,7 +2684,7 @@

    Andrewsy

    -
    Archives
    +
    Archives
    @@ -2698,7 +2701,7 @@

    Andrewsy

    Articles:
    -
    168 Chapter
    +
    169 Chapter
    @@ -2720,7 +2723,7 @@

    Andrewsy

    Total words:
    -
    133.7k Count
    +
    134.6k Count
    @@ -2746,8 +2749,8 @@

    Andrewsy

    Last activity:
    - -
    2024-11-15
    + +
    2024-11-18
    @@ -2797,7 +2800,7 @@

    Andrewsy

    try { document.getElementById('last-update-show').innerHTML = timeago(new Date(lastUpDate)); } catch (error) { - document.getElementById('last-update-show').innerHTML = '2024-11-15 '; + document.getElementById('last-update-show').innerHTML = '2024-11-18 '; } diff --git a/page/7/index.html b/page/7/index.html index 3c8e889f..476f1bb8 100644 --- a/page/7/index.html +++ b/page/7/index.html @@ -514,9 +514,6 @@

    - - -
    - 2021-02-15 + 2021-01-03
    - 深入理解 Kubernetes 中的常见资源类型 + Kubernetes - 现代容器编排平台
    - - +
    - 在 Kubernetes 中,资源类型被称为 kind,每种 kind 对应着 Kubernetes API 中的一个特定对象。了解和正确使用这些资源类型对于有效管理和部署应用程序至关重要。本文将详细介绍几种常见的 Kubernetes 资 + Kubernetes(简称为K8s)是一个开源的容器编排平台,它以自动化容器部署、扩展和操作为目标,是现代云原生应用的重要基石。我们将探讨下Kubernetes的起源、整体架构、核心组件及其工作原理,全面理解这一强大工具的作用与运作方式。 +
    - +

    - - +
    @@ -1834,17 +1838,17 @@

    - +

    - - +
    + + + -
    +
    @@ -1993,7 +2050,7 @@

    -
    +
    @@ -2048,7 +2105,7 @@

    -
    +
    @@ -2102,7 +2159,7 @@

    -
    +
    @@ -2156,7 +2213,7 @@

    -
    +
    @@ -2210,7 +2267,7 @@

    -
    +
    @@ -2269,61 +2326,6 @@

    - -
    -
    - -
    - - - -
    -
    -
    - - - -
    -
    - @@ -2465,7 +2467,7 @@

    Andrewsy

  • Data Analysis
    (2)
  • + >
    Data Analysis
    (3)
  • Andrewsy
    - AI AWS AWS Lambda AWS S3 Android AngularJs Apache Avro Apache ORC Apache Parquet Azure BI Big Data Bixby Bracket CSS Cloud Cloud Native CoffeeScript Collaboration Container Cucumber Data Analysis Data Lake Data Warehouse DataBase Dataset Docker DuckDB Flink GPT-3 GPT-4 Git Guard Guava HTML HTML5 IE IETester IaC Iceberg Installer JAX JSP Java JavaScript Kafka Keras Kubernetes LangChain LiveReload MQ MVC MVP MVVM MacOS Machine Learning Markdown Math Microservice Msi MySQL NLP Nginx OpenAI Parquet Prompt Engineering Pulumi PyTorch Python RAG React Redis Ruby SOLID SQL Sentiment Analysis Service Mesh SoftSkill Spark Spring Spring AI Sublime Text TensorFlow Tensorflow Terraform Tool Tools TypeScript VSCode Virtual Machine Web Web Design Wix iTerm2 单例模式 数据结构 算法 资源 高清图片 + AI AWS AWS Lambda AWS S3 Android AngularJs Apache Avro Apache ORC Apache Parquet Azure BI Big Data Bixby Bracket CSS Cloud Cloud Native CoffeeScript Collaboration Container Cucumber Data Analysis Data Lake Data Warehouse DataBase Dataset Docker DuckDB Flink GPT-3 GPT-4 Git Guard Guava HTML HTML5 IE IETester IaC Iceberg Installer JAX JSP Java JavaScript Kafka Keras Kubernetes LangChain LiveReload MQ MVC MVP MVVM MacOS Machine Learning Markdown Math Microservice Msi MySQL NLP Nginx OpenAI Parquet Prompt Engineering Pulumi PyTorch Python RAG React Redis Ruby SOLID SQL Sentiment Analysis Service Mesh SoftSkill Spark Spring Spring AI Sublime Text TensorFlow Tensorflow Terraform Tool Tools TypeScript VSCode Virtual Machine Web Web Design Wix iTerm2 单例模式 数据结构 算法 资源 高清图片
    @@ -2573,18 +2575,18 @@

    Andrewsy

    - - +
    @@ -2594,18 +2596,18 @@

    Andrewsy

    - - +
    @@ -2615,18 +2617,18 @@

    Andrewsy

    - - +
    @@ -2636,18 +2638,18 @@

    Andrewsy

    - - +
    @@ -2657,18 +2659,18 @@

    Andrewsy

    - - +
    @@ -2679,7 +2681,7 @@

    Andrewsy

    -
    Archives
    +
    Archives
    @@ -2696,7 +2698,7 @@

    Andrewsy

    Articles:
    -
    168 Chapter
    +
    169 Chapter
    @@ -2718,7 +2720,7 @@

    Andrewsy

    Total words:
    -
    133.7k Count
    +
    134.6k Count
    @@ -2744,8 +2746,8 @@

    Andrewsy

    Last activity:
    - -
    2024-11-15
    + +
    2024-11-18
    @@ -2795,7 +2797,7 @@

    Andrewsy

    try { document.getElementById('last-update-show').innerHTML = timeago(new Date(lastUpDate)); } catch (error) { - document.getElementById('last-update-show').innerHTML = '2024-11-15 '; + document.getElementById('last-update-show').innerHTML = '2024-11-18 '; } diff --git a/page/8/index.html b/page/8/index.html index 0eb17afd..ca907d87 100644 --- a/page/8/index.html +++ b/page/8/index.html @@ -514,9 +514,6 @@

    - - -
    +
    + +
    + + + +
    +
    +
    + + + +
    +

  • + + +
    @@ -1830,7 +1888,7 @@

    -
    +
    @@ -1884,7 +1942,7 @@

    -
    +
    @@ -1939,7 +1997,7 @@

    -
    +
    @@ -1993,7 +2051,7 @@

    -
    +
    @@ -2047,7 +2105,7 @@

    -
    +
    @@ -2102,7 +2160,7 @@

    -
    +
    @@ -2159,7 +2217,7 @@

    -
    +
    @@ -2216,7 +2274,7 @@

    -
    +
    @@ -2269,63 +2327,6 @@

    - -
    -
    - -
    - - - -
    -
    -
    - - - -
    -
    - @@ -2467,7 +2468,7 @@

    Andrewsy

  • Data Analysis
    (2)
  • + >
    Data Analysis
    (3)
  • Andrewsy
    - AI AWS AWS Lambda AWS S3 Android AngularJs Apache Avro Apache ORC Apache Parquet Azure BI Big Data Bixby Bracket CSS Cloud Cloud Native CoffeeScript Collaboration Container Cucumber Data Analysis Data Lake Data Warehouse DataBase Dataset Docker DuckDB Flink GPT-3 GPT-4 Git Guard Guava HTML HTML5 IE IETester IaC Iceberg Installer JAX JSP Java JavaScript Kafka Keras Kubernetes LangChain LiveReload MQ MVC MVP MVVM MacOS Machine Learning Markdown Math Microservice Msi MySQL NLP Nginx OpenAI Parquet Prompt Engineering Pulumi PyTorch Python RAG React Redis Ruby SOLID SQL Sentiment Analysis Service Mesh SoftSkill Spark Spring Spring AI Sublime Text TensorFlow Tensorflow Terraform Tool Tools TypeScript VSCode Virtual Machine Web Web Design Wix iTerm2 单例模式 数据结构 算法 资源 高清图片 + AI AWS AWS Lambda AWS S3 Android AngularJs Apache Avro Apache ORC Apache Parquet Azure BI Big Data Bixby Bracket CSS Cloud Cloud Native CoffeeScript Collaboration Container Cucumber Data Analysis Data Lake Data Warehouse DataBase Dataset Docker DuckDB Flink GPT-3 GPT-4 Git Guard Guava HTML HTML5 IE IETester IaC Iceberg Installer JAX JSP Java JavaScript Kafka Keras Kubernetes LangChain LiveReload MQ MVC MVP MVVM MacOS Machine Learning Markdown Math Microservice Msi MySQL NLP Nginx OpenAI Parquet Prompt Engineering Pulumi PyTorch Python RAG React Redis Ruby SOLID SQL Sentiment Analysis Service Mesh SoftSkill Spark Spring Spring AI Sublime Text TensorFlow Tensorflow Terraform Tool Tools TypeScript VSCode Virtual Machine Web Web Design Wix iTerm2 单例模式 数据结构 算法 资源 高清图片
    @@ -2575,18 +2576,18 @@

    Andrewsy

    - - +
    @@ -2596,18 +2597,18 @@

    Andrewsy

    - - +
    @@ -2617,18 +2618,18 @@

    Andrewsy

    - - +
    @@ -2638,18 +2639,18 @@

    Andrewsy

    - - +
    @@ -2659,18 +2660,18 @@

    Andrewsy

    - - +
    @@ -2681,7 +2682,7 @@

    Andrewsy

    -
    Archives
    +
    Archives
    @@ -2698,7 +2699,7 @@

    Andrewsy

    Articles:
    -
    168 Chapter
    +
    169 Chapter
    @@ -2720,7 +2721,7 @@

    Andrewsy

    Total words:
    -
    133.7k Count
    +
    134.6k Count
    @@ -2746,8 +2747,8 @@

    Andrewsy

    Last activity:
    - -
    2024-11-15
    + +
    2024-11-18
    @@ -2797,7 +2798,7 @@

    Andrewsy

    try { document.getElementById('last-update-show').innerHTML = timeago(new Date(lastUpDate)); } catch (error) { - document.getElementById('last-update-show').innerHTML = '2024-11-15 '; + document.getElementById('last-update-show').innerHTML = '2024-11-18 '; } diff --git a/page/9/index.html b/page/9/index.html index 7d766b1d..c1472024 100644 --- a/page/9/index.html +++ b/page/9/index.html @@ -514,9 +514,6 @@

    - - -
    +
    + +
    + + + +
    +
    +
    + + + +
    +

  • + + +
    @@ -1833,7 +1893,7 @@

    -
    +
    @@ -1890,7 +1950,7 @@

    -
    +
    @@ -2702,7 +2707,7 @@

    Andrewsy

    -
    Archives
    +
    Archives
    @@ -2719,7 +2724,7 @@

    Andrewsy

    Articles:
    -
    168 Chapter
    +
    169 Chapter
    @@ -2741,7 +2746,7 @@

    Andrewsy

    Total words:
    -
    133.7k Count
    +
    134.6k Count
    @@ -2767,8 +2772,8 @@

    Andrewsy

    Last activity:
    - -
    2024-11-15
    + +
    2024-11-18
    @@ -2818,7 +2823,7 @@

    Andrewsy

    try { document.getElementById('last-update-show').innerHTML = timeago(new Date(lastUpDate)); } catch (error) { - document.getElementById('last-update-show').innerHTML = '2024-11-15 '; + document.getElementById('last-update-show').innerHTML = '2024-11-18 '; } diff --git a/search.xml b/search.xml index 3b6ca04c..c3b386df 100644 --- a/search.xml +++ b/search.xml @@ -3,33 +3,6 @@ - - 详解数据分析中的方差,标准差和异常值的使用 - - /2024/11/15/data-analysis-standard-deviation-variance-outliers/ - - 在数据分析中,方差(Variance)标准差(Standard Deviation)异常值(Outliers)是分析数据分布和变异性的重要统计工具。理解这些概念,并能够有效地应用它们,对于数据清洗、探索性数据分析(EDA)以及构建准确的预测模型至关重要。

    方差(Variance)

    方差是反映数据集中各数据点与数据均值之间差异的一个重要指标。它的大小可以用来衡量数据的离散程度。具体来说,方差越大,数据的变动越大,反之则越小。

    x_i为数据集中的每个数据点,
    μ 为数据集的均值,
    n 为数据的总个数。
    方差就是所有数据点与均值的差值的平方的平均值。

    方差计算时,我们将每个数据点与均值的差值进行平方,然后求平均。方差的单位是原始数据单位的平方,因此有时它的解释意义不如标准差直观。

    标准差(Standard Deviation)

    标准差是方差的平方根。与方差不同,标准差的单位与原始数据相同,因此更易于理解。标准差越大,说明数据的波动性越大;标准差越小,则说明数据较为集中。

    标准差的计算公式为:

    方差的平方根即为标准差。

    标准差与方差的关系

    标准差和方差都用来描述数据的离散程度。标准差比方差更常用,因为它的单位与数据本身一致,解释起来更加直观。

    异常值(Outliers)

    异常值是指在数据集中远离其他数据点的值。异常值的存在往往是由于数据录入错误、测量误差,或者数据本身存在极端波动。异常值会影响数据的分布,进而影响数据分析结果,尤其是均值、方差和标准差等统计量。

    如何识别异常值

    常用的异常值检测方法有:

    箱线图法(Boxplot):通过计算四分位数和四分位距(IQR)来识别异常值。通常,位于Q1 - 1.5 * IQR 或 Q3 + 1.5 * IQR之外的数据点被认为是异常值。
    Z-score法:通过计算数据点与均值的标准差倍数来判断数据点是否为异常值。一般认为,Z-score超过3或小于-3的数据为异常值。

    异常值的处理

    在数据分析中,我们通常会在数据预处理阶段识别并处理异常值。常见的处理方法包括:

    • 删除异常值:直接从数据集中删除异常值。
    • 替换异常值:用均值、中位数等替代异常值。
    • 保留异常值:在某些情况下,异常值可能包含重要信息,因此也可以选择保留异常值。

    举个列子

    假设我们有一个包含学生成绩的数据集,其中有一个异常值(200)。

    1
    2
    3
    4
    5
    6
    7
    import numpy as np
    import pandas as pd
    from scipy import stats
    import matplotlib.pyplot as plt

    # 创建数据集:一组学生成绩,其中包括异常值200
    data = [80, 85, 90, 92, 85, 88, 75, 78, 92, 95, 100, 85, 92, 88, 85, 200]

    计算方差和标准差

    我们使用NumPy来计算数据的方差和标准差。

    1
    2
    3
    4
    5
    6
    7
    # 计算方差
    variance_value = np.var(data)
    print(f"方差 (Variance): {variance_value}")

    # 计算标准差
    std_dev_value = np.std(data)
    print(f"标准差 (Standard Deviation): {std_dev_value}")

    输出:

    1
    2
    方差 (Variance): 781.734375
    标准差 (Standard Deviation): 27.959513139538036

    从输出可以看到,这组数据的方差为781.73,标准差为27.95,这表明数据的离散程度相对较高。特别是最后的异常值(200)对标准差的影响很大。

    异常值检测与处理

    使用Z-score检测异常值

    我们使用Z-score来检测数据中的异常值。如果Z-score大于3或小于-3,则该数据点被认为是异常值。

    1
    2
    3
    4
    5
    6
    7
    # 计算Z-score
    z_scores = stats.zscore(data)
    print(f"Z-scores: {z_scores}")

    # 检测异常值
    outliers = [data[i] for i in range(len(data)) if np.abs(z_scores[i]) > 3]
    print(f"检测到的异常值: {outliers}")

    使用scipystats模块可以计算Z-score。输出结果中,Z-score大于3的异常值是200。

    输出:

    1
    2
    3
    4
    Z-scores: [-0.51413628 -0.33530627 -0.15647626 -0.08494425 -0.33530627 -0.22800826
    -0.69296629 -0.58566828 -0.08494425 0.02235375 0.20118376 -0.33530627
    -0.08494425 -0.22800826 -0.33530627 3.77778395]
    检测到的异常值: [200]

    从输出结果中可以看出,Z-score大于3的异常值是200。这是由于200与其他数据点的差异过大,Z-score值为9.39,远远超过了3。

    使用箱线图检测异常值

    我们可以绘制箱线图来可视化数据并检测异常值。可以使用matplotlib库绘制箱线图。

    1
    2
    3
    4
    # 绘制箱线图
    plt.boxplot(data)
    plt.title("Boxplot Chart")
    plt.show()
    箱线图
    箱线图

    从箱线图中,200的值处于箱体外,因此被视为异常值。

    处理异常值

    在实际分析中,我们可以选择处理异常值。以下是几种常见的方法:

    删除异常值

    1
    2
    3
    # 删除异常值(Z-score大于3的点)
    cleaned_data = [data[i] for i in range(len(data)) if np.abs(z_scores[i]) <= 3]
    print(f"删除异常值后的数据: {cleaned_data}")

    输出:

    1
    删除异常值后的数据: [80, 85, 90, 92, 85, 88, 75, 78, 92, 95, 100, 85, 92, 88, 85]

    替换异常值

    1
    2
    3
    4
    # 替换异常值为中位数
    median_value = np.median(data)
    cleaned_data_with_median = [median_value if np.abs(z_scores[i]) > 3 else data[i] for i in range(len(data))]
    print(f"替换异常值后的数据: {cleaned_data_with_median}")

    输出:

    1
    替换异常值后的数据: [80, 85, 90, 92, 85, 88, 75, 78, 92, 95, 100, 85, 92, 88, 85, 88.0]

    总结

    • 方差和标准差是用于衡量数据离散程度的基本统计量。方差的单位为原始数据单位的平方,而标准差则直接以原始单位表示,更容易解释。
    • 异常值是指那些在数据中与其他数据点差异较大的值,它们可能影响统计分析的结果。在数据清洗阶段,识别和处理异常值是至关重要的一步。

    在Python中,我们可以利用NumPySciPyMatplotlib等库来计算方差、标准差,识别异常值,并根据需要处理异常值。通过掌握这些基本概念和技术,我们数据分析师可以更有效地理解数据的分布特征,发现数据中的潜在问题,做出更加精准的数据分析。

    ]]>
    - - - - - Data Analysis - - - - - - - Data Analysis - - Math - - - -
    - - - Introduction to LangChain: Make AI Smarter and Easier to use @@ -134,10 +107,10 @@ AI - AWS - Python + AWS + SQL @@ -225,14 +198,14 @@ + Python + Pulumi Cloud AWS - Python - IaC @@ -258,14 +231,14 @@ + Python + AWS Machine Learning Sentiment Analysis - Python - @@ -289,12 +262,12 @@ + Python + Cloud AWS - Python - AWS S3 AWS Lambda @@ -310,7 +283,7 @@ /2024/08/16/introduce-is-distinct-from-in-sql/ - 在SQL查询中,比较操作符 = 通常用于检查两个值是否相等。然而,当涉及到处理缺失值(NULL)时,这种操作符就会面临挑战。为了解决这一问题,SQL 提供了 `IS DISTINCT FROM` 操作符,它用于精确比较两个值是否不同,即使这些值中有 NULL。本文将详细介绍 IS DISTINCT FROM 的语法、解决的问题以及常见的使用场景。

    语法

    IS DISTINCT FROM 的基本语法如下:

    1
    expression1 IS DISTINCT FROM expression2

    其中 expression1 和 expression2 是要进行比较的两个表达式。 该操作符返回布尔值:TRUE、FALSE。

    主要解决的问题

    在SQL中,NULL 值代表缺失或未知的数据。当两个表达式中至少有一个为 NULL 时,使用传统的比较操作符(如 = 或 <>)进行比较会导致不确定的结果。具体来说:

    • expression1 = expression2 在 expression1 或 expression2 为 NULL 时会返回 UNKNOWN。
    • expression1 <> expression2 在 expression1 或 expression2 为 NULL 时也会返回 UNKNOWN。

    比如下面的查询语句:

    1
    2
    3
    4
    5
    6
    7
    select 
    1 = NULL as A1,
    NULL <> 1 as A2,
    NULL = NULL as A3,
    NULL <> NULL as A4,
    1 = 1 as A5,
    1 <> 1 as A6

    会返回以下结果:

    A1A2A3A4A5A6
    TRUEFALSE

    可以看到,当 expression1 或 expression2 为 NULL 时,传统的比较操作符会返回 UNKNOWN空值, 如上面的A1, A2, A3, A4的结果值,这就会导致不确定性。

    IS DISTINCT FROM 操作符的出现,解决了这些问题。它能正确处理 NULL 值,会返回 TRUE 或 FALSE,确保结果的可靠性。 在以下情况下返回 TRUE:

    • expression1 和 expression2 都为 NULL。
    • expression1 和 expression2 的值不同(不论是否为 NULL)。

    而在 expression1 和 expression2 相等(包括都是 NULL)的情况下,IS DISTINCT FROM 返回 FALSE。 另外还有一个 IS NOT DISTINCT FROM 操作符,用于判断两个值是否相等。其用法一样,只是语义相反。

    下面的例子查询:

    1
    2
    3
    4
    5
    6
    7
    SELECT 
    1 IS DISTINCT FROM NULL as B1,
    NULL IS DISTINCT FROM 1 as B2,
    1 IS DISTINCT FROM 2 as B3,
    NULL IS DISTINCT FROM NULL as B4,
    1 IS DISTINCT FROM 1 as B5,
    1 IS NOT DISTINCT FROM 1 as B6

    查询结果如下:

    B1B2B3B4B5B6
    TRUETRUETRUEFALSEFALSETRUE

    可以看到,IS DISTINCT FROM 正确处理 NULL 值,返回 TRUE 或 FALSE,确保结果的可靠性。

    使用场景

    数据清洗和验证

    数据清洗数据验证过程中,经常需要检查数据库中的值是否不同,包括对 NULL 值的处理。例如,比较用户输入的数据与现有记录,以确定是否有不同的记录。使用 IS DISTINCT FROM 可以更准确地处理 NULL 值,避免出现错误或遗漏。

    1
    2
    3
    SELECT *
    FROM users
    WHERE username IS DISTINCT FROM 'andrewsy';

    这条查询会返回所有 username 与 ‘andrewsy’ 不同的记录,包括那些 username 为 NULL 的记录。

    数据更新

    在更新数据时,使用 IS DISTINCT FROM 可以确保只有在数据实际变化时才进行更新,从而避免不必要的更新操作。

    1
    2
    3
    UPDATE users
    SET email = 'new_andrewsy@email.com'
    WHERE email IS DISTINCT FROM 'new_andrewsy@email.com';

    这条查询会更新所有 email 不同于 ‘new_andrewsy@email.com‘ 的记录,包括那些 email 为 NULL 的记录。

    数据比较

    在进行复杂的数据比较时,尤其是涉及到 NULL 值时,IS DISTINCT FROM 提供了更直观的比较逻辑。例如,在合并两个数据集时,可以使用此操作符来确保唯一性。

    1
    2
    3
    4
    5
    6
    7
    8
    SELECT 
    *
    FROM
    dataset1
    FULL OUTER JOIN
    dataset2
    ON
    dataset1.id IS DISTINCT FROM dataset2.id

    这条查询会找出两个数据集中 id 不同的记录,包括 id 为 NULL 的情况。

    注意事项

    IS DISTINCT FROM 是 SQL 标准中的一部分,但并非所有数据库系统都支持。具体的支持情况需要查阅数据库的文档。在使用 IS DISTINCT FROM 时,确保数据库系统的版本和文档中对此操作符的支持及行为一致。

    总结

    IS DISTINCT FROM 是一个强大的工具,用于在 SQL 中处理包含 NULL 值的数据比较。它解决了传统比较操作符在处理 NULL 值时的不足,使得数据验证、更新和比较更加准确和可靠。在实际应用中,根据数据库系统的支持情况,合理使用 IS DISTINCT FROM 可以显著提高数据操作的精确性和健壮性。

    ]]>
    + 在SQL查询中,比较操作符 = 通常用于检查两个值是否相等。然而,当涉及到处理缺失值(NULL)时,这种操作符就会面临挑战。为了解决这一问题,SQL 提供了 `IS DISTINCT FROM` 操作符,它用于精确比较两个值是否不同,即使这些值中有 NULL。本文将详细介绍 IS DISTINCT FROM 的语法、解决的问题以及常见的使用场景。

    语法

    IS DISTINCT FROM 的基本语法如下:

    1
    expression1 IS DISTINCT FROM expression2

    其中 expression1 和 expression2 是要进行比较的两个表达式。 该操作符返回布尔值:TRUE、FALSE。

    主要解决的问题

    在SQL中,NULL 值代表缺失或未知的数据。当两个表达式中至少有一个为 NULL 时,使用传统的比较操作符(如 = 或 <>)进行比较会导致不确定的结果。具体来说:

    • expression1 = expression2 在 expression1 或 expression2 为 NULL 时会返回 UNKNOWN。
    • expression1 <> expression2 在 expression1 或 expression2 为 NULL 时也会返回 UNKNOWN。

    比如下面的查询语句:

    1
    2
    3
    4
    5
    6
    7
    select 
    1 = NULL as A1,
    NULL <> 1 as A2,
    NULL = NULL as A3,
    NULL <> NULL as A4,
    1 = 1 as A5,
    1 <> 1 as A6

    会返回以下结果:

    A1A2A3A4A5A6
    TRUEFALSE

    可以看到,当 expression1 或 expression2 为 NULL 时,传统的比较操作符会返回 UNKNOWN空值, 如上面的A1, A2, A3, A4的结果值,这就会导致不确定性。

    IS DISTINCT FROM 操作符的出现,解决了这些问题。它能正确处理 NULL 值,会返回 TRUE 或 FALSE,确保结果的可靠性。 在以下情况下返回 TRUE:

    • expression1 和 expression2 都为 NULL。
    • expression1 和 expression2 的值不同(不论是否为 NULL)。

    而在 expression1 和 expression2 相等(包括都是 NULL)的情况下,IS DISTINCT FROM 返回 FALSE。 另外还有一个 IS NOT DISTINCT FROM 操作符,用于判断两个值是否相等。其用法一样,只是语义相反。

    下面的例子查询:

    1
    2
    3
    4
    5
    6
    7
    SELECT 
    1 IS DISTINCT FROM NULL as B1,
    NULL IS DISTINCT FROM 1 as B2,
    1 IS DISTINCT FROM 2 as B3,
    NULL IS DISTINCT FROM NULL as B4,
    1 IS DISTINCT FROM 1 as B5,
    1 IS NOT DISTINCT FROM 1 as B6

    查询结果如下:

    B1B2B3B4B5B6
    TRUETRUETRUEFALSEFALSETRUE

    可以看到,IS DISTINCT FROM 正确处理 NULL 值,返回 TRUE 或 FALSE,确保结果的可靠性。

    使用场景

    数据清洗和验证

    数据清洗数据验证过程中,经常需要检查数据库中的值是否不同,包括对 NULL 值的处理。例如,比较用户输入的数据与现有记录,以确定是否有不同的记录。使用 IS DISTINCT FROM 可以更准确地处理 NULL 值,避免出现错误或遗漏。

    1
    2
    3
    SELECT *
    FROM users
    WHERE username IS DISTINCT FROM 'andrewsy';

    这条查询会返回所有 username 与 ‘andrewsy’ 不同的记录,包括那些 username 为 NULL 的记录。

    数据更新

    在更新数据时,使用 IS DISTINCT FROM 可以确保只有在数据实际变化时才进行更新,从而避免不必要的更新操作。

    1
    2
    3
    UPDATE users
    SET email = 'new_andrewsy@email.com'
    WHERE email IS DISTINCT FROM 'new_andrewsy@email.com';

    这条查询会更新所有 email 不同于 ‘new_andrewsy@email.com‘ 的记录,包括那些 email 为 NULL 的记录。

    数据比较

    在进行复杂的数据比较时,尤其是涉及到 NULL 值时,IS DISTINCT FROM 提供了更直观的比较逻辑。例如,在合并两个数据集时,可以使用此操作符来确保唯一性。

    1
    2
    3
    4
    5
    6
    7
    8
    SELECT 
    *
    FROM
    dataset1
    FULL OUTER JOIN
    dataset2
    ON
    dataset1.id IS DISTINCT FROM dataset2.id

    这条查询会找出两个数据集中 id 不同的记录,包括 id 为 NULL 的情况。

    注意事项

    IS DISTINCT FROM 是 SQL 标准中的一部分,但并非所有数据库系统都支持。具体的支持情况需要查阅数据库的文档。在使用 IS DISTINCT FROM 时,确保数据库系统的版本和文档中对此操作符的支持及行为一致。

    总结

    IS DISTINCT FROM 是一个强大的工具,用于在 SQL 中处理包含 NULL 值的数据比较。它解决了传统比较操作符在处理 NULL 值时的不足,使得数据验证、更新和比较更加准确和可靠。在实际应用中,根据数据库系统的支持情况,合理使用 IS DISTINCT FROM 可以显著提高数据操作的精确性和健壮性。

    ]]>
    @@ -430,14 +403,14 @@ + Python + Pulumi Cloud AWS - Python - IaC @@ -550,14 +523,14 @@ + Python + Pulumi Cloud AWS - Python - IaC React @@ -585,14 +558,14 @@ + Python + Pulumi Cloud AWS - Python - IaC @@ -618,14 +591,14 @@ + Python + Pulumi Cloud AWS - Python - IaC @@ -782,12 +755,12 @@ Parquet + Python + DataBase DuckDB - Python - @@ -828,7 +801,7 @@ /2023/09/10/what-is-bixby-capsule/ - Bixby Capsule

    导言


    在今天的数字时代,虚拟助手已经成为我们日常生活的一部分。Bixby,三星电子开发的人工智能助手,是其中一个备受欢迎的助手之一。Bixby Capsule 是扩展 Bixby 功能的关键组成部分,本文将介绍什么是 Bixby Capsule、它的工作原理以及如何开发自己的 Capsule。

    什么是 Bixby Capsule?


    Bixby Capsule 是一个为 Bixby 助手创建自定义功能和技能的容器。它允许开发者创建、部署和共享特定领域的虚拟助手应用程序,使用户能够通过语音和文本与虚拟助手进行交互。Capsule 的核心目标是扩展 Bixby 的能力,使其能够执行特定领域的任务,如设定闹钟、预订餐厅、查询天气、播放音乐等。

    Bixby Capsule 的工作原理


    了解 Bixby Capsule 的工作原理对于开发者非常重要。下面是 Bixby Capsule 的工作原理的简要概述:

    语音输入或文本输入
    用户通过语音或文本与 Bixby 进行交互,提出请求或问题。

    语音识别和自然语言处理
    Bixby 使用语音识别技术将用户的语音转化为文本,然后使用自然语言处理(NLP)技术理解用户的意图和需求。

    Capsule 匹配
    Bixby 确定用户的请求与哪个 Capsule 最匹配,这是通过匹配用户的意图与 Capsule 的功能来实现的。

    Capsule 交互
    一旦确定了匹配的 Capsule,Bixby 与该 Capsule 进行交互,将用户的请求传递给 Capsule。

    Capsule 执行
    Capsule 接收用户的请求并执行相关操作,可能需要与外部数据源或服务进行交互以获取信息或执行任务。

    响应用户
    Capsule 返回结果给 Bixby,然后 Bixby 将结果呈现给用户,通常以语音或文本形式。

    如何开发 Bixby Capsule?


    现在让我们来看看如何开发自己的 Bixby Capsule。以下是一个简要的步骤:

    步骤 1:准备开发环境

    在开始开发之前,您需要准备好开发环境。这包括以下步骤:

    1.1 安装 Bixby 开发工具

    Bixby 开发工具包括 Bixby IDE(集成开发环境)和 Bixby CLI(命令行工具)。可以从 Bixby Developer Center 的官方网站上下载和安装这些工具。确保您的开发环境设置正确。

    1.2 注册 Bixby 开发者账户

    在开始之前,您需要在 Bixby Developer Center 上注册一个开发者账户。这个账户将用于创建、管理和部署 Capsules。

    步骤 2:创建 Capsule

    现在,让我们创建一个新的 Capsule:

    2.1 使用 Bixby IDE 创建 Capsule

    1. 打开 Bixby IDE。
    2. 在 IDE 中选择 “File” > “New” > “Bixby Capsule”。
    3. 输入 Capsule 的名称和描述,并选择 Capsule 的类型(例如,”自定义” 或 “Smart Speaker”)。
    4. 点击 “Create” 按钮。

    2.2 定义结构、概念和操作

    在 Capsule 中,您可以定义结构(Structures)、概念(Concepts)和操作(Actions)来表示您的数据和功能:

    1. 在 IDE 中,导航到 models 目录,然后创建一个新的 .bxb 文件。
    2. 在文件中,您可以开始定义结构、概念和操作。例如:
    1
    2
    3
    4
    5
    6
    7
    8
    9
    10
    11
    12
    13
    14
    15
    16
    17
    18
    19
    20
    21
    22
    23
    24
    25
    26
    27
    28
    29
    30
    31
    32
    33
    34
    35
    36
    37
    integer (NumDiceConcept) {
    description (The number of dice to throw.)
    }

    structure (RollResultConcept) {
    description (The result object produced by the RollDice action.)
    property (sum) {
    type (SumConcept)
    min (Required)
    max (One)
    }
    property (roll) {
    description (The list of results for each dice roll.)
    type (RollConcept)
    min (Required)
    max (Many)
    }
    }

    action (RollDice) {
    collect{
    input (numDice) {
    type (NumDiceConcept)
    min (Required)
    max (One)
    }

    input (numSides) {
    type (NumSidesConcept)
    min (Required)
    max (One)
    }
    }
    output (RollResultConcept)
    type (Calculation)
    }

    步骤 3:设计对话

    使用 Bixby IDE 的对话工具,您可以设计用户与 Capsule 的对话:

    3.1 创建对话文件

    1. 在 IDE 中,导航到 resources/dialogs 目录。
    2. 创建一个新的 .dialog 文件,为您的 Capsule 定义一个对话。

    3.2 定义用户输入和回复

    在对话文件中,定义用户输入示例和 Capsule 的回复示例:

    1
    2
    3
    4
    5
    6
    7
    8
    dialog (Result) {
    match {
    BusinessCategory (this) {
    from-property: Business (business)
    }
    }
    template("#{value(business.name)} has #{joinAs('value', this)}.")
    }

    步骤 4:测试和调试 Capsule

    在部署之前,确保您对 Capsule 进行了测试和调试:

    4.1 使用模拟器测试

    在 Bixby IDE 中,使用模拟器来模拟用户与 Capsule 的对话,以确保一切正常工作。检查回复是否符合预期。

    4.2 调试

    使用 Bixby IDE 的调试工具来查找和修复潜在问题。您可以设置断点、查看变量的值,并进行单步调试以确保 Capsule 的行为正确。

    步骤 5:部署 Capsule

    一旦您对 Capsule 满意并通过了测试,就可以开始部署它:

    5.1 创建开发版本

    在 Bixby IDE 中,您可以创建一个开发版本的 Capsule,这个版本可以在您的开发环境中使用:

    1. 选择 “Build” > “Create Development Version”。
    2. 确认创建版本并等待完成。

    5.2 提交审核

    如果您计划将 Capsule 分享给其他人或发布到 Bixby Marketplace,您需要提交审核请求:

    1. 在 Bixby Developer Center 上登录。
    2. 在开发者中心中,选择您的 Capsule 项目,然后提交审核请求。

    步骤 6:发布和分享

    一旦审核通过,您可以将 Capsule 发布并分享给其他用户:

    6.1 发布

    1. 在 Bixby Developer Center 上,选择 “发布” 选项。
    2. 输入有关 Capsule 的详细信息,包括名称、描述和图标。
    3. 发布您的 Capsule。

    6.2 分享

    您可以分享您的 Capsule 的链接给其他用户,或者在 Bixby Marketplace 上找到它

    结论


    Bixby Capsule 是一个强大的工具,可以帮助开发者创建自定义虚拟助手应用程序,提供各种功能和技能。了解其工作原理以及按照上述步骤进行开发,将使您能够构建出令人印象深刻的 Bixby Capsules,改善用户体验,扩展 Bixby 的功能。

    ]]>
    + Bixby Capsule

    导言


    在今天的数字时代,虚拟助手已经成为我们日常生活的一部分。Bixby,三星电子开发的人工智能助手,是其中一个备受欢迎的助手之一。Bixby Capsule 是扩展 Bixby 功能的关键组成部分,本文将介绍什么是 Bixby Capsule、它的工作原理以及如何开发自己的 Capsule。

    什么是 Bixby Capsule?


    Bixby Capsule 是一个为 Bixby 助手创建自定义功能和技能的容器。它允许开发者创建、部署和共享特定领域的虚拟助手应用程序,使用户能够通过语音和文本与虚拟助手进行交互。Capsule 的核心目标是扩展 Bixby 的能力,使其能够执行特定领域的任务,如设定闹钟、预订餐厅、查询天气、播放音乐等。

    Bixby Capsule 的工作原理


    了解 Bixby Capsule 的工作原理对于开发者非常重要。下面是 Bixby Capsule 的工作原理的简要概述:

    语音输入或文本输入
    用户通过语音或文本与 Bixby 进行交互,提出请求或问题。

    语音识别和自然语言处理
    Bixby 使用语音识别技术将用户的语音转化为文本,然后使用自然语言处理(NLP)技术理解用户的意图和需求。

    Capsule 匹配
    Bixby 确定用户的请求与哪个 Capsule 最匹配,这是通过匹配用户的意图与 Capsule 的功能来实现的。

    Capsule 交互
    一旦确定了匹配的 Capsule,Bixby 与该 Capsule 进行交互,将用户的请求传递给 Capsule。

    Capsule 执行
    Capsule 接收用户的请求并执行相关操作,可能需要与外部数据源或服务进行交互以获取信息或执行任务。

    响应用户
    Capsule 返回结果给 Bixby,然后 Bixby 将结果呈现给用户,通常以语音或文本形式。

    如何开发 Bixby Capsule?


    现在让我们来看看如何开发自己的 Bixby Capsule。以下是一个简要的步骤:

    步骤 1:准备开发环境

    在开始开发之前,您需要准备好开发环境。这包括以下步骤:

    1.1 安装 Bixby 开发工具

    Bixby 开发工具包括 Bixby IDE(集成开发环境)和 Bixby CLI(命令行工具)。可以从 Bixby Developer Center 的官方网站上下载和安装这些工具。确保您的开发环境设置正确。

    1.2 注册 Bixby 开发者账户

    在开始之前,您需要在 Bixby Developer Center 上注册一个开发者账户。这个账户将用于创建、管理和部署 Capsules。

    步骤 2:创建 Capsule

    现在,让我们创建一个新的 Capsule:

    2.1 使用 Bixby IDE 创建 Capsule

    1. 打开 Bixby IDE。
    2. 在 IDE 中选择 “File” > “New” > “Bixby Capsule”。
    3. 输入 Capsule 的名称和描述,并选择 Capsule 的类型(例如,”自定义” 或 “Smart Speaker”)。
    4. 点击 “Create” 按钮。

    2.2 定义结构、概念和操作

    在 Capsule 中,您可以定义结构(Structures)、概念(Concepts)和操作(Actions)来表示您的数据和功能:

    1. 在 IDE 中,导航到 models 目录,然后创建一个新的 .bxb 文件。
    2. 在文件中,您可以开始定义结构、概念和操作。例如:
    1
    2
    3
    4
    5
    6
    7
    8
    9
    10
    11
    12
    13
    14
    15
    16
    17
    18
    19
    20
    21
    22
    23
    24
    25
    26
    27
    28
    29
    30
    31
    32
    33
    34
    35
    36
    37
    integer (NumDiceConcept) {
    description (The number of dice to throw.)
    }

    structure (RollResultConcept) {
    description (The result object produced by the RollDice action.)
    property (sum) {
    type (SumConcept)
    min (Required)
    max (One)
    }
    property (roll) {
    description (The list of results for each dice roll.)
    type (RollConcept)
    min (Required)
    max (Many)
    }
    }

    action (RollDice) {
    collect{
    input (numDice) {
    type (NumDiceConcept)
    min (Required)
    max (One)
    }

    input (numSides) {
    type (NumSidesConcept)
    min (Required)
    max (One)
    }
    }
    output (RollResultConcept)
    type (Calculation)
    }

    步骤 3:设计对话

    使用 Bixby IDE 的对话工具,您可以设计用户与 Capsule 的对话:

    3.1 创建对话文件

    1. 在 IDE 中,导航到 resources/dialogs 目录。
    2. 创建一个新的 .dialog 文件,为您的 Capsule 定义一个对话。

    3.2 定义用户输入和回复

    在对话文件中,定义用户输入示例和 Capsule 的回复示例:

    1
    2
    3
    4
    5
    6
    7
    8
    dialog (Result) {
    match {
    BusinessCategory (this) {
    from-property: Business (business)
    }
    }
    template("#{value(business.name)} has #{joinAs('value', this)}.")
    }

    步骤 4:测试和调试 Capsule

    在部署之前,确保您对 Capsule 进行了测试和调试:

    4.1 使用模拟器测试

    在 Bixby IDE 中,使用模拟器来模拟用户与 Capsule 的对话,以确保一切正常工作。检查回复是否符合预期。

    4.2 调试

    使用 Bixby IDE 的调试工具来查找和修复潜在问题。您可以设置断点、查看变量的值,并进行单步调试以确保 Capsule 的行为正确。

    步骤 5:部署 Capsule

    一旦您对 Capsule 满意并通过了测试,就可以开始部署它:

    5.1 创建开发版本

    在 Bixby IDE 中,您可以创建一个开发版本的 Capsule,这个版本可以在您的开发环境中使用:

    1. 选择 “Build” > “Create Development Version”。
    2. 确认创建版本并等待完成。

    5.2 提交审核

    如果您计划将 Capsule 分享给其他人或发布到 Bixby Marketplace,您需要提交审核请求:

    1. 在 Bixby Developer Center 上登录。
    2. 在开发者中心中,选择您的 Capsule 项目,然后提交审核请求。

    步骤 6:发布和分享

    一旦审核通过,您可以将 Capsule 发布并分享给其他用户:

    6.1 发布

    1. 在 Bixby Developer Center 上,选择 “发布” 选项。
    2. 输入有关 Capsule 的详细信息,包括名称、描述和图标。
    3. 发布您的 Capsule。

    6.2 分享

    您可以分享您的 Capsule 的链接给其他用户,或者在 Bixby Marketplace 上找到它

    结论


    Bixby Capsule 是一个强大的工具,可以帮助开发者创建自定义虚拟助手应用程序,提供各种功能和技能。了解其工作原理以及按照上述步骤进行开发,将使您能够构建出令人印象深刻的 Bixby Capsules,改善用户体验,扩展 Bixby 的功能。

    ]]>
    @@ -855,7 +828,7 @@ /2023/09/05/aws-glue-databrew-data-preparation-tool/ - 数据准备是数据分析和机器学习的关键步骤之一。AWS Glue DataBrew 是 Amazon Web Services(AWS)提供的一项强大工具,旨在帮助数据工程师、数据分析师和数据科学家轻松地准备数据以进行分析、报告和机器学习。本文将深入探讨 AWS Glue DataBrew 的特点、优势、使用场景和如何入门。
    AWS Glue DataBrew

    AWS Glue DataBrew 简介


    AWS Glue DataBrew 是一项全托管的数据准备服务,它通过可视化界面和自动化工具简化了数据清理、转换和准备的过程。以下是 AWS Glue DataBrew 的一些关键特点:

    可视化数据准备

    DataBrew 提供了直观的用户界面,使用户能够轻松地探索、清理和转换数据,而无需编写复杂的代码。

    数据探索

    您可以通过数据探索功能快速了解数据的结构、内容和质量,以便更好地理解数据。

    自动数据规范化

    DataBrew 自动检测数据类型和结构,并提供数据规范化建议,以确保数据在分析过程中的一致性。

    多源数据支持

    DataBrew 可以连接到多种数据源,包括数据湖、数据仓库、数据库、云存储和 API。

    数据转换和清洗

    您可以使用 DataBrew 进行各种数据转换和清洗操作,如删除重复数据、填充缺失值、合并列等。

    工作流程自动化

    DataBrew 支持创建数据准备工作流程,以自动执行多个数据准备任务,提高效率。

    数据监控和审计

    DataBrew 提供数据监控和审计功能,以跟踪数据准备操作,确保数据质量和安全性。

    AWS Glue DataBrew 的优势


    为什么要选择 AWS Glue DataBrew 作为数据准备工具?以下是它的一些显著优势:

    降低技术门槛

    DataBrew 的可视化界面使数据准备过程对于不擅长编程的用户也变得更加可行,降低了技术门槛。

    节省时间

    自动化功能和预建的数据转换操作可以大幅节省数据准备的时间,使用户能够更快地获得洞察。

    改进数据质量

    DataBrew 的数据探索和质量评估工具有助于发现和解决数据质量问题,提高数据分析的可靠性。

    与 AWS 生态系统集成

    DataBrew 与其他 AWS 服务集成,可无缝集成到您的数据工作流程中,如 AWS Glue、S3、Redshift 等。

    AWS Glue DataBrew 的使用场景


    AWS Glue DataBrew 适用于多种使用场景,包括但不限于:

    数据清理和规范化

    将原始数据清理并规范化,以便进行分析和报告。

    数据探索和可视化

    通过数据探索功能可视化数据,以便更好地了解数据的特点。

    缺失数据处理

    填充缺失数据或识别缺失数据的模式。

    数据合并和分割

    合并不同来源的数据或拆分包含多个值的列。

    数据质量监控

    持续监控数据质量,以及时发现问题并采取纠正措施。

    入门 AWS Glue DataBrew


    要开始使用 AWS Glue DataBrew,您可以按照以下步骤操作:

    1. 登录 AWS 控制台:使用您的 AWS 帐户登录 AWS 管理控制台。
    2. 导航到 AWS Glue DataBrew:在 AWS 控制台中,导航到 DataBrew 服务页面。
    3. 创建项目:创建一个新项目或选择现有项目,以开始数据准备工作。
    4. 导入数据:将您要准备的数据导入项目。
    5. 使用 DataBrew:在 DataBrew 的可视化界面中探索、清理和转换数据。
    6. 保存和导出数据:完成数据准备后,您可以将数据保存并导出到其他 AWS 服务或应用程序中。

    总结


    AWS Glue DataBrew 是一项强大的数据准备工具,它通过可视化界面和自动化功能使数据准备变得更加容易和高效。无论您是数据工程师、数据分析师还是数据科学家,DataBrew 都可以帮助您加速数据分析的过程,从原始数据中提取有价值的信息。开始使用 DataBrew,并体验数据准备的全新方式!

    ]]>
    + 数据准备是数据分析和机器学习的关键步骤之一。AWS Glue DataBrew 是 Amazon Web Services(AWS)提供的一项强大工具,旨在帮助数据工程师、数据分析师和数据科学家轻松地准备数据以进行分析、报告和机器学习。本文将深入探讨 AWS Glue DataBrew 的特点、优势、使用场景和如何入门。
    AWS Glue DataBrew

    AWS Glue DataBrew 简介


    AWS Glue DataBrew 是一项全托管的数据准备服务,它通过可视化界面和自动化工具简化了数据清理、转换和准备的过程。以下是 AWS Glue DataBrew 的一些关键特点:

    可视化数据准备

    DataBrew 提供了直观的用户界面,使用户能够轻松地探索、清理和转换数据,而无需编写复杂的代码。

    数据探索

    您可以通过数据探索功能快速了解数据的结构、内容和质量,以便更好地理解数据。

    自动数据规范化

    DataBrew 自动检测数据类型和结构,并提供数据规范化建议,以确保数据在分析过程中的一致性。

    多源数据支持

    DataBrew 可以连接到多种数据源,包括数据湖、数据仓库、数据库、云存储和 API。

    数据转换和清洗

    您可以使用 DataBrew 进行各种数据转换和清洗操作,如删除重复数据、填充缺失值、合并列等。

    工作流程自动化

    DataBrew 支持创建数据准备工作流程,以自动执行多个数据准备任务,提高效率。

    数据监控和审计

    DataBrew 提供数据监控和审计功能,以跟踪数据准备操作,确保数据质量和安全性。

    AWS Glue DataBrew 的优势


    为什么要选择 AWS Glue DataBrew 作为数据准备工具?以下是它的一些显著优势:

    降低技术门槛

    DataBrew 的可视化界面使数据准备过程对于不擅长编程的用户也变得更加可行,降低了技术门槛。

    节省时间

    自动化功能和预建的数据转换操作可以大幅节省数据准备的时间,使用户能够更快地获得洞察。

    改进数据质量

    DataBrew 的数据探索和质量评估工具有助于发现和解决数据质量问题,提高数据分析的可靠性。

    与 AWS 生态系统集成

    DataBrew 与其他 AWS 服务集成,可无缝集成到您的数据工作流程中,如 AWS Glue、S3、Redshift 等。

    AWS Glue DataBrew 的使用场景


    AWS Glue DataBrew 适用于多种使用场景,包括但不限于:

    数据清理和规范化

    将原始数据清理并规范化,以便进行分析和报告。

    数据探索和可视化

    通过数据探索功能可视化数据,以便更好地了解数据的特点。

    缺失数据处理

    填充缺失数据或识别缺失数据的模式。

    数据合并和分割

    合并不同来源的数据或拆分包含多个值的列。

    数据质量监控

    持续监控数据质量,以及时发现问题并采取纠正措施。

    入门 AWS Glue DataBrew


    要开始使用 AWS Glue DataBrew,您可以按照以下步骤操作:

    1. 登录 AWS 控制台:使用您的 AWS 帐户登录 AWS 管理控制台。
    2. 导航到 AWS Glue DataBrew:在 AWS 控制台中,导航到 DataBrew 服务页面。
    3. 创建项目:创建一个新项目或选择现有项目,以开始数据准备工作。
    4. 导入数据:将您要准备的数据导入项目。
    5. 使用 DataBrew:在 DataBrew 的可视化界面中探索、清理和转换数据。
    6. 保存和导出数据:完成数据准备后,您可以将数据保存并导出到其他 AWS 服务或应用程序中。

    总结


    AWS Glue DataBrew 是一项强大的数据准备工具,它通过可视化界面和自动化功能使数据准备变得更加容易和高效。无论您是数据工程师、数据分析师还是数据科学家,DataBrew 都可以帮助您加速数据分析的过程,从原始数据中提取有价值的信息。开始使用 DataBrew,并体验数据准备的全新方式!

    ]]>
    @@ -1301,7 +1274,7 @@ /2022/10/17/elastic-load-balancing-elb-overview/ - What is load balancing

    Load Balancers are servers that forward traffic to multiple servers (e.g., EC2 instances) downstream.

    AWS ELB
    AWS ELB

    Why use load balancing


    • Spread load across multiple downstream instances
    • Expose a single point of access (DNS) to your application
    • Seamlessly handle failures of downstream instances
    • Do regular health checks to your instances
    • Provide SSL termination (HTTPS) for your websites
    • Enforce stickiness with cookies
    • High availability across zones
    • Separate public traffic from private traffic

    Why use an Elastic Load Balancer


    • An Elastic Load Balancer is a managed load balancer
      • AWS guarantees that it will be working
      • AWS takes care of upgrades, maintenance, high availability
      • AWS provides only a few configuration knobs
    • It costs less to setup your own load balancer but it will be a lot more effort on your end
    • It is integrated with many AWS offerings/services
      • EC2, EC2 Auto Scaling Groups, Amazon ECS
      • AWS Certificate Manager (ACM), CloudWatch
      • Route 53, AWS WAF, AWS Global Accelerator

    Health Checks


    • Health Checks are crucial for Load Balancers
    • They enable the load balancer to know if instances it forwards traffic to are available to reply to requests
    • The health check is done on a port and a route (/health is common)
    • If the response is not 200 (OK), then the instance is unhealthy
    AWS ELB Health Check
    AWS ELB Health Check

    Types of load balancer on AWS


    • AWS has 4 kinds of managed Load Balancers
    • Classic Load Balancer (v1 - old generation) - 2009 - CLB
      • HTTP, HTTPS, TCP, SSL (secure TCP)
    • Application Load Balancer (v2 - new generation) - 2016 - ALB
      • HTTP, HTTPS, WebSocket
    • Network Load Balancer (v2 - new generation) - 2017 - NLB
      • TCP, TLS (secure TCP), UDP
    • Gateway Load Balancer - 2020 - GWLB
      • Operates at layer 3 (Network layer) - IP Protocol
    • Overall, it is recommended to use the new generation load balancers as they provide more features
    • Some load balancers can be setup as internal (private) or external (public) ELBs

    Load Balancer Security Groups


    AWS ELB Security Group
    AWS ELB Security Group
    ]]>
    + What is load balancing

    Load Balancers are servers that forward traffic to multiple servers (e.g., EC2 instances) downstream.

    AWS ELB
    AWS ELB

    Why use load balancing


    • Spread load across multiple downstream instances
    • Expose a single point of access (DNS) to your application
    • Seamlessly handle failures of downstream instances
    • Do regular health checks to your instances
    • Provide SSL termination (HTTPS) for your websites
    • Enforce stickiness with cookies
    • High availability across zones
    • Separate public traffic from private traffic

    Why use an Elastic Load Balancer


    • An Elastic Load Balancer is a managed load balancer
      • AWS guarantees that it will be working
      • AWS takes care of upgrades, maintenance, high availability
      • AWS provides only a few configuration knobs
    • It costs less to setup your own load balancer but it will be a lot more effort on your end
    • It is integrated with many AWS offerings/services
      • EC2, EC2 Auto Scaling Groups, Amazon ECS
      • AWS Certificate Manager (ACM), CloudWatch
      • Route 53, AWS WAF, AWS Global Accelerator

    Health Checks


    • Health Checks are crucial for Load Balancers
    • They enable the load balancer to know if instances it forwards traffic to are available to reply to requests
    • The health check is done on a port and a route (/health is common)
    • If the response is not 200 (OK), then the instance is unhealthy
    AWS ELB Health Check
    AWS ELB Health Check

    Types of load balancer on AWS


    • AWS has 4 kinds of managed Load Balancers
    • Classic Load Balancer (v1 - old generation) - 2009 - CLB
      • HTTP, HTTPS, TCP, SSL (secure TCP)
    • Application Load Balancer (v2 - new generation) - 2016 - ALB
      • HTTP, HTTPS, WebSocket
    • Network Load Balancer (v2 - new generation) - 2017 - NLB
      • TCP, TLS (secure TCP), UDP
    • Gateway Load Balancer - 2020 - GWLB
      • Operates at layer 3 (Network layer) - IP Protocol
    • Overall, it is recommended to use the new generation load balancers as they provide more features
    • Some load balancers can be setup as internal (private) or external (public) ELBs

    Load Balancer Security Groups


    AWS ELB Security Group
    AWS ELB Security Group
    ]]>
    @@ -1328,7 +1301,7 @@ /2022/09/21/aws-ebs-vs-aws-efs/ - Introduction

    Amazon Web Services (AWS) offers a wide range of storage solutions to cater to the diverse needs of businesses and developers. Two popular options are Amazon Elastic Block Store (EBS) and Amazon Elastic File System (EFS). While both provide storage capabilities within the AWS ecosystem, they serve different purposes and have distinct characteristics. In this blog post, we will explore the key differences between AWS EBS and AWS EFS to help you make an informed choice for your specific use case.

    Amazon Elastic Block Store (EBS)


    AWS EBS
    AWS EBS
    Amazon Elastic Block Store (EBS) is a block-level storage service that provides durable and high-performance storage volumes that can be attached to Amazon Elastic Compute Cloud (EC2) instances.

    Here are some of the main characteristics and use cases of AWS EBS.

    Block Storage


    EBS provides block storage, which means it is best suited for scenarios where you need to store data at the block level, such as databases and applications that require direct access to disk devices.

    Low-Latency Performance


    EBS volumes offer low-latency, high-throughput performance, making them ideal for I/O-intensive workloads where rapid data access is crucial.

    Data Persistence


    EBS volumes are persistent, meaning the data stored on them remains intact even when the associated EC2 instance is stopped or terminated. This is useful for maintaining critical data.

    Availability and Redundancy


    EBS volumes can be replicated within a specific
    Availability Zone (AZ) for redundancy, but they are not natively designed for cross-AZ or cross-region redundancy. For cross-AZ redundancy, you need to set up additional configurations.

    Cost


    You pay for the provisioned capacity of the EBS volume, regardless of whether it is in use or not. This can make it cost-effective for certain use cases but might require careful capacity planning.

    Amazon Elastic File System (EFS)


    AWS EFS
    AWS EFS
    Amazon Elastic File System (EFS) is a fully managed, scalable file storage service that lets multiple EC2 instances share access to a common file system.

    Here are the key attributes and use cases of AWS EFS:

    File Storage


    EFS offers file-level storage, making it suitable for scenarios where multiple instances need shared access to the same data, such as web applications, content management systems, and shared repositories.

    Scalability


    EFS is designed to scale automatically as your storage needs grow. It can handle a dynamic number of EC2 instances concurrently, making it a great choice for applications with varying workloads.

    Cross-AZ and Cross-Region


    EFS provides built-in redundancy and can be accessed across multiple Availability Zones and even across regions. This ensures high availability and disaster recovery capabilities.

    Pay-as-You-Go


    With EFS, you pay for the storage capacity you use, making it a cost-effective option for applications with fluctuating storage requirements.

    Simplified Management


    EFS takes care of the underlying infrastructure and scaling, allowing you to focus on your applications without worrying about managing storage hardware.

    Choosing Between EBS and EFS


    To decide between AWS EBS and AWS EFS, consider the following factors:

    Use Case


    Determine whether your application requires block-level storage (EBS) or file-level storage (EFS).

    Performance Requirements


    Assess your performance needs. EBS is often preferred for high-performance workloads, while EFS offers good performance for a wide range of applications.

    Scalability


    Consider whether your storage needs are likely to scale over time. EFS is designed for easy scalability.

    Availability and Redundancy


    If high availability and redundancy are crucial, EFS may be a better choice due to its built-in cross-AZ and cross-region capabilities.

    Cost


    Evaluate your budget and cost considerations. EBS charges are based on provisioned capacity, while EFS charges are based on actual usage.

    Conclusion


    In summary, AWS EBS and AWS EFS are both valuable storage services within the AWS ecosystem, but they serve different purposes and have distinct characteristics. EBS is ideal for block-level storage with high-performance requirements and provides data persistence within a single Availability Zone. On the other hand, EFS is designed for scalable file-level storage. Your choice between the two should be based on your specific use case and requirements. Understanding these differences is crucial for optimizing your AWS storage strategy and ensuring the best performance and cost-efficiency for your applications.

    ]]>
    + Introduction

    Amazon Web Services (AWS) offers a wide range of storage solutions to cater to the diverse needs of businesses and developers. Two popular options are Amazon Elastic Block Store (EBS) and Amazon Elastic File System (EFS). While both provide storage capabilities within the AWS ecosystem, they serve different purposes and have distinct characteristics. In this blog post, we will explore the key differences between AWS EBS and AWS EFS to help you make an informed choice for your specific use case.

    Amazon Elastic Block Store (EBS)


    AWS EBS
    AWS EBS
    Amazon Elastic Block Store (EBS) is a block-level storage service that provides durable and high-performance storage volumes that can be attached to Amazon Elastic Compute Cloud (EC2) instances.

    Here are some of the main characteristics and use cases of AWS EBS.

    Block Storage


    EBS provides block storage, which means it is best suited for scenarios where you need to store data at the block level, such as databases and applications that require direct access to disk devices.

    Low-Latency Performance


    EBS volumes offer low-latency, high-throughput performance, making them ideal for I/O-intensive workloads where rapid data access is crucial.

    Data Persistence


    EBS volumes are persistent, meaning the data stored on them remains intact even when the associated EC2 instance is stopped or terminated. This is useful for maintaining critical data.

    Availability and Redundancy


    EBS volumes can be replicated within a specific
    Availability Zone (AZ) for redundancy, but they are not natively designed for cross-AZ or cross-region redundancy. For cross-AZ redundancy, you need to set up additional configurations.

    Cost


    You pay for the provisioned capacity of the EBS volume, regardless of whether it is in use or not. This can make it cost-effective for certain use cases but might require careful capacity planning.

    Amazon Elastic File System (EFS)


    AWS EFS
    AWS EFS
    Amazon Elastic File System (EFS) is a fully managed, scalable file storage service that lets multiple EC2 instances share access to a common file system.

    Here are the key attributes and use cases of AWS EFS:

    File Storage


    EFS offers file-level storage, making it suitable for scenarios where multiple instances need shared access to the same data, such as web applications, content management systems, and shared repositories.

    Scalability


    EFS is designed to scale automatically as your storage needs grow. It can handle a dynamic number of EC2 instances concurrently, making it a great choice for applications with varying workloads.

    Cross-AZ and Cross-Region


    EFS provides built-in redundancy and can be accessed across multiple Availability Zones and even across regions. This ensures high availability and disaster recovery capabilities.

    Pay-as-You-Go


    With EFS, you pay for the storage capacity you use, making it a cost-effective option for applications with fluctuating storage requirements.

    Simplified Management


    EFS takes care of the underlying infrastructure and scaling, allowing you to focus on your applications without worrying about managing storage hardware.

    Choosing Between EBS and EFS


    To decide between AWS EBS and AWS EFS, consider the following factors:

    Use Case


    Determine whether your application requires block-level storage (EBS) or file-level storage (EFS).

    Performance Requirements


    Assess your performance needs. EBS is often preferred for high-performance workloads, while EFS offers good performance for a wide range of applications.

    Scalability


    Consider whether your storage needs are likely to scale over time. EFS is designed for easy scalability.

    Availability and Redundancy


    If high availability and redundancy are crucial, EFS may be a better choice due to its built-in cross-AZ and cross-region capabilities.

    Cost


    Evaluate your budget and cost considerations. EBS charges are based on provisioned capacity, while EFS charges are based on actual usage.

    Conclusion


    In summary, AWS EBS and AWS EFS are both valuable storage services within the AWS ecosystem, but they serve different purposes and have distinct characteristics. EBS is ideal for block-level storage with high-performance requirements and provides data persistence within a single Availability Zone. On the other hand, EFS is designed for scalable file-level storage. Your choice between the two should be based on your specific use case and requirements. Understanding these differences is crucial for optimizing your AWS storage strategy and ensuring the best performance and cost-efficiency for your applications.

    ]]>
    @@ -1355,7 +1328,7 @@ /2022/08/13/the-elastic-file-system/ - 在Cloud时代,数据的管理和共享至关重要。AWS Elastic File System(EFS)是 Amazon Web Services(AWS)提供的一项云存储服务,旨在满足企业和开发者对可扩展、高可用性文件存储的需求。本文将深入探讨 AWS EFS 的特点、优势、使用案例以及如何开始使用它。

    AWS EFS

    AWS EFS 概览


    AWS EFS 是一种托管的网络文件存储服务,旨在提供可扩展、高可用性的文件系统,以满足各种应用程序和工作负载的存储需求。 以下是 AWS EFS 的关键特点:

    共享文件存储:EFS 允许多个 Amazon EC2 实例同时访问相同的文件系统,支持多用户和多应用程序的共享文件访问。

    自动伸缩:EFS 自动扩展以适应容量和吞吐量的需求,无需手动干预。这使得它适用于不断变化的工作负载。

    高可用性:EFS 文件系统在多个可用区(Availability Zones)内复制数据,以提供高可用性和冗余性。这意味着如果一个可用区发生故障,文件系统仍然可用。

    数据一致性:EFS 提供强一致性的数据访问,确保多个实例并发访问时的数据一致性。

    多种访问协议:EFS 支持多种文件访问协议,包括 NFSv4、NFSv3 和 Amazon EFS 文件系统客户端。

    安全性:EFS 文件系统支持 Amazon VPC(Virtual Private Cloud)网络隔离,以确保数据的安全性和隐私性。

    AWS EFS IA

    AWS EFS 的优势


    为什么选择 AWS EFS 作为文件存储解决方案? 以下是一些显著的优势:

    可扩展性:EFS 自动扩展,因此您不必担心文件系统的容量限制。它可以根据需要增加存储空间。

    高可用性:EFS 提供多可用区部署,使文件系统在单个可用区故障时保持可用,提高了应用程序的可用性。

    共享性:多个 EC2 实例可以同时访问同一文件系统,使其适用于需要多个实例之间共享文件的应用程序。

    强一致性:EFS 提供强一致性,确保多个实例并发写入或读取文件时的数据一致性。

    灵活性:您可以根据需求创建和管理多个文件系统,每个文件系统可以有不同的权限和访问策略。

    AWS EFS 的使用场景


    AWS EFS 适用于许多不同的使用场景,包括但不限于:

    Web 服务器:EFS 可用于存储 Web 服务器的静态内容、日志文件和配置文件,以实现高可用性和可扩展性。

    容器化应用程序:将容器化应用程序的配置文件和数据存储在 EFS 中,以便多个容器实例之间共享。

    大数据分析:EFS 可用于存储大数据分析工作负载的输入数据和输出结果,支持多个分析节点的并发访问。

    应用程序共享:EFS 使不同应用程序之间可以轻松地共享文件,适用于微服务架构和多个应用程序共存的情况。

    开发和测试环境:开发人员可以使用 EFS 存储开发和测试环境的代码和资源,确保一致的开发和测试数据。

    开始使用 AWS EFS


    要开始使用 AWS EFS,您可以按照以下步骤操作:

    1. 登录 AWS 控制台:使用您的 AWS 帐户登录 AWS 管理控制台。
    2. 创建 EFS 文件系统:在 AWS 控制台中导航到 EFS 服务,创建一个新的文件系统,并配置存储容量和权限。
    3. 设置访问权限:定义哪些 EC2 实例可以访问文件系统,并分配适当的权限。
    4. 将文件系统挂载到 EC2 实例:在您的 EC2 实例上挂载 EFS 文件系统,使实例能够访问共享文件。
    5. 开始使用:将您的应用程序或工作负载配置为使用挂载的 EFS 文件系统。

    总结


    AWS Elastic File System(EFS)是一项强大的云文件存储服务,为各种应用程序和业务提供可扩展、高可用性的文件存储功能。

    ]]>
    + 在Cloud时代,数据的管理和共享至关重要。AWS Elastic File System(EFS)是 Amazon Web Services(AWS)提供的一项云存储服务,旨在满足企业和开发者对可扩展、高可用性文件存储的需求。本文将深入探讨 AWS EFS 的特点、优势、使用案例以及如何开始使用它。

    AWS EFS

    AWS EFS 概览


    AWS EFS 是一种托管的网络文件存储服务,旨在提供可扩展、高可用性的文件系统,以满足各种应用程序和工作负载的存储需求。 以下是 AWS EFS 的关键特点:

    共享文件存储:EFS 允许多个 Amazon EC2 实例同时访问相同的文件系统,支持多用户和多应用程序的共享文件访问。

    自动伸缩:EFS 自动扩展以适应容量和吞吐量的需求,无需手动干预。这使得它适用于不断变化的工作负载。

    高可用性:EFS 文件系统在多个可用区(Availability Zones)内复制数据,以提供高可用性和冗余性。这意味着如果一个可用区发生故障,文件系统仍然可用。

    数据一致性:EFS 提供强一致性的数据访问,确保多个实例并发访问时的数据一致性。

    多种访问协议:EFS 支持多种文件访问协议,包括 NFSv4、NFSv3 和 Amazon EFS 文件系统客户端。

    安全性:EFS 文件系统支持 Amazon VPC(Virtual Private Cloud)网络隔离,以确保数据的安全性和隐私性。

    AWS EFS IA

    AWS EFS 的优势


    为什么选择 AWS EFS 作为文件存储解决方案? 以下是一些显著的优势:

    可扩展性:EFS 自动扩展,因此您不必担心文件系统的容量限制。它可以根据需要增加存储空间。

    高可用性:EFS 提供多可用区部署,使文件系统在单个可用区故障时保持可用,提高了应用程序的可用性。

    共享性:多个 EC2 实例可以同时访问同一文件系统,使其适用于需要多个实例之间共享文件的应用程序。

    强一致性:EFS 提供强一致性,确保多个实例并发写入或读取文件时的数据一致性。

    灵活性:您可以根据需求创建和管理多个文件系统,每个文件系统可以有不同的权限和访问策略。

    AWS EFS 的使用场景


    AWS EFS 适用于许多不同的使用场景,包括但不限于:

    Web 服务器:EFS 可用于存储 Web 服务器的静态内容、日志文件和配置文件,以实现高可用性和可扩展性。

    容器化应用程序:将容器化应用程序的配置文件和数据存储在 EFS 中,以便多个容器实例之间共享。

    大数据分析:EFS 可用于存储大数据分析工作负载的输入数据和输出结果,支持多个分析节点的并发访问。

    应用程序共享:EFS 使不同应用程序之间可以轻松地共享文件,适用于微服务架构和多个应用程序共存的情况。

    开发和测试环境:开发人员可以使用 EFS 存储开发和测试环境的代码和资源,确保一致的开发和测试数据。

    开始使用 AWS EFS


    要开始使用 AWS EFS,您可以按照以下步骤操作:

    1. 登录 AWS 控制台:使用您的 AWS 帐户登录 AWS 管理控制台。
    2. 创建 EFS 文件系统:在 AWS 控制台中导航到 EFS 服务,创建一个新的文件系统,并配置存储容量和权限。
    3. 设置访问权限:定义哪些 EC2 实例可以访问文件系统,并分配适当的权限。
    4. 将文件系统挂载到 EC2 实例:在您的 EC2 实例上挂载 EFS 文件系统,使实例能够访问共享文件。
    5. 开始使用:将您的应用程序或工作负载配置为使用挂载的 EFS 文件系统。

    总结


    AWS Elastic File System(EFS)是一项强大的云文件存储服务,为各种应用程序和业务提供可扩展、高可用性的文件存储功能。

    ]]>
    @@ -1382,7 +1355,7 @@ /2022/07/19/how-to-backup-and-restore-aws-ebs/ - 在Amazon Web Services(AWS)中,Snapshot是一种备份和恢复Amazon Elastic Block Store(EBS)卷数据的关键工具。
    AWS EBS Snapshot

    什么是Snapshot


    概念

    Snapshot是EBS卷的时间点(point-in-time)备份,它记录了卷在特定时刻的状态,包括数据、配置和元数据。快照是存储在Amazon S3中的,因此具有高可靠性和持久性。

    特点和功能


    持久性

    Snapshot是持久性的,一旦创建,它们会一直存在,即使原始EBS卷被删除也是如此。

    增量备份

    快照仅捕获自上一个快照以来发生的更改,这降低了备份的成本和时间。

    快速创建

    Snapshot创建通常非常快,因为它只会记录发生的更改。

    版本控制

    可以创建多个快照,并根据需要还原到不同的版本,以便进行数据版本控制和历史记录。

    复制到其他区域

    可以将Snapshot复制到其他AWS区域,以增加数据的可用性和灾难恢复选项。

    创建新EBS卷

    可以使用Snapshot创建新的EBS卷,这对于在不同EC2实例之间共享数据非常有用。

    自动快照策略

    可以设置自动快照策略,以定期创建快照,从而实现自动备份。

    如何创建和使用Snapshot


    创建Snapshot

    在AWS管理控制台上,可以选择要备份的EBS卷,然后创建Snapshot。也可以使用AWS命令行工具或SDK来创建Snapshot。

    备份策略

    可以选择手动创建Snapshot,也可以设置自动快照策略来定期备份数据。

    恢复数据

    如果需要,可以使用Snapshot还原数据。可以创建新的EBS卷,然后从快照还原数据,或者将快照直接附加到现有EBS卷上。

    数据保护

    Snapshot是数据保护的关键,它可以防止因数据丢失或损坏而引发的灾难情况。

    数据复制和迁移

    可以将Snapshot复制到其他AWS区域或AWS账户,以实现数据复制和迁移。

    快照的应用场景


    数据备份和恢复

    主要用于备份重要的数据,以便在数据丢失或损坏时能够迅速恢复。

    跨区域与跨账户的复制和迁移

    可以将快照复制到不同的 AWS 区域或 AWS 账户,以实现数据的复制和迁移。

    测试和开发

    创建快照可以帮助在不影响生产环境的情况下为测试和开发环境提供实验数据。

    版本控制和数据恢复

    可以使用快照来实现数据版本控制,允许在不同时间点恢复到不同的数据状态。

    数据分析和报告

    可以创建快照以便进行数据分析、生成报告或生成数据副本以供其他用途使用。

    快照的定价


    会被收费用于创建和保留快照的存储空间。快照的价格取决于存储的数据量。AWS 还提供了一些定价选项,如创建和保留快照的频率。具体查看 AWS 官方网站的定价详情获取最新信息。

    最佳实践和注意事项


    定期创建快照

    建议定期创建快照,以确保数据的定期备份和恢复能力。

    标记和命名

    为快照提供有意义的标记和命名,以便轻松识别和管理。

    自动化备份策略

    使用 AWS 的自动备份策略来定期创建快照,减少人工干预。

    根据需求调整快照

    根据数据的重要性和变化频率,调整快照的保留策略。

    监控和警报

    设置监控和警报,以便在快照创建或数据丢失时及时获得通知。

    总结


    总之,AWS Snapshot 用于EBS卷备份和数据保护的关键工具。它具有高度的持久性和可靠性,并支持数据版本控制、恢复、复制和迁移。通过定期创建Snapshot,可以确保数据的安全性和可用性

    ]]>
    + 在Amazon Web Services(AWS)中,Snapshot是一种备份和恢复Amazon Elastic Block Store(EBS)卷数据的关键工具。
    AWS EBS Snapshot

    什么是Snapshot


    概念

    Snapshot是EBS卷的时间点(point-in-time)备份,它记录了卷在特定时刻的状态,包括数据、配置和元数据。快照是存储在Amazon S3中的,因此具有高可靠性和持久性。

    特点和功能


    持久性

    Snapshot是持久性的,一旦创建,它们会一直存在,即使原始EBS卷被删除也是如此。

    增量备份

    快照仅捕获自上一个快照以来发生的更改,这降低了备份的成本和时间。

    快速创建

    Snapshot创建通常非常快,因为它只会记录发生的更改。

    版本控制

    可以创建多个快照,并根据需要还原到不同的版本,以便进行数据版本控制和历史记录。

    复制到其他区域

    可以将Snapshot复制到其他AWS区域,以增加数据的可用性和灾难恢复选项。

    创建新EBS卷

    可以使用Snapshot创建新的EBS卷,这对于在不同EC2实例之间共享数据非常有用。

    自动快照策略

    可以设置自动快照策略,以定期创建快照,从而实现自动备份。

    如何创建和使用Snapshot


    创建Snapshot

    在AWS管理控制台上,可以选择要备份的EBS卷,然后创建Snapshot。也可以使用AWS命令行工具或SDK来创建Snapshot。

    备份策略

    可以选择手动创建Snapshot,也可以设置自动快照策略来定期备份数据。

    恢复数据

    如果需要,可以使用Snapshot还原数据。可以创建新的EBS卷,然后从快照还原数据,或者将快照直接附加到现有EBS卷上。

    数据保护

    Snapshot是数据保护的关键,它可以防止因数据丢失或损坏而引发的灾难情况。

    数据复制和迁移

    可以将Snapshot复制到其他AWS区域或AWS账户,以实现数据复制和迁移。

    快照的应用场景


    数据备份和恢复

    主要用于备份重要的数据,以便在数据丢失或损坏时能够迅速恢复。

    跨区域与跨账户的复制和迁移

    可以将快照复制到不同的 AWS 区域或 AWS 账户,以实现数据的复制和迁移。

    测试和开发

    创建快照可以帮助在不影响生产环境的情况下为测试和开发环境提供实验数据。

    版本控制和数据恢复

    可以使用快照来实现数据版本控制,允许在不同时间点恢复到不同的数据状态。

    数据分析和报告

    可以创建快照以便进行数据分析、生成报告或生成数据副本以供其他用途使用。

    快照的定价


    会被收费用于创建和保留快照的存储空间。快照的价格取决于存储的数据量。AWS 还提供了一些定价选项,如创建和保留快照的频率。具体查看 AWS 官方网站的定价详情获取最新信息。

    最佳实践和注意事项


    定期创建快照

    建议定期创建快照,以确保数据的定期备份和恢复能力。

    标记和命名

    为快照提供有意义的标记和命名,以便轻松识别和管理。

    自动化备份策略

    使用 AWS 的自动备份策略来定期创建快照,减少人工干预。

    根据需求调整快照

    根据数据的重要性和变化频率,调整快照的保留策略。

    监控和警报

    设置监控和警报,以便在快照创建或数据丢失时及时获得通知。

    总结


    总之,AWS Snapshot 用于EBS卷备份和数据保护的关键工具。它具有高度的持久性和可靠性,并支持数据版本控制、恢复、复制和迁移。通过定期创建Snapshot,可以确保数据的安全性和可用性

    ]]>
    @@ -1409,7 +1382,7 @@ /2022/06/13/understanding-aws-ec2-instance-store/ - Amazon Web Services (AWS) 的弹性Cloud实例(EC2)为用户提供了多种存储选项,其中之一是实例存储(Instance Store),也被称为本地实例存储或瞬态存储。本文将深入研究 AWS EC2 实例存储,包括其定义、特点、使用场景以及最佳实践。

    什么是 EC2 实例存储?


    EC2 实例存储是 EC2 实例上提供的临时、本地存储选项。这些存储卷是物理存储设备(如硬盘驱动器)的一部分,直接连接到宿主实例,而不是通过网络进行访问。实例存储通常提供了非常高的性能,适用于需要低延迟和高吞吐量的工作负载。

    AWS EC2 Instance Store IOPS

    实例存储的特点


    临时性:实例存储是临时的,与 EC2 实例的生命周期紧密相连。当 EC2 实例停止、终止或失败时,存储中的数据将不再可用。

    高性能:实例存储通常提供非常高的 I/O 性能,适用于需要大量读写操作的应用程序,如数据库缓存或临时计算。

    低延迟:由于实例存储直接连接到宿主实例,因此具有非常低的访问延迟,适用于对速度要求极高的工作负载。

    不同于 Amazon EBS, 与 Amazon Elastic Block Store(EBS)不同,实例存储不需要预配,也没有额外的费用,但缺乏 EBS 提供的持久性和数据备份功能。

    使用场景


    缓存层:实例存储适用于临时数据,如缓存层。通过将缓存存储在实例存储上,可以提高读取速度和降低后端存储负担。

    临时计算:对于需要执行大规模数据处理的任务,实例存储可以用作临时工作空间,以加速计算过程。

    日志存储:对于需要快速记录大量日志数据的应用程序,实例存储可以提供高性能的日志存储解决方案。

    最佳实践


    备份重要数据:由于实例存储是临时性的,重要数据需要备份到持久性存储(如 Amazon EBS 或 Amazon S3)以防止数据丢失。

    了解生命周期:在使用实例存储时,了解 EC2 实例的生命周期非常重要。确保存储中的数据不会在实例终止时丢失。

    监控性能:实例存储通常提供高性能,但仍然需要监控其性能以确保正常运行。

    总结


    AWS EC2 实例存储是一种适用于需要高性能和低延迟的临时数据存储的选择。尽管它不适用于持久性数据存储,但在特定场景下,如缓存、临时计算和日志存储,它可以提供卓越的性能和效率。使用实例存储时,请谨记存储临时性质,并采取适当的备份和监控措施,以确保数据的可用性和完整性。]]>
    + Amazon Web Services (AWS) 的弹性Cloud实例(EC2)为用户提供了多种存储选项,其中之一是实例存储(Instance Store),也被称为本地实例存储或瞬态存储。本文将深入研究 AWS EC2 实例存储,包括其定义、特点、使用场景以及最佳实践。

    什么是 EC2 实例存储?


    EC2 实例存储是 EC2 实例上提供的临时、本地存储选项。这些存储卷是物理存储设备(如硬盘驱动器)的一部分,直接连接到宿主实例,而不是通过网络进行访问。实例存储通常提供了非常高的性能,适用于需要低延迟和高吞吐量的工作负载。

    AWS EC2 Instance Store IOPS

    实例存储的特点


    临时性:实例存储是临时的,与 EC2 实例的生命周期紧密相连。当 EC2 实例停止、终止或失败时,存储中的数据将不再可用。

    高性能:实例存储通常提供非常高的 I/O 性能,适用于需要大量读写操作的应用程序,如数据库缓存或临时计算。

    低延迟:由于实例存储直接连接到宿主实例,因此具有非常低的访问延迟,适用于对速度要求极高的工作负载。

    不同于 Amazon EBS, 与 Amazon Elastic Block Store(EBS)不同,实例存储不需要预配,也没有额外的费用,但缺乏 EBS 提供的持久性和数据备份功能。

    使用场景


    缓存层:实例存储适用于临时数据,如缓存层。通过将缓存存储在实例存储上,可以提高读取速度和降低后端存储负担。

    临时计算:对于需要执行大规模数据处理的任务,实例存储可以用作临时工作空间,以加速计算过程。

    日志存储:对于需要快速记录大量日志数据的应用程序,实例存储可以提供高性能的日志存储解决方案。

    最佳实践


    备份重要数据:由于实例存储是临时性的,重要数据需要备份到持久性存储(如 Amazon EBS 或 Amazon S3)以防止数据丢失。

    了解生命周期:在使用实例存储时,了解 EC2 实例的生命周期非常重要。确保存储中的数据不会在实例终止时丢失。

    监控性能:实例存储通常提供高性能,但仍然需要监控其性能以确保正常运行。

    总结


    AWS EC2 实例存储是一种适用于需要高性能和低延迟的临时数据存储的选择。尽管它不适用于持久性数据存储,但在特定场景下,如缓存、临时计算和日志存储,它可以提供卓越的性能和效率。使用实例存储时,请谨记存储临时性质,并采取适当的备份和监控措施,以确保数据的可用性和完整性。]]>
    @@ -1809,6 +1782,64 @@ + + data-analysis-z-score-for-outliers + + /2020/09/18/data-analysis-z-score-for-outliers/ + + Z-score(标准分数)

    Z-score(标准分数)是数据分析中常用的一个标准化指标,用于衡量某个数据点与其所在数据集的平均值之间的偏离程度。具体而言,它表示一个数据点距离数据集均值的标准差数值。Z-score 计算的基本原理是将数据点转化为标准化值,从而便于比较不同数据集中的数据点。

    Z-score的计算公式

    $$ Z = \frac{x - \mu}{\sigma} $$

    其中:

    $x$:单个数据点的值。
    $\mu$:数据集的均值(平均值)。
    $\sigma$:数据集的标准差。

    解释

    • $x - \mu$:表示数据点 $x$ 与数据集均值 $\mu$ 的偏差,或称为该数据点与均值的差异。
    • $\sigma$:标准差,衡量数据集的离散程度。标准差越大,说明数据点之间的差异越大;标准差越小,说明数据点之间的差异越小。

    Z-score 的意义

    Z-score 的意义主要有以下几点:

    • Z-score = 0:表示数据点与均值完全一致。
    • Z-score > 0:表示数据点大于均值,即位于均值右侧。
    • Z-score < 0:表示数据点小于均值,即位于均值左侧。
    • Z-score 的绝对值较大:表示数据点距离均值较远,离群点的可能性较大;例如,Z-score 大于 3 或小于 -3 的数据点通常被认为是异常值。

    Z-score 的主要应用

    1. 标准化数据:通过 Z-score 可以将数据标准化,使得不同量纲、不同范围的数据具有可比性。例如,在机器学习中,许多算法(如 KNN、SVM)对数据的尺度敏感,因此需要对数据进行标准化处理。

    2. 异常值检测:Z-score 可以用来识别数据中的异常值。通常,当 Z-score 的绝对值大于 3 时,认为该数据点为异常值(离群点)。

    3. 比较不同数据集:如果两个数据集具有不同的均值和标准差,可以使用 Z-score 对它们进行比较,便于判断哪个数据点在其各自数据集中的位置更加偏离均值。

    举个例子

    假设我们有一个包含学生成绩的数据集 [80, 85, 90, 92, 85, 88, 75, 78, 92, 95, 100, 85, 92, 88, 85, 200]。我们希望计算数据点 200 的 Z-score,并判断它是否为异常值。

    数学计算公式

    1. 计算均值

       $$ \mu = \frac{80 + 85 + \cdots + 100 + \cdots + 200}{16} = 94.375 $$

    2. 计算标准差
      标准差的计算公式为:

       $$ \sigma = \sqrt{\frac{1}{n}\sum_{i=1}^{n}(x_i - \mu)^2} $$

      代入数据集计算:

       $$ \sigma \approx 27.96 $$

    3. 计算 Z-score

       $$ Z = \frac{200 - 94.375}{27.96} \approx 3.78 $$

    Python 代码

    在 Python 中,我们可以使用 numpy 和 scipy 库来计算 Z-score。

    1
    2
    3
    4
    5
    6
    7
    import numpy as np
    import pandas as pd
    from scipy import stats
    import matplotlib.pyplot as plt

    # 创建数据集:一组学生成绩,其中包括异常值200
    data = [80, 85, 90, 92, 85, 88, 75, 78, 92, 95, 100, 85, 92, 88, 85, 200]

    计算数据集的均值和标准差和Z-score:

    1
    2
    3
    4
    5
    6
    7
    8
    9
    10
    11
    # 计算均值
    mean = np.mean(data)
    print("均值 (Mean):", mean)

    # 计算标准差
    std = np.std(data)
    print("标准差 (Standard Deviation):", std)

    # 计算 Z-score
    z_scores = stats.zscore(data)
    print("200数值的Z-score:", z_scores[-1])

    输出:

    1
    2
    3
    均值 (Mean): 94.375
    标准差 (Standard Deviation): 27.959513139538036
    200数值的Z-score: 3.7777839504162842

    所以,数据点 200 的 Z-score 为 3.7777839504162842,按照 Z-score 的定义,它位于数据集的右侧,距离均值较远 ( Z-score 大于3或者小于-3 ),因此被认为是异常值。

    总结

    Z-score 是衡量数据点相对于数据集平均水平偏差程度的标准化指标,广泛应用于数据预处理异常值检测以及统计分析等领域。通过 Z-score,我们可以将不同规模、不同单位的数据转换为相同的标准尺度,方便进行比较和进一步分析。

    ]]>
    + + + + + Data Analysis + + + + + + + Data Analysis + + Math + + Python + + + +
    + + + + + 详解数据分析中的方差,标准差和异常值的使用 + + /2020/09/03/data-analysis-standard-deviation-variance-outliers/ + + 在数据分析中,方差(Variance)标准差(Standard Deviation)异常值(Outliers)是分析数据分布和变异性的重要统计工具。理解这些概念,并能够有效地应用它们,对于数据清洗、探索性数据分析(EDA)以及构建准确的预测模型至关重要。

    方差(Variance)

    方差是反映数据集中各数据点与数据均值之间差异的一个重要指标。它的大小可以用来衡量数据的离散程度。具体来说,方差越大,数据的变动越大,反之则越小。

    方差的计算公式为:

    $$ \sigma^2 = \frac{1}{n}\sum_{i=1}^{n}(x_i - \mu)^2 $$

    其中,$x_i$ 为数据集中的每个数据点,
    $\mu$ 为数据集的均值,
    $n$ 为数据的总个数。
    方差就是所有数据点与均值的差值的平方的平均值。

    方差计算时,我们将每个数据点与均值的差值进行平方,然后求平均。方差的单位是原始数据单位的平方,因此有时它的解释意义不如标准差直观。

    标准差(Standard Deviation)

    标准差是方差的平方根。与方差不同,标准差的单位与原始数据相同,因此更易于理解。标准差越大,说明数据的波动性越大;标准差越小,则说明数据较为集中。

    标准差的计算公式为:

    方差的平方根即为标准差。

    标准差与方差的关系

    标准差和方差都用来描述数据的离散程度。标准差比方差更常用,因为它的单位与数据本身一致,解释起来更加直观。

    异常值(Outliers)

    异常值是指在数据集中远离其他数据点的值。异常值的存在往往是由于数据录入错误、测量误差,或者数据本身存在极端波动。异常值会影响数据的分布,进而影响数据分析结果,尤其是均值、方差和标准差等统计量。

    如何识别异常值

    常用的异常值检测方法有:

    箱线图法(Boxplot):通过计算四分位数和四分位距(IQR)来识别异常值。通常,位于Q1 - 1.5 * IQR 或 Q3 + 1.5 * IQR之外的数据点被认为是异常值。
    Z-score法:通过计算数据点与均值的标准差倍数来判断数据点是否为异常值。一般认为,Z-score超过3或小于-3的数据为异常值。

    异常值的处理

    在数据分析中,我们通常会在数据预处理阶段识别并处理异常值。常见的处理方法包括:

    • 删除异常值:直接从数据集中删除异常值。
    • 替换异常值:用均值、中位数等替代异常值。
    • 保留异常值:在某些情况下,异常值可能包含重要信息,因此也可以选择保留异常值。

    举个例子

    假设我们有一个包含学生成绩的数据集,其中有一个异常值(200)。

    1
    2
    3
    4
    5
    6
    7
    import numpy as np
    import pandas as pd
    from scipy import stats
    import matplotlib.pyplot as plt

    # 创建数据集:一组学生成绩,其中包括异常值200
    data = [80, 85, 90, 92, 85, 88, 75, 78, 92, 95, 100, 85, 92, 88, 85, 200]

    计算方差和标准差

    我们使用NumPy来计算数据的方差和标准差。

    1
    2
    3
    4
    5
    6
    7
    # 计算方差
    variance_value = np.var(data)
    print(f"方差 (Variance): {variance_value}")

    # 计算标准差
    std_dev_value = np.std(data)
    print(f"标准差 (Standard Deviation): {std_dev_value}")

    输出:

    1
    2
    方差 (Variance): 781.734375
    标准差 (Standard Deviation): 27.959513139538036

    从输出可以看到,这组数据的方差为781.73,标准差为27.95,这表明数据的离散程度相对较高。特别是最后的异常值(200)对标准差的影响很大。

    异常值检测与处理

    使用Z-score检测异常值

    我们使用Z-score来检测数据中的异常值。如果Z-score大于3或小于-3,则该数据点被认为是异常值。

    1
    2
    3
    4
    5
    6
    7
    # 计算Z-score
    z_scores = stats.zscore(data)
    print(f"Z-scores: {z_scores}")

    # 检测异常值
    outliers = [data[i] for i in range(len(data)) if np.abs(z_scores[i]) > 3]
    print(f"检测到的异常值: {outliers}")

    使用 scipy 的 stats 模块可以计算 Z-score。输出结果中,Z-score大于3的异常值是200。

    输出:

    1
    2
    3
    4
    Z-scores: [-0.51413628 -0.33530627 -0.15647626 -0.08494425 -0.33530627 -0.22800826
    -0.69296629 -0.58566828 -0.08494425 0.02235375 0.20118376 -0.33530627
    -0.08494425 -0.22800826 -0.33530627 3.77778395]
    检测到的异常值: [200]

    从输出结果中可以看出,Z-score大于3的异常值是200。这是由于200与其他数据点的差异过大,其Z-score值约为3.78,超过了3的阈值。

    使用箱线图检测异常值

    我们可以绘制箱线图来可视化数据并检测异常值。可以使用matplotlib库绘制箱线图。

    1
    2
    3
    4
    # 绘制箱线图
    plt.boxplot(data)
    plt.title("Boxplot Chart")
    plt.show()
    箱线图
    箱线图

    从箱线图中,200的值处于箱体外,因此被视为异常值。

    处理异常值

    在实际分析中,我们可以选择处理异常值。以下是几种常见的方法:

    删除异常值

    1
    2
    3
    # 删除异常值(Z-score大于3的点)
    cleaned_data = [data[i] for i in range(len(data)) if np.abs(z_scores[i]) <= 3]
    print(f"删除异常值后的数据: {cleaned_data}")

    输出:

    1
    删除异常值后的数据: [80, 85, 90, 92, 85, 88, 75, 78, 92, 95, 100, 85, 92, 88, 85]

    替换异常值

    1
    2
    3
    4
    # 替换异常值为中位数
    median_value = np.median(data)
    cleaned_data_with_median = [median_value if np.abs(z_scores[i]) > 3 else data[i] for i in range(len(data))]
    print(f"替换异常值后的数据: {cleaned_data_with_median}")

    输出:

    1
    替换异常值后的数据: [80, 85, 90, 92, 85, 88, 75, 78, 92, 95, 100, 85, 92, 88, 85, 88.0]

    总结

    • 方差和标准差是用于衡量数据离散程度的基本统计量。方差的单位为原始数据单位的平方,而标准差则直接以原始单位表示,更容易解释。
    • 异常值是指那些在数据中与其他数据点差异较大的值,它们可能影响统计分析的结果。在数据清洗阶段,识别和处理异常值是至关重要的一步。

    在Python中,我们可以利用 NumPy、SciPy、Matplotlib 等库来计算方差、标准差,识别异常值,并根据需要处理异常值。通过掌握这些基本概念和技术,我们数据分析师可以更有效地理解数据的分布特征,发现数据中的潜在问题,做出更加精准的数据分析。

    ]]>
    + + + + + Data Analysis + + + + + + + Data Analysis + + Math + + Python + + + +
    + + + 数据分析中的均值、中央値与众数 @@ -1830,6 +1861,8 @@ Math + Python +
    diff --git a/tags/Data-Analysis/index.html b/tags/Data-Analysis/index.html index d873bf85..0ddf579c 100644 --- a/tags/Data-Analysis/index.html +++ b/tags/Data-Analysis/index.html @@ -538,17 +538,17 @@

    +
    + + + +
    +

    +
    + + + +
    +
    + + + -
    +
    diff --git a/tags/Math/index.html b/tags/Math/index.html index 49101461..cfedfca1 100644 --- a/tags/Math/index.html +++ b/tags/Math/index.html @@ -538,17 +538,17 @@

    +
    + + + +
    +

    +
    + + + +
    +
    + + + -
    +
    diff --git a/tags/Python/index.html b/tags/Python/index.html index 89e1d87c..09560320 100644 --- a/tags/Python/index.html +++ b/tags/Python/index.html @@ -1018,11 +1018,81 @@

    + +
    +
    + +
    + + + +
    +
    +
    + + + +
    +
    +
    + + +
    diff --git a/tags/Python/page/2/index.html b/tags/Python/page/2/index.html new file mode 100644 index 00000000..4390a970 --- /dev/null +++ b/tags/Python/page/2/index.html @@ -0,0 +1,1582 @@ + + + + + + + + + + + + + + Tag: Python | Andrewsy's Space + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    +
    +
    +
    +
    +
    +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    +
    + + + + + + +
    +
    + +
    +
    + Tag +
    +
    + + + + +
    +
    +
    + + + +
    +
    +
    Python Tag
    +
    + + + +
    + + + + + +
    +
    + +
    + + + +
    +
    +
    + + + +
    +
    + +
    +
    + +
    + + + + +
    + +
    +
    + + + + + + +
    + + + + + + + + + + +
    + +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    + +
    + + + +
    + + + + + + + + + \ No newline at end of file diff --git a/tags/index.html b/tags/index.html index fe7940ec..652bc15f 100644 --- a/tags/index.html +++ b/tags/index.html @@ -714,18 +714,18 @@

    - + AngularJs - 2 + title="Spring" style="background-color: #F9E79F;">Spring + 15 - + Spring - 15 + title="AngularJs" style="background-color: #F8F9F9;">AngularJs + 2 @@ -954,33 +954,33 @@

    - + Data Lake + title="Iceberg" style="background-color: #D7BDE2;">Iceberg 1 - + Data Warehouse + title="Spark" style="background-color: #FFF;">Spark 1 - + Iceberg + title="Data Lake" style="background-color: #D7BDE2;">Data Lake 1 - + Spark + title="Data Warehouse" style="background-color: #FFF;">Data Warehouse 1 @@ -994,18 +994,26 @@

    - + Data Analysis - 2 + 3 - + Math - 2 + 3 + + + + + + Python + 12 @@ -1178,14 +1186,6 @@

    - - Python - 9 - - - - OpenAI