diff --git a/.github/workflows/report.yml b/.github/workflows/report.yml new file mode 100644 index 0000000..c123db3 --- /dev/null +++ b/.github/workflows/report.yml @@ -0,0 +1,29 @@ +name: Create Report + +on: [push] +env: + GITHUB_TOKEN: ${{ github.token }} + +jobs: + publish: + runs-on: ubuntu-latest + name: Publish Document + steps: + - name: Checkout + uses: actions/checkout@v2 + - name: Publish PDF Document + uses: baileyjm02/markdown-to-pdf@v1 + id: publish-document + with: + input_dir: 'report' + output_dir: build + + images_dir: 'report/images' + image_import: './images' + build_html: false + + - name: Upload Document + uses: actions/upload-artifact@v2 + with: + name: 'report.pdf' + path: build diff --git a/.gitignore b/.gitignore index d2cfac1..7da8512 100644 --- a/.gitignore +++ b/.gitignore @@ -9,4 +9,8 @@ deploy.config minitwit/Api/LATEST.txt .ionide *.log -internal-nlog-AspNetCore.txt \ No newline at end of file +internal-nlog-AspNetCore.txt +.vs +*.ndproj +NDependOut +node_modules diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 0000000..61823dd --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1,46 @@ +{ + "cSpell.enabled": true, + "files.autoSave": "off", + "editor.formatOnSave": false, + "cSpell.words": [ + "Affero", + "BSDSESM", + "DTO's", + "Grafana", + "H", + "Højelse", + "IP's", + "J", + "Jäpelt", + "Kanban", + "Kasper", + "Kilbak", + "Kristoffer", + "Kyhl", + "Npgsql", + "OWASP", + "R", + "Røssum", + "Templater", + "VM's", + "aspnetcore", + "emja", + "hojelse", + "hotspots", + "jelse", + "jglr", + "kaky", + "krbh", + "minitwit", + "nlog", + "occured", + "pelt", + "sonarcloud", + "ssum", + "templator", + "themagicstrings", + "thhk", + "twooter", + "unfollow" + ] +} diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..261eeb9 --- /dev/null +++ b/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, 
REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/deploy.config.template b/deploy.config.template deleted file mode 100644 index 387d532..0000000 --- a/deploy.config.template +++ /dev/null @@ -1,4 +0,0 @@ -# TODO: Rename this file to deploy.config and delete this line -GITHUB_USER= -GITHUB_TOKEN= -HOST= \ No newline at end of file diff --git a/deploy.sh b/deploy.sh deleted file mode 100755 index be58951..0000000 --- a/deploy.sh +++ /dev/null @@ -1,34 +0,0 @@ -#!/bin/bash - -. 
deploy.config - -apt update -apt install -y apt-transport-https ca-certificates curl software-properties-common -curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - -add-apt-repository \"deb [arch=amd64] https://download.docker.com/linux/ubuntu focal stable\" -apt update -apt-cache policy docker-ce -apt install -y docker-ce -docker login -u $GITHUB_USER -p $GITHUB_TOKEN -cd minitwit/Api -rm -r bin -rm -r publish -dotnet publish -c Release -o ./publish -docker build -t twooter . -docker tag twooter docker.pkg.github.com/themagicstrings/twooter/twooter -docker push docker.pkg.github.com/themagicstrings/twooter/twooter - -ssh root@$HOST " - apt update - apt install -y apt-transport-https ca-certificates curl software-properties-common - curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - - add-apt-repository \"deb [arch=amd64] https://download.docker.com/linux/ubuntu focal stable\" - apt update - apt-cache policy docker-ce - apt install -y docker-ce - docker login https://docker.pkg.github.com -u $GITHUB_USER -p $GITHUB_TOKEN - docker stop twooter-instance - docker rm twooter-instance - docker rmi docker.pkg.github.com/themagicstrings/twooter/twooter:latest - docker pull docker.pkg.github.com/themagicstrings/twooter/twooter:latest - docker run --rm -p 443:443 -p 80:80 --name twooter-instance docker.pkg.github.com/themagicstrings/twooter/twooter:latest -e ASPNETCORE_URLS=\"http://localhost:80;https://localhost:443\"" diff --git a/minitwit/Api.Test/BasicTemplatorTests.cs b/minitwit/Api.Test/BasicTemplatorTests.cs index 47399a9..a07351f 100644 --- a/minitwit/Api.Test/BasicTemplatorTests.cs +++ b/minitwit/Api.Test/BasicTemplatorTests.cs @@ -1,7 +1,7 @@ using Xunit; using System; -namespace Api.Test +namespace Api.Test { public class BasicTemplaterTests @@ -16,4 +16,4 @@ public void generateDateTimeStringTest() } } -} \ No newline at end of file +} diff --git a/minitwit/Api/Controllers/MinitwitController.cs 
b/minitwit/Api/Controllers/MinitwitController.cs index a46d05e..7694454 100644 --- a/minitwit/Api/Controllers/MinitwitController.cs +++ b/minitwit/Api/Controllers/MinitwitController.cs @@ -147,7 +147,7 @@ public async Task CreateUserAsync([FromForm]UserCreateDTO user) return generateBadRequestRegister("The username is already taken"); case EMAIL_TAKEN: return generateBadRequestRegister("The email is already taken"); - case SUCCES: + case SUCCESS: default: BasicTemplater.flashes.Add("You were successfully registered and can login now"); TotalUsers.IncTo(UserRepo.GetTotalUsers()); @@ -202,7 +202,7 @@ public async Task PostMessageAsync([FromForm] MessageCreateReques } - // Attemps to follow a user + // Attempts to follow a user [HttpPost("/{username}/follow")] public async Task FollowUserAsync([FromRoute] string username) { diff --git a/minitwit/Api/Controllers/SimulationController.cs b/minitwit/Api/Controllers/SimulationController.cs index 86b9fcc..6dcc075 100644 --- a/minitwit/Api/Controllers/SimulationController.cs +++ b/minitwit/Api/Controllers/SimulationController.cs @@ -112,7 +112,7 @@ public async Task register([FromBody] SimulationUserCreateDTO use await write_latest(); var res = await UserRepo.CreateAsync(new UserCreateDTO {Username = user.Username, Email = user.Email, Password1 = user.Pwd, Password2 = user.Pwd}); - + switch(res) { case MISSING_PASSWORD: @@ -133,7 +133,7 @@ public async Task register([FromBody] SimulationUserCreateDTO use case EMAIL_TAKEN: logger.LogError("SIMULATION: Email already taken"); return BadRequest("The email is already taken"); - case SUCCES: + case SUCCESS: default: MinitwitController.TotalUsers.IncTo(UserRepo.GetTotalUsers()); return NoContent(); @@ -183,7 +183,7 @@ public async Task user_post_message([FromBody] SimulationMessageC await write_latest(); var id = await MessageRepo.CreateAsync(message.Content, username); - if(id == -1) + if(id == -1) { logger.LogError($"SIMULATION: {username} does not exist"); return 
BadRequest("Message could not be recorded"); @@ -247,7 +247,7 @@ public async Task follow([FromRoute] string username) { res = await UserRepo.FollowAsync(username, body.follow); } - else + else { return BadRequest("Not a supported method"); } diff --git a/minitwit/Models/UserRepository.cs b/minitwit/Models/UserRepository.cs index ff383f2..6139924 100644 --- a/minitwit/Models/UserRepository.cs +++ b/minitwit/Models/UserRepository.cs @@ -44,7 +44,7 @@ public async Task CreateAsync(UserCreateDTO user) await context.users.AddAsync(newUser); await context.SaveChangesAsync(); - return CreateReturnType.SUCCES; + return CreateReturnType.SUCCESS; } public string HashPassword(string password) @@ -164,7 +164,7 @@ orderby m.pub_date descending public int GetTotalUsers() { var query = (from u in context.users select u.user_id).Count(); - return query; + return query; } public async Task ReadAsync(int id, int noOfMessages = int.MaxValue) { diff --git a/minitwit/Shared/CreateReturnType.cs b/minitwit/Shared/CreateReturnType.cs index 40bdca3..8871dcd 100644 --- a/minitwit/Shared/CreateReturnType.cs +++ b/minitwit/Shared/CreateReturnType.cs @@ -1,13 +1,13 @@ namespace Shared { - public enum CreateReturnType - { - MISSING_PASSWORD, - MISSING_USERNAME, - INVALID_EMAIL, - PASSWORD_MISMATCH, - USERNAME_TAKEN, - EMAIL_TAKEN, - SUCCES - } -} \ No newline at end of file + public enum CreateReturnType + { + MISSING_PASSWORD, + MISSING_USERNAME, + INVALID_EMAIL, + PASSWORD_MISMATCH, + USERNAME_TAKEN, + EMAIL_TAKEN, + SUCCESS + } +} diff --git a/report/build/report.pdf b/report/build/report.pdf new file mode 100644 index 0000000..9be66cc Binary files /dev/null and b/report/build/report.pdf differ diff --git a/report/images/ci-cd-chain.svg b/report/images/ci-cd-chain.svg new file mode 100644 index 0000000..473b153 --- /dev/null +++ b/report/images/ci-cd-chain.svg @@ -0,0 +1,306 @@ + + +G + + +cluster_Test_Coverage + +Test coverage + + +cluster_Infer + +Infer# + + +cluster_SonarCloud + +Sonar 
Cloud + + +cluster_TestAndDeploy + +Test and Deploy + + +cluster_Report + +Report + + + +Setup\n.NET + +Setup +.NET + + + +Collect\ntest\ncoverage + +Collect +test +coverage + + + +Setup\n.NET->Collect\ntest\ncoverage + + + + + +Generate\ncoverage\nreport + +Generate +coverage +report + + + +Collect\ntest\ncoverage->Generate\ncoverage\nreport + + + + + +Upload\ncoverage\ninfo\nto Coveralls + +Upload +coverage +info +to Coveralls + + + +Generate\ncoverage\nreport->Upload\ncoverage\ninfo\nto Coveralls + + + + + +Merge\nbutton\nenabled + +Merge +button +enabled + + + +Upload\ncoverage\ninfo\nto Coveralls->Merge\nbutton\nenabled + + + + + +Setup\n.NET + +Setup +.NET + + + +Run\nchecks + +Run +checks + + + +Setup\n.NET ->Run\nchecks + + + + + +Print\nanalysis\nresults + +Print +analysis +results + + + +Run\nchecks->Print\nanalysis\nresults + + + + + +Install\nSonarCloud\nscanner + +Install +SonarCloud +scanner + + + +Build\nand\nAnalyze + +Build +and +Analyze + + + +Install\nSonarCloud\nscanner->Build\nand\nAnalyze + + + + + +Build\nand\nAnalyze->Merge\nbutton\nenabled + + + + + +Setup\n.NET   + +Setup +.NET   + + + +Run\ntests + +Run +tests + + + +Setup\n.NET  ->Run\ntests + + + + + +Branch = main + +Branch = main + + + +Run\ntests->Branch = main + + + + + +Run\ntests->Merge\nbutton\nenabled + + + + + +Publish\nDocker\nImage + +Publish +Docker +Image + + + +Branch = main->Publish\nDocker\nImage + + + + + +Download\nimage\non\ndroplets + +Download +image +on +droplets + + + +Publish\nDocker\nImage->Download\nimage\non\ndroplets + + + + + +Start\ndocker\ncontainer + +Start +docker +container + + + +Download\nimage\non\ndroplets->Start\ndocker\ncontainer + + + + + +Generate\nPDF + +Generate +PDF + + + +Upload as artifact + +Upload as artifact + + + +Generate\nPDF->Upload as artifact + + + + + +Peer review + +Peer review + + + +Peer review->Merge\nbutton\nenabled + + + + + +Push or PR + +Push or PR + + + +Push or PR->Setup\n.NET + + + + + +Push or PR->Setup\n.NET + + + + + 
+Push or PR->Install\nSonarCloud\nscanner + + + + + +Push or PR->Setup\n.NET   + + + + + +Push or PR->Generate\nPDF + + + + + \ No newline at end of file diff --git a/report/images/ci-cd-chain.viz b/report/images/ci-cd-chain.viz new file mode 100644 index 0000000..ff97914 --- /dev/null +++ b/report/images/ci-cd-chain.viz @@ -0,0 +1,87 @@ + +digraph G { + subgraph cluster_Test_Coverage { + style=filled; + color=lightgrey; + node [style=filled,color=white]; + "Setup\n.NET" -> + "Collect\ntest\ncoverage" -> + "Generate\ncoverage\nreport" -> + "Upload\ncoverage\ninfo\nto Coveralls"; + label = "Test coverage"; + } + + subgraph cluster_Infer { + style=filled; + color=lightgrey; + node [style=filled,color=white]; + "Setup\n.NET " -> + "Run\nchecks" -> + "Print\nanalysis\nresults"; + label = "Infer#" + } + + subgraph cluster_SonarCloud { + style=filled; + color=lightgrey; + node [style=filled,color=white]; + "Install\nSonarCloud\nscanner" -> + "Build\nand\nAnalyze"; + label = "Sonar Cloud" + } + + subgraph cluster_TestAndDeploy { + style=filled; + color=lightgrey; + node [style=filled,color=white]; + "Setup\n.NET " -> + "Run\ntests" -> + "Branch = main" -> + "Publish\nDocker\nImage" -> + "Download\nimage\non\ndroplets" -> + "Start\ndocker\ncontainer" + + "Branch = main" [shape=diamond] + + label = "Test and Deploy" + } + + subgraph cluster_Report { + style=filled; + color=lightgrey; + node [style=filled,color=white]; + "Generate\nPDF" -> + "Upload as artifact" + label = "Report" + } + + + rankdir=LR + "Run\ntests" -> "Merge\nbutton\nenabled" + + "Upload\ncoverage\ninfo\nto Coveralls" -> + "Merge\nbutton\nenabled" + + "Print\nanalysis\nresults" -> + "Merge\nbutton\nenabled" + + "Build\nand\nAnalyze" -> + "Merge\nbutton\nenabled" + + "Peer review" -> "Merge\nbutton\nenabled" + + "Push or PR" -> "Generate\nPDF" + + "Push or PR" -> "Setup\n.NET"; +// "PR" -> "Setup\n.NET"; + + "Push or PR" -> "Setup\n.NET "; +// "PR" -> "Setup\n.NET "; + + "Push or PR" -> 
"Install\nSonarCloud\nscanner"; +// "PR" -> "Install\nSonarCloud\nscanner"; + + "Push or PR" -> "Setup\n.NET " + + "Push or PR" [shape=circle]; +} diff --git a/report/images/class-diagram.svg b/report/images/class-diagram.svg new file mode 100644 index 0000000..7a9dc5f --- /dev/null +++ b/report/images/class-diagram.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/report/images/dependencies.png b/report/images/dependencies.png new file mode 100644 index 0000000..cab38d8 Binary files /dev/null and b/report/images/dependencies.png differ diff --git a/report/images/dependency-matrix.png b/report/images/dependency-matrix.png new file mode 100644 index 0000000..c25d648 Binary files /dev/null and b/report/images/dependency-matrix.png differ diff --git a/report/images/deployment-diagram.svg b/report/images/deployment-diagram.svg new file mode 100644 index 0000000..b60534c --- /dev/null +++ b/report/images/deployment-diagram.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/report/images/errors.png b/report/images/errors.png new file mode 100644 index 0000000..245d1c4 Binary files /dev/null and b/report/images/errors.png differ diff --git a/report/images/module-diagram.svg b/report/images/module-diagram.svg new file mode 100644 index 0000000..dc58377 --- /dev/null +++ b/report/images/module-diagram.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/report/images/sequence-diagram.svg b/report/images/sequence-diagram.svg new file mode 100644 index 0000000..c85dc75 --- /dev/null +++ b/report/images/sequence-diagram.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/report/images/sonarcloud.png b/report/images/sonarcloud.png new file mode 100644 index 0000000..78e5ff6 Binary files /dev/null and b/report/images/sonarcloud.png differ diff --git a/report/images/twooter-dashboard.png b/report/images/twooter-dashboard.png new file mode 100644 index 0000000..1a34e6d Binary files /dev/null and b/report/images/twooter-dashboard.png differ 
diff --git a/report/report.md b/report/report.md new file mode 100644 index 0000000..8b3c3d9 --- /dev/null +++ b/report/report.md @@ -0,0 +1,403 @@ +# Twooter
- Evolving and maintaining a Twitter clone with DevOps + +
+ +Report and documentation of an _ITU-MiniTwit_ system associated with the course _DevOps, Software Evolution and Software Maintenance_ from IT University of Copenhagen. + +**Date:** + +May 19th 2021 + +**Course title:** + +_DevOps - Software Evolution and Software Maintenance_ + +**Course id:** + +BSDSESM1KU + +**Course material:** + +https://github.com/itu-devops/lecture_notes + +### Group k - The Magic Strings + +
+ + +| Name | E-mail | +| :-------------------: | :----------------: | +| Kasper S. Kyhl | kaky@itu.dk | +| Emil Jäpelt | emja@itu.dk | +| Jonas G. Røssum | jglr@itu.dk | +| Kristoffer Højelse | krbh@itu.dk | +| Thomas H. Kilbak | thhk@itu.dk | + +
+
+ +
+ +# System's Perspective + +## Design and Architecture + +The Twooter system is a Social Media platform composed of a .NET Web Api, a PostgreSQL database and an HTML-templater serving server-rendered content. The templater is not particularly advanced, and somewhat hinders the expandability of the system. Using a modern front-end framework would have been more fitting for expandability, however the templater made it simple to recreate the design of the original MiniTwit, hence why it was chosen. + +The following subsections describes the Twooter system from different viewpoints in the style of Christensen's 3+1 Architectural Viewpoints. + +### Module Viewpoint + +The structure of the modules can be seen on _Figure 1_. It contains these modules (Test-classes and some misc. files omitted): + +| Module | Purpose | +| -------- | --------------------------------------------------------------------------------------------------------------------------------------------- | +| API | The main console app with the Program and Startup classes, as well as the controllers for the web API. BasicTemplater is also situated in this module. | +| Entities | Objects for the database, as well as the database context. | +| Models | Repositories for interactions with the database. | +| Shared | Contains data transfer objects (DTO's) that are used to pass data between other classes and the database. | + +As seen on _Figure 1_, web requests are handled by two controllers. The SimulationController has to adhere to a strict specification set by the simulator, which differs a lot from how a user might want the same information presented. + +For example, where the SimulatorController might return a BadRequest, the MinitwitController would return an HTML-page containing an error message. + +For this reason MinitwitController handles all user oriented web requests that returns the information wrapped in a pretty UI. 
+ + +![module-diagram.svg](./images/module-diagram.svg) + +_**Figure 1:** Module Diagram showing the classes and sub-modules in each module_ + +The dependencies between the most important classes and interfaces in the system can be seen on _Figure 2_. + +![class-diagram.svg](./images/class-diagram.svg) + +_**Figure 2:** Class Diagram showing inheritance and the realtions between classes_ + +### Component and Connector Viewpoint + +_Figure 3_ shows the sequence of subsystem interactions, that occur in the case that a user accesses the _/public_ endpoint. Nearly all use cases for the system follow this structure. + +> 1. A method is called on a Controller object +> 2. The Controller object checks if the user is logged in +> 3. The Controller object calls some methods on its repositories +> 4. The repositories execute the methods and returns some result to the controller +> > 5A. In the case of the MinitwitController, the result from the repositories is send to the BasicTemplater to generate HTML, or a redirect is issued \ +> > 5B. In the case of the SimulationController, the result from the repositories is formatted to adhere to the simulator +> 6. The result is returned to the User + +![sequence-diagram.svg](./images/sequence-diagram.svg) + +_**Figure 3:** Sequence Diagram of a GetPublicTimeline request_ + +### Deployment Viewpoint + +From a browser running on any device, you can access Twooter by making a web request to the IP of the Virtual Machine running the Swarm Manager. If you don't know the IP you can ask a DNS provider with the url [http://twooter.hojelse.com](http://twooter.hojelse.com). + +The Swarm manager is a Docker container assigned as manager in the Docker Swarm configuration. The swarm manager has a load balancer which routes the web request to one of the three containers each running a Twooter instance from a Docker Image. Each container is running on a separate Virtual Machine. 
Each container has been configured with a Docker Volume, a data repository, mounted by the container. + +The PostgreSQL database runs in a _Database Cluster_ provided by Digital Ocean. The primary database is replicated by the secondary database, such that queries can be rerouted, in the event that the primary database is unresponsive. + +Our tools for monitoring Prometheus and Grafana (see _Monitoring_), runs on a Virtual Machine exposing a Web API on ports 9090 and 3000 respectively. Prometheus scrapes and stores monitoring data by accessing the IP of the Docker Swarm Manager. Grafana is configured with two data sources: + +1. Prometheus, by accessing the Web API on port 9090 +2. The PostgreSQL database with a connection string and SQL queries. + +An overview of the deployed system can be seen on _Figure 4_. + +![deployment-diagram.svg](./images/deployment-diagram.svg) + +_**Figure 4:** Deployment Diagram. The open arrows indicate requests, and filled arrow heads indicate synchronous messages. Replies to requests are generally not shown._ + + +## Dependencies + + + +The dependencies of the program can be seen on _Figure 5_. Nodes with grey background are external dependencies that we are using, while those on white background are classes or namespaces that we have made. Notably not present on the graph is the use of .NET 5.0, as this is so all-encompassing that we did not put it on the graph to make it more readable. This graph is quite simplified, as not all dependencies are included, in order to improve readability. + +![Dependency graph](./images/dependencies.png) + +_**Figure 5:** Dependency graph for the program. Nodes marked with grey are external dependencies._ + +To get a look at the full list of dependencies, we used NDepend to generate a dependency matrix, which can be seen on _Figure 6_. The horizontal axis represents our namespaces: _Api_, _Models_, _Shared_ and _Entities_, and then along the vertical axis the dependencies are listed. 
Cells with numbers on them, mean that the first one has that many references to the other. For example, the API has 46 references to System.Runtime. + +![Dependency matrix](./images/dependency-matrix.png) + +_**Figure 6:** Dependency matrix. Each cell with a number represents the number of references from one namespace to the other._ + +## Current state of the system + + + +Taking a look at SonarCloud on _Figure 7_, we can see that it detects two vulnerabilities, two security hotspots and 52 code smells. The main reason that these were not resolved, is that we were not aware of these, as we did not fully utilize the possibilities of SonarCloud. The code smells are primarily related to using less strict access modifiers than what is possible, which is of little importance, because we are building an application and not a library. The vulnerabilities are about the application redirecting to pages based on the user's input. This could be an issue, but should not be a major security issue. +In terms of technical debt, it estimates 5 hours of technical debt, which is not that much considering the project has been running for about three months. + +![SonarCloud Dashboard](./images/sonarcloud.png) + +_**Figure 7:** Information from the SonarCloud dashboard. The 0.0% test coverage is because it has not been configured, since we use another service for code coverage._ + +## License + + + +The license chosen for the project is Apache License 2.0, as the project is open to be used by others. +The dependencies used all have permissive licenses that allow them to be used without obtaining a unique license for the product. It is therefore believed that the Apache License 2.0 complies with this. + +The licenses for the dependencies are listed below. We assume that each dependency also complies to the licenses of its sub-dependencies. 
+ +- Prometheus: Apache License 2.0 [Prometheus license] +- Grafana: GNU Affero General Public License v3.0 [Grafana license] +- Entity Framework Core: Apache License 2.0 [EFCore license] +- AspNetCore: Apache License 2.0 [aspnetcore license] +- Npgsql: PostgreSQL License (BSD-style) [Npgsql license] +- PostgreSQL: PostgreSQL License (BSD-style) [PostgreSQL license] +- NLog: BSD 3-Clause "New" or "Revised" License [NLog license] +- .NET: MIT License [.NET license] + +[prometheus license]: https://github.com/prometheus/prometheus/blob/main/LICENSE +[grafana license]: https://github.com/grafana/grafana/blob/main/LICENSE +[efcore license]: https://github.com/dotnet/efcore/blob/main/LICENSE.txt +[aspnetcore license]: https://github.com/dotnet/aspnetcore/blob/main/LICENSE.txt +[npgsql license]: https://github.com/npgsql/npgsql/blob/main/LICENSE +[postgresql license]: https://www.postgresql.org/about/licence/ +[nlog license]: https://github.com/NLog/NLog/blob/dev/LICENSE.txt +[.net license]: https://github.com/dotnet/runtime/blob/main/LICENSE.TXT + +# Process' perspective + + + +## Developer interactions + + + +We have interacted with each other mainly via pull requests. In the course of the project, 55 pull requests were opened. Whenever we wanted to merge a feature-branch to the main branch, we opened a pull request such that other developers could review the changes. The primary advantage of this, is that the developers that did not work on the feature, also get a chance to understand what is going on. That is in addition to the improvements to code-quality that reviews can yield. Less formal communication has mainly happened through Messenger, while online voice-chat has been conducted through the Discord platform, which also allowed us to do pair-programming despite not meeting physically. + +## Team organization + + + +Because of the course having weekly goals we, to some extent, worked in weekly sprints to keep up with the tasks. 
This is also why we strived to have at least a new release each week, however this was primarily in the beginning of the project, as quite few features were added later on. +Other than that, the team was not very strictly organized, as it is a quite small team, and the members all had a very similar way of working. That being, a fairly relaxed atmosphere, where work can be done whenever we feel like it, as long as the goal is reached. +Stricter organization might have been beneficial as we were unable to meet physically, which sometimes caused us not to be available at the same time. + +## Tools in the CI/CD chain(s) + + + +When merging to the main-branch or pushing to branches that have an open pull request to the main branch, five different GitHub Actions workflows are initialized. Three of these are intended to be used to improve code-quality, one tests and deploys the project, and one generates this report. + +For an overview of the CI/CD chain, see _Figure 8_. + +### Coverage workflow + +To determine the test coverage of our test suite, a coverage workflow is used. +It consists of three steps. The first step is running the tests and generating coverage data. The second step generates a downloadable test coverage report. The third step sends the code coverage data to a service called Coveralls, that automatically displays test coverage on pull requests. + +### Infer# workflow + +Infer# is a static code analysis tool that detects null dereferences and resource leaks in the codebase. The problems reported have been of little significance, so we chose to remove it as a requirement on pull requests. + +### SonarCloud workflow + +SonarCloud is a static analysis tool that detects bugs, vulnerabilities and bad coding practices. After opening a pull request, a SonarCloud bot comments its report, such that we know if there are any problems with the code about to be merged. This occurred multiple times during development. 
The service then provides information on why it is a problem and how to address the issue. + +### Test and deploy workflow + +The test and deploy workflow is responsible for running tests and conditionally deploying the system to production. If this workflow is triggered by a merge to the main branch, the Test and Deploy workflow continues beyond the test execution and also deploys the system. The deployment step of the workflow, releases an updated image of the application, connects to the VM containing the swarm manager, pulls down the updated image and spins up the service on all VM's. + +There is no roll-out strategy, so therefore when deploying, it closes the service, and then there is a bit of downtime until the deployment completes. If the deployment fails, then the service will be offline, until manually started again. + +![CI/CD chain](./images/ci-cd-chain.svg) + +_**Figure 8:** Graph showing the GitHub Actions workflows configured for the project._ + + +## Repository organization + + + +We have chosen a mono repository structure. This made the most sense, as our solution is a single dotnet solution, so splitting it across multiple repositories, would not work well. +This structure eliminates a lot of friction from working across different areas of the project and keeps related changes in different areas on the same branch. This also decreases friction from doing code reviews, since you only have to checkout a single branch to test a contribution, instead of multiple branches across multiple repositories. + +### Applied branching strategy +The branching strategy is based on topic branches: short lived feature branches and one main production branch. We chose this model because it keeps merge conflicts and general complexity of shipping features and bug fixes to a minimum. We have a lot of quality checks in our CI chain, which gives us the confidence to do continuous delivery. + +The main branch gets deployed to production, if all checks pass. 
Feature branches can only be merged if two criteria are fulfilled: + +1. All checks on the CI chain pass +2. At least one approving review + +We have also practiced rebasing our branches before merging them, in order to test that new code works with the latest code on our main branch. + +### Applied development process and tools supporting it + +We have not used any project management tools such as Kanban-boards, as we mainly stuck to the course-schedule, and because we are a quite small team, so it was fairly simple to distribute tasks. GitHub issues were used to some degree in cases where we knew about a problem, but were unable to resolve it immediately. Only six issues were opened in total, because we strived to fix problems when they came up, as resolving problems quickly was one of the main tasks in the course. + +If this project had a bigger scope, a Kanban-board on GitHub would probably have been the chosen tool. + + + +## Monitoring + + + +### DigitalOcean + +All servers and the database cluster, are provided by DigitalOcean. This gives us a fixed monitoring solution for each server/cluster. The metrics for the web servers are: CPU usage, memory usage, disk I/O, disk usage and bandwidth. For the cluster, the metrics are amount of connections, index/sequential scans and throughput. + +### Grafana + +All other metrics, that are not machine level, are available on a Grafana dashboard. Grafana is able to have many different sources of metrics to be displayed, and is therefore a good choice of dashboard for monitoring purposes. + +For the web servers, these metrics are generated by the Prometheus library for C#, and then collected and stored by a Prometheus instance running on the server. The only metric displayed on the dashboard is the amount of requests for each action and HTTP response code. + +For the database server, Grafana is able to make queries to the database to collect metrics. 
As of this report the collected metrics are relation sizes both in amount of rows and in MB. + +![Grafana screenshot](./images/twooter-dashboard.png) + +_**Figure 9:** The Grafana monitoring dashboard used in the project_ + + + +## Logging solution + +The solution uses a logging tool for ASP.NET Core called NLog. This allows us to make seven levels of logs (DEBUG, ERROR, FATAL, INFO, OFF, TRACE, WARN), which are written to date stamped _.log_-files, formatted as defined in _nlog.config_. These log files are stored in a docker volume which is mounted to the docker container. To enable ourselves to access these logs, we have created a /logs/{h@dd-mm-yyyy} endpoint, which displays logs for one hour in a table format. Additionally, accessing the /logs endpoint, will redirect the user to the newest logs. For ease of analyzing the logs, it is possible to toggle the INFO level of logs on or off. + +Everything that is written to console will be logged by NLog. For example, uncaught exceptions will be logged as ERROR or FATAL, and the information printed when starting an ASP.NET Core application, is logged as INFO. In addition to what is automatically a part of the logging, the system writes an ERROR level log, whenever some request fails, containing information on why it failed. INFO level logs are also written when a request to post a message is received. + +After changing to a horizontally scalable setup, we did not manage to modify our logging solution to support aggregating logs from multiple sources. + +## Security assessment + +The application is protected against XSS, as all user input is sanitized using HtmlEncode from the HttpUtility package. This ensures that even if a malicious request includes JavaScript or other HTML code, it will not be executed. + +Since we use Object-Relational Mapping, we do not write SQL statements directly and the system is therefore protected against SQL injection attacks. 
+ +The most critical attack that could occur, would be if someone got access to our database. In addition to the database having a strong password stored in GitHub Secrets, this is prevented with a whitelist of IP's that are allowed to connect to the database. In order to gain access to the database cluster, one would need to either gain access to our DigitalOcean account and whitelist their own IP, or compromise one of the whitelisted computers and obtain the database credentials. + +Authentication of http-requests to the API was implemented in the same way as the original MiniTwit. This means that there is a single authorization-token that any request must contain. This is not particularly safe, as all users send the same token, so the user is not really verified. This is related to the second security risk of [OWASP Top Ten]. However, in our case, this is considered a minor problem, as it is just a quirk of how the original MiniTwit was made. + +Our logging is also somewhat lacking, which is related to OWASP security risk number 10. A lot of information is logged, including thrown exceptions, when users post messages, etc. However, there is no warning about potential attacks, or warnings if it experiences a sudden spike in errors. This means that we can only find errors if we are looking for them, so a threat can potentially be present for a long time without us noticing. + +[owasp top ten]: https://owasp.org/www-project-top-ten/ + + + +## Scaling and high availability strategy + +### Database + +For a scalable database solution, we use a PostgreSQL database cluster via DigitalOcean, with one standby node. This solution is fully managed by DigitalOcean, which includes daily backups and automatic switchover, should the primary node fail. Should the service require scaling, read-only nodes can be added to data centers across the world, making the solution horizontally scalable. 
+ +### Web server + +Our web application is provided by a docker swarm, consisting of one manager node and two worker nodes. In this swarm, three instances of the application are running, and should any of them fail, the manager will ensure that a new instance is started, making this a high availability setup. For horizontal scaling, more worker nodes can be added to the swarm, and the manager configured to ensure more instances exist. + +# Lessons Learned Perspective + + + +## Evolution of our database solution + +In the first iteration of the system, we used an in-memory database. This was naturally a flawed solution for any system that needs to persist data and will be redeployed with any frequency. + +We changed to a docker container running an MSSQL Server image, firstly on the webserver ([Pull request #20](https://github.com/themagicstrings/twooter/pull/20)), then on a separate DigitalOcean droplet server ([Pull request #30](https://github.com/themagicstrings/twooter/pull/30)). These solutions had some big issues. By default MSSQL Server will try to keep as much data as it can in memory to speed up queries. In our case the memory usage would steadily climb, until the container was starved for resources, and any operation would slow to a near halt causing response timeouts. + +Our attempt to fix this, was simply to not use a docker container, instead running MSSQL Server directly on a droplet server. This did help reduce the speed at which the database would be starved, although it did still occur. To solve this we read quite a few articles on configuration issues that an MSSQL Server could have. One such issue, was that the default configuration had a maximum memory usage of around 2 TB, which is more than our server has. After correcting the configuration, it no longer would starve itself. + +This solution is however not scalable. 
Our final solution was a PostgreSQL database cluster provided by DigitalOcean ([Pull request #55](https://github.com/themagicstrings/twooter/pull/55)). Moving to this solution came with a few benefits. The database management is handled entirely by DigitalOcean, including standby nodes with automatic switchover on failure for high availability. Additionally, we gained the monitoring that DigitalOcean provides and the ability to maintain the database and web server from the same interface. + +One additional note, on the transition between different database management systems (i.e. MSSQL & PostgreSQL): Migrating to a new DBMS does provide some issues, as the representation of data may differ. There may exist tools that would be able to transform a snapshot of one database to another. Our solution, however, was simply retrofitting our source code, with a "data siphon" and a connection to the old and the new database, launching the program on our own machines and transferring the data this way. + +## Logging of simulator errors over time + +The course has a website that shows the number of errors found by the simulator, which is very useful to see which errors are most common in the system. A problem with this, is that it only shows the cumulative number of errors, so it is impossible to know when the errors occurred. We tried to work around this by making a scraper that periodically would poll data from the site, and save it with a time-stamp. This turned out to be quite difficult, as pulling the data out of the SVG, was not that easily done. As a replacement, we made a spreadsheet where we manually put in the data every few days, as seen on _Figure 10_. + + + +
+ +![Manual logging](./images/errors.png) + +
+ +_**Figure 10:** Graphs made in Google Sheets displaying the errors from the simulator over time. The data was recorded manually._ + +Using this data, we were able to react to sudden spikes in errors, for example the rapid growth in errors of type Follow and Unfollow, caused us to investigate the problem. It turned out that the problem was due to missing users in the database, so we solved it by copying users from another group's database into ours. On the graph named _Major Errors_ it can be seen that the red and yellow lines suddenly flatten out, as the problem was resolved. +Another way we have used the graph, is to identify when the service is down, as this causes a surge in connection errors. +Ideally this tool would not be necessary, as it has to be updated manually which takes time, and the things that it warns us of, should be covered by either monitoring or logging. However, in this case where our monitoring is a bit lacking, it was a very useful tool. + +## GitHub Actions has no debugging mode + +When creating a GitHub Actions script you will never get it perfect the first time. Figuring out why the script is not behaving like it should, is without auxiliary programs a process of repeatedly pushing a new commit to GitHub, waiting for their service to run the script, and finally reading the console output on the website. See for instance [Pull request #8](https://github.com/themagicstrings/twooter/pull/8). A time consuming process which we haven't gotten around to making better. 3rd party tools are available to run GitHub Actions scripts locally, which should speed this process up a lot. It was a minor inconvenience which could have been avoided with a little preliminary research. + +## Working with DevOps +Compared to previous projects, in this project we have focused on having as much uptime of the service as possible, automating as many processes as possible, and improving our Git-workflow as a team. 
+Improving our Git-workflow to incorporate reviews and static analysis, is critical to also having high uptime. This is because if we merge faulty code to the main branch, then the deployment might fail, causing the service to go offline. + +Automation of, for example, running tests and deployment has been a great learning experience for us. The things we have learned in this course, have made us start to use similar tools in other projects (such as the Second Year Project on SWU). Thinking of continuous deployment from day 1 ties very well into the Scrum framework's focus on incremental user value. + + + + +# Links + +- [Main repository](https://github.com/themagicstrings/twooter) - https://github.com/themagicstrings/twooter + + Contains the entire project. + +- [Twooter](http://twooter.hojelse.com) - http://twooter.hojelse.com + + This is the link to the minitwit service itself. Includes both the minitwit application and Simulator API. + +- [Twooter logs](http://twooter.hojelse.com/logs) - http://twooter.hojelse.com/logs + + Link to the logging solution. + +- [Grafana](http://188.166.113.237:3000/) - http://188.166.113.237:3000/ + + Grafana monitoring dashboard. The username and password is 'admin'. + +- [Prometheus](http://188.166.113.237:9090/) - http://188.166.113.237:9090/ + + Prometheus metrics page. Contains all metrics for the web servers. + +- [Twooter API Spec](http://twooter.hojelse.com/swagger/index.html) - http://twooter.hojelse.com/swagger/index.html + + Automatically generated API specification. + +- [SonarCloud](https://sonarcloud.io/dashboard?id=themagicstrings_twooter) - https://sonarcloud.io/dashboard?id=themagicstrings_twooter + + Reports of code quality and code security.