From b8981ab20030f34c2ed12cd089350dc90ee1f315 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Tue, 31 Dec 2024 12:54:35 +0000 Subject: [PATCH] Deployed 4831d35 with MkDocs version: 1.6.1 --- 404.html | 2 +- blog/index.html | 2 +- changelog/index.html | 2 +- .../DesignPatterns/AbstractFactory/index.html | 2 +- .../DesignPatterns/Adapter/index.html | 2 +- .../DesignPatterns/Bridge/index.html | 2 +- .../DesignPatterns/Builder/index.html | 2 +- .../DesignPatterns/CircuitBreakers/index.html | 2 +- .../DesignPatterns/Composite/index.html | 2 +- .../DesignPatterns/Decorator/index.html | 2 +- .../DesignPatterns/Facade/index.html | 2 +- .../DesignPatterns/FactoryMethod/index.html | 2 +- .../DesignPatterns/Iterator/index.html | 2 +- .../DesignPatterns/Prototype/index.html | 2 +- .../DesignPatterns/Singleton/index.html | 2 +- .../DesignPatterns/Strategy/index.html | 2 +- .../ConcurrencyParallelism/index.html | 2 +- .../FundamentalPrinciples/DRY/index.html | 2 +- .../FundamentalPrinciples/KISS/index.html | 2 +- .../FundamentalPrinciples/SOLID/index.html | 2 +- .../FundamentalPrinciples/YAGNI/index.html | 2 +- index.html | 2 +- langdives/Java/4Pillars/index.html | 2 +- langdives/Java/AccessModifPPPPP/index.html | 2 +- langdives/Java/Collections-JCF/index.html | 2 +- langdives/Java/GarbageCollection/index.html | 2 +- langdives/Java/Gradle/index.html | 2 +- langdives/Java/JDK-JRE-JVM/index.html | 2 +- langdives/Java/Java8vs11vs17vs21/index.html | 2 +- langdives/Java/JavaPassBy/index.html | 2 +- .../Java/KeyWordsTerminolgies/index.html | 6 +- langdives/Java/Locking-Intrinsic/index.html | 2 +- .../Java/Locking-Issues-DeadLock/index.html | 2 +- .../Java/Locking-Issues-LiveLock/index.html | 2 +- .../Java/Locking-Issues-Others/index.html | 2 +- .../Java/Locking-Issues-Starvation/index.html | 2 +- langdives/Java/Locking-Reentrant/index.html | 2 +- .../Locking-ReentrantReadWrite/index.html | 2 +- .../Java/LockingIntrinsicReentrant/index.html | 2 +- langdives/Java/Maven/index.html | 2 +- langdives/Java/MavenVsGradle/index.html | 2 +- langdives/Java/MemoryModel/index.html | 2 +- .../Java/PrimitiveReferenceTypes/index.html | 8 +- .../Java/ReferenceTypesInDepth/index.html | 111 ++++++++++------ .../Java/Spring/SpringAnnotations/index.html | 2 +- langdives/Java/Spring/SpringBoot/index.html | 2 +- .../Spring/SpringCoreFramework/index.html | 2 +- .../SpringFrameworkVsSpringBoot/index.html | 2 +- langdives/Java/Spring/index.html | 2 +- langdives/Java/StreamsLambdas/index.html | 2 +- langdives/Java/ThreadPoolTuning/index.html | 2 +- langdives/Java/ThreadPools/index.html | 2 +- langdives/Java/Threads-Atomicity/index.html | 2 +- langdives/Java/Threads/index.html | 2 +- search/search_index.json | 2 +- sitemap.xml | 120 +++++++++--------- sitemap.xml.gz | Bin 811 -> 811 bytes .../HighAvailabilityFaultTolerance/index.html | 2 +- .../DockerAndK8s/index.html | 2 +- .../ElasticSearch/index.html | 2 +- techdives/DistrubutedSystems/Kafka/index.html | 2 +- techdives/DistrubutedSystems/Redis/index.html | 2 +- techdives/DistrubutedSystems/S3/index.html | 2 +- techdives/GeneralConcepts/git/index.html | 2 +- 64 files changed, 199 insertions(+), 164 deletions(-) diff --git a/404.html b/404.html index f48f5a7..ccde0ff 100644 --- a/404.html +++ b/404.html @@ -1372,7 +1372,7 @@ - Primitives References + Primitives & References diff --git a/blog/index.html b/blog/index.html index 026e88e..54d3c39 100644 --- a/blog/index.html +++ b/blog/index.html @@ -1385,7 +1385,7 @@ - 
Primitives References + Primitives & References diff --git a/changelog/index.html b/changelog/index.html index 338f16e..e6cf970 100644 --- a/changelog/index.html +++ b/changelog/index.html @@ -1383,7 +1383,7 @@ - Primitives References + Primitives & References diff --git a/fundamentaldives/DesignPatterns/AbstractFactory/index.html b/fundamentaldives/DesignPatterns/AbstractFactory/index.html index f962b96..fae89fb 100644 --- a/fundamentaldives/DesignPatterns/AbstractFactory/index.html +++ b/fundamentaldives/DesignPatterns/AbstractFactory/index.html @@ -1495,7 +1495,7 @@ - Primitives References + Primitives & References diff --git a/fundamentaldives/DesignPatterns/Adapter/index.html b/fundamentaldives/DesignPatterns/Adapter/index.html index 367fb36..1c12417 100644 --- a/fundamentaldives/DesignPatterns/Adapter/index.html +++ b/fundamentaldives/DesignPatterns/Adapter/index.html @@ -1477,7 +1477,7 @@ - Primitives References + Primitives & References diff --git a/fundamentaldives/DesignPatterns/Bridge/index.html b/fundamentaldives/DesignPatterns/Bridge/index.html index 455d24c..a52ee04 100644 --- a/fundamentaldives/DesignPatterns/Bridge/index.html +++ b/fundamentaldives/DesignPatterns/Bridge/index.html @@ -1504,7 +1504,7 @@ - Primitives References + Primitives & References diff --git a/fundamentaldives/DesignPatterns/Builder/index.html b/fundamentaldives/DesignPatterns/Builder/index.html index 5e7308f..2a32c81 100644 --- a/fundamentaldives/DesignPatterns/Builder/index.html +++ b/fundamentaldives/DesignPatterns/Builder/index.html @@ -1495,7 +1495,7 @@ - Primitives References + Primitives & References diff --git a/fundamentaldives/DesignPatterns/CircuitBreakers/index.html b/fundamentaldives/DesignPatterns/CircuitBreakers/index.html index acbd571..9bc99b7 100644 --- a/fundamentaldives/DesignPatterns/CircuitBreakers/index.html +++ b/fundamentaldives/DesignPatterns/CircuitBreakers/index.html @@ -1537,7 +1537,7 @@ - Primitives References + Primitives & References diff --git a/fundamentaldives/DesignPatterns/Composite/index.html b/fundamentaldives/DesignPatterns/Composite/index.html index a8baadc..6d94a3f 100644 --- a/fundamentaldives/DesignPatterns/Composite/index.html +++ b/fundamentaldives/DesignPatterns/Composite/index.html @@ -1504,7 +1504,7 @@ - Primitives References + Primitives & References diff --git a/fundamentaldives/DesignPatterns/Decorator/index.html b/fundamentaldives/DesignPatterns/Decorator/index.html index 25631a9..dc8c32c 100644 --- a/fundamentaldives/DesignPatterns/Decorator/index.html +++ b/fundamentaldives/DesignPatterns/Decorator/index.html @@ -1495,7 +1495,7 @@ - Primitives References + Primitives & References diff --git a/fundamentaldives/DesignPatterns/Facade/index.html b/fundamentaldives/DesignPatterns/Facade/index.html index 2052225..ad59c83 100644 --- a/fundamentaldives/DesignPatterns/Facade/index.html +++ b/fundamentaldives/DesignPatterns/Facade/index.html @@ -1504,7 +1504,7 @@ - Primitives References + Primitives & References diff --git a/fundamentaldives/DesignPatterns/FactoryMethod/index.html b/fundamentaldives/DesignPatterns/FactoryMethod/index.html index da248b9..77b8b25 100644 --- a/fundamentaldives/DesignPatterns/FactoryMethod/index.html +++ b/fundamentaldives/DesignPatterns/FactoryMethod/index.html @@ -1504,7 +1504,7 @@ - Primitives References + Primitives & References diff --git a/fundamentaldives/DesignPatterns/Iterator/index.html b/fundamentaldives/DesignPatterns/Iterator/index.html index a421141..77f34eb 100644 --- 
a/fundamentaldives/DesignPatterns/Iterator/index.html +++ b/fundamentaldives/DesignPatterns/Iterator/index.html @@ -1495,7 +1495,7 @@ - Primitives References + Primitives & References diff --git a/fundamentaldives/DesignPatterns/Prototype/index.html b/fundamentaldives/DesignPatterns/Prototype/index.html index 2f4b52d..751b40a 100644 --- a/fundamentaldives/DesignPatterns/Prototype/index.html +++ b/fundamentaldives/DesignPatterns/Prototype/index.html @@ -1477,7 +1477,7 @@ - Primitives References + Primitives & References diff --git a/fundamentaldives/DesignPatterns/Singleton/index.html b/fundamentaldives/DesignPatterns/Singleton/index.html index d87e205..45c7fb3 100644 --- a/fundamentaldives/DesignPatterns/Singleton/index.html +++ b/fundamentaldives/DesignPatterns/Singleton/index.html @@ -1573,7 +1573,7 @@ - Primitives References + Primitives & References diff --git a/fundamentaldives/DesignPatterns/Strategy/index.html b/fundamentaldives/DesignPatterns/Strategy/index.html index 02bf2eb..5c4dde5 100644 --- a/fundamentaldives/DesignPatterns/Strategy/index.html +++ b/fundamentaldives/DesignPatterns/Strategy/index.html @@ -1486,7 +1486,7 @@ - Primitives References + Primitives & References diff --git a/fundamentaldives/FundamentalConcepts/ConcurrencyParallelism/index.html b/fundamentaldives/FundamentalConcepts/ConcurrencyParallelism/index.html index 93bb2b7..9ab3953 100644 --- a/fundamentaldives/FundamentalConcepts/ConcurrencyParallelism/index.html +++ b/fundamentaldives/FundamentalConcepts/ConcurrencyParallelism/index.html @@ -1579,7 +1579,7 @@ - Primitives References + Primitives & References diff --git a/fundamentaldives/FundamentalPrinciples/DRY/index.html b/fundamentaldives/FundamentalPrinciples/DRY/index.html index 1cb26cc..306cafe 100644 --- a/fundamentaldives/FundamentalPrinciples/DRY/index.html +++ b/fundamentaldives/FundamentalPrinciples/DRY/index.html @@ -1486,7 +1486,7 @@ - Primitives References + Primitives & References diff --git a/fundamentaldives/FundamentalPrinciples/KISS/index.html b/fundamentaldives/FundamentalPrinciples/KISS/index.html index 258c8c8..fa56518 100644 --- a/fundamentaldives/FundamentalPrinciples/KISS/index.html +++ b/fundamentaldives/FundamentalPrinciples/KISS/index.html @@ -1495,7 +1495,7 @@ - Primitives References + Primitives & References diff --git a/fundamentaldives/FundamentalPrinciples/SOLID/index.html b/fundamentaldives/FundamentalPrinciples/SOLID/index.html index bb24c79..57a4f7c 100644 --- a/fundamentaldives/FundamentalPrinciples/SOLID/index.html +++ b/fundamentaldives/FundamentalPrinciples/SOLID/index.html @@ -1486,7 +1486,7 @@ - Primitives References + Primitives & References diff --git a/fundamentaldives/FundamentalPrinciples/YAGNI/index.html b/fundamentaldives/FundamentalPrinciples/YAGNI/index.html index 9142658..c052cb9 100644 --- a/fundamentaldives/FundamentalPrinciples/YAGNI/index.html +++ b/fundamentaldives/FundamentalPrinciples/YAGNI/index.html @@ -1504,7 +1504,7 @@ - Primitives References + Primitives & References diff --git a/index.html b/index.html index 0d854a9..10ca12f 100644 --- a/index.html +++ b/index.html @@ -1409,7 +1409,7 @@

Hey Hello Welcome 👋

- Primitives References + Primitives & References diff --git a/langdives/Java/4Pillars/index.html b/langdives/Java/4Pillars/index.html index be0d442..a412c76 100644 --- a/langdives/Java/4Pillars/index.html +++ b/langdives/Java/4Pillars/index.html @@ -1477,7 +1477,7 @@ - Primitives References + Primitives & References diff --git a/langdives/Java/AccessModifPPPPP/index.html b/langdives/Java/AccessModifPPPPP/index.html index cb4f4d4..f5190de 100644 --- a/langdives/Java/AccessModifPPPPP/index.html +++ b/langdives/Java/AccessModifPPPPP/index.html @@ -1477,7 +1477,7 @@ - Primitives References + Primitives & References diff --git a/langdives/Java/Collections-JCF/index.html b/langdives/Java/Collections-JCF/index.html index af0db89..fdd50eb 100644 --- a/langdives/Java/Collections-JCF/index.html +++ b/langdives/Java/Collections-JCF/index.html @@ -1390,7 +1390,7 @@ - Primitives References + Primitives & References diff --git a/langdives/Java/GarbageCollection/index.html b/langdives/Java/GarbageCollection/index.html index f2fbc53..0156455 100644 --- a/langdives/Java/GarbageCollection/index.html +++ b/langdives/Java/GarbageCollection/index.html @@ -1390,7 +1390,7 @@ - Primitives References + Primitives & References diff --git a/langdives/Java/Gradle/index.html b/langdives/Java/Gradle/index.html index fe27979..a4a067f 100644 --- a/langdives/Java/Gradle/index.html +++ b/langdives/Java/Gradle/index.html @@ -1555,7 +1555,7 @@ - Primitives References + Primitives & References diff --git a/langdives/Java/JDK-JRE-JVM/index.html b/langdives/Java/JDK-JRE-JVM/index.html index 385bf8e..3240c34 100644 --- a/langdives/Java/JDK-JRE-JVM/index.html +++ b/langdives/Java/JDK-JRE-JVM/index.html @@ -1492,7 +1492,7 @@ - Primitives References + Primitives & References diff --git a/langdives/Java/Java8vs11vs17vs21/index.html b/langdives/Java/Java8vs11vs17vs21/index.html index cf57f00..10823e0 100644 --- a/langdives/Java/Java8vs11vs17vs21/index.html +++ b/langdives/Java/Java8vs11vs17vs21/index.html @@ -1390,7 +1390,7 @@ - Primitives References + Primitives & References diff --git a/langdives/Java/JavaPassBy/index.html b/langdives/Java/JavaPassBy/index.html index 461a6d3..401e8c2 100644 --- a/langdives/Java/JavaPassBy/index.html +++ b/langdives/Java/JavaPassBy/index.html @@ -1390,7 +1390,7 @@ - Primitives References + Primitives & References diff --git a/langdives/Java/KeyWordsTerminolgies/index.html b/langdives/Java/KeyWordsTerminolgies/index.html index 67f3f9c..db82947 100644 --- a/langdives/Java/KeyWordsTerminolgies/index.html +++ b/langdives/Java/KeyWordsTerminolgies/index.html @@ -1504,7 +1504,7 @@ - Primitives References + Primitives & References @@ -2848,13 +2848,13 @@

Others + -

How Array References Work:

-
+

Classes and Objects

When you create an object using new, the reference variable points to the object in heap memory.

@@ -2852,15 +2892,14 @@

System.out.println(p1.name); // Output: Bob (both references point to the same object)

How References Work with Objects:
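A minimal sketch of the idea (assuming a simple Person class with a public name field, consistent with the p1.name example above):

class Person {
    String name;
}

Person p1 = new Person();    // p1 refers to a Person object on the heap
p1.name = "Alice";
Person p2 = p1;              // p2 now refers to the SAME object, not a copy
p2.name = "Bob";
System.out.println(p1.name); // Output: Bob (both references point to the same object)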

-
+

Wrapper Classes

-

Wrapper classes (Integer, Double, Boolean, etc.) wrap primitive types into objects. These are reference types, and Java performs autoboxing/unboxing to convert between primitive types and wrapper objects.

+

Wrapper classes (Integer, Double, Boolean, etc.) wrap primitive types into objects. These are reference types, and Java performs autoboxing/unboxing to convert between primitive types and wrapper objects.

Example
Integer num1 = 100;
@@ -2876,12 +2915,11 @@ 

Wrapper Classes

Wrapper Caching

    -
  • Java caches Integer objects in the range -128 to 127 for performance.
  • -
  • Beyond this range, new objects are created.
  • +
  • Java caches Integer objects in the range -128 to 127 for performance.
  • +
  • Beyond this range, new objects are created.
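A quick sketch of what this caching means in practice (== compares references, equals compares values):

Integer a = 100, b = 100;
System.out.println(a == b);      // true  (both references come from the Integer cache)

Integer c = 200, d = 200;
System.out.println(c == d);      // false (outside -128 to 127, two separate objects)
System.out.println(c.equals(d)); // true  (value comparison is unaffected)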
-

Reference and Deep Copy

-

Shallow Copy: Copies only the reference, so both variables refer to the same object.

+

Shallow Copy: Copies only the reference, so both variables refer to the same object.

Example
int[] original = {1, 2, 3};
@@ -2891,7 +2929,7 @@ 

System.out.println(original[0]); // Output: 100

-

Deep Copy: Creates a new object with the same data.

+

Deep Copy: Creates a new object with the same data.

Example
int[] original = {1, 2, 3};
@@ -2901,34 +2939,31 @@ 

System.out.println(original[0]); // Output: 1
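The same idea applies to objects, not just arrays; a minimal sketch, reusing the Person class from the earlier examples and copying fields by hand:

Person original = new Person();
original.name = "Alice";

Person copy = new Person();  // a new, independent object
copy.name = original.name;   // copy the data, not the reference

copy.name = "Bob";
System.out.println(original.name); // Output: Alice (the deep copy is independent)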

-

Null/NullPointerException

-

When a reference is not initialized, it holds the value null. Accessing a field or method on a null reference throws a NullPointerException.

+

When a reference is not initialized, it holds the value null. Accessing a field or method on a null reference throws a NullPointerException.

Example
Person p = null;
 System.out.println(p.name);  // Throws NullPointerException
 
-

Garbage Collection

-

Java uses Garbage Collection to manage memory. When no references point to an object, it becomes eligible for garbage collection.

+

Java uses Garbage Collection to manage memory. When no references point to an object, it becomes eligible for garbage collection.

Example
Person p1 = new Person();  // Object created
 p1 = null;  // Now eligible for garbage collection
 
-
+

We will learn about garbage collection more in depth in another article.

Summary

    -
  • Strings: Immutable, stored in the String Pool if created with literals. new String() creates a separate object.
  • +
  • Strings: Immutable, stored in the String Pool if created with literals. new String() creates a separate object.
  • Arrays: Reference types, so multiple variables can point to the same array object.
  • -
  • Classes: Objects are referenced in memory; multiple references can point to the same object.
  • +
  • Classes: Objects are referenced in memory; multiple references can point to the same object.
  • Wrapper Classes: Use caching for certain ranges (e.g., Integer values from -128 to 127).
  • Garbage Collection: Objects are eligible for garbage collection when no active references point to them.
-

String Pool In Depth

The String Pool (also called the intern pool) in Java is implemented using a Hash Table-like data structure internally. Let’s explore the design and behavior behind this structure:
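Before the internals, a short sketch of the pooling behavior itself (== compares references):

String a = "hello";                   // literal goes into the String Pool
String b = "hello";                   // reuses the pooled instance
System.out.println(a == b);           // true

String c = new String("hello");       // explicit heap object, bypasses the pool
System.out.println(a == c);           // false
System.out.println(a == c.intern());  // true (intern() returns the pooled instance)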

Internals

@@ -3016,7 +3051,7 @@

String pool Summary - December 10, 2024 + December 31, 2024 @@ -3084,7 +3119,7 @@

String pool Summary - +

diff --git a/langdives/Java/Spring/SpringAnnotations/index.html b/langdives/Java/Spring/SpringAnnotations/index.html index c7ff6fc..d396183 100644 --- a/langdives/Java/Spring/SpringAnnotations/index.html +++ b/langdives/Java/Spring/SpringAnnotations/index.html @@ -1390,7 +1390,7 @@ - Primitives References + Primitives & References diff --git a/langdives/Java/Spring/SpringBoot/index.html b/langdives/Java/Spring/SpringBoot/index.html index f57cb13..1da84ef 100644 --- a/langdives/Java/Spring/SpringBoot/index.html +++ b/langdives/Java/Spring/SpringBoot/index.html @@ -1390,7 +1390,7 @@ - Primitives References + Primitives & References diff --git a/langdives/Java/Spring/SpringCoreFramework/index.html b/langdives/Java/Spring/SpringCoreFramework/index.html index 43bff4e..e960a27 100644 --- a/langdives/Java/Spring/SpringCoreFramework/index.html +++ b/langdives/Java/Spring/SpringCoreFramework/index.html @@ -1390,7 +1390,7 @@ - Primitives References + Primitives & References diff --git a/langdives/Java/Spring/SpringFrameworkVsSpringBoot/index.html b/langdives/Java/Spring/SpringFrameworkVsSpringBoot/index.html index 10f1466..09bcf3a 100644 --- a/langdives/Java/Spring/SpringFrameworkVsSpringBoot/index.html +++ b/langdives/Java/Spring/SpringFrameworkVsSpringBoot/index.html @@ -1390,7 +1390,7 @@ - Primitives References + Primitives & References diff --git a/langdives/Java/Spring/index.html b/langdives/Java/Spring/index.html index a47d596..6314ba0 100644 --- a/langdives/Java/Spring/index.html +++ b/langdives/Java/Spring/index.html @@ -1390,7 +1390,7 @@ - Primitives References + Primitives & References diff --git a/langdives/Java/StreamsLambdas/index.html b/langdives/Java/StreamsLambdas/index.html index 011e5b1..acdfc35 100644 --- a/langdives/Java/StreamsLambdas/index.html +++ b/langdives/Java/StreamsLambdas/index.html @@ -1390,7 +1390,7 @@ - Primitives References + Primitives & References diff --git a/langdives/Java/ThreadPoolTuning/index.html b/langdives/Java/ThreadPoolTuning/index.html index 96d37a5..e8c8ed8 100644 --- a/langdives/Java/ThreadPoolTuning/index.html +++ b/langdives/Java/ThreadPoolTuning/index.html @@ -1390,7 +1390,7 @@ - Primitives References + Primitives & References diff --git a/langdives/Java/ThreadPools/index.html b/langdives/Java/ThreadPools/index.html index 9b1ec60..e8c6af6 100644 --- a/langdives/Java/ThreadPools/index.html +++ b/langdives/Java/ThreadPools/index.html @@ -1390,7 +1390,7 @@ - Primitives References + Primitives & References diff --git a/langdives/Java/Threads-Atomicity/index.html b/langdives/Java/Threads-Atomicity/index.html index 7dd5c01..3c90e10 100644 --- a/langdives/Java/Threads-Atomicity/index.html +++ b/langdives/Java/Threads-Atomicity/index.html @@ -1390,7 +1390,7 @@ - Primitives References + Primitives & References diff --git a/langdives/Java/Threads/index.html b/langdives/Java/Threads/index.html index 7024bec..10a7e38 100644 --- a/langdives/Java/Threads/index.html +++ b/langdives/Java/Threads/index.html @@ -1390,7 +1390,7 @@ - Primitives References + Primitives & References diff --git a/search/search_index.json b/search/search_index.json index 5495d2f..73bb9b0 100644 --- a/search/search_index.json +++ b/search/search_index.json @@ -1 +1 @@ -{"config":{"lang":["en"],"separator":"[\\s\\u200b\\-_,:!=\\[\\]()\"`/]+|\\.(?!\\d)|&[lg]t;|(?!\\b)(?=[A-Z][a-z])","pipeline":["stopWordFilter"]},"docs":[{"location":"","title":"Under the Hood","text":"

Welcome to Deep Dives by MG

"},{"location":"blog/","title":"Blog","text":""},{"location":"blog/#coming-soon","title":"Coming soon","text":""},{"location":"changelog/","title":"Changelog","text":""},{"location":"changelog/#under-the-hood-by-mrudhul-guda","title":"Under the Hood by Mrudhul Guda","text":""},{"location":"changelog/#0.4.0","title":"0.4.0 November 16, 2024","text":""},{"location":"changelog/#0.3.0","title":"0.3.0 November 12, 2024","text":""},{"location":"changelog/#0.2.0","title":"0.2.0 November 9, 2024","text":""},{"location":"changelog/#0.1.0","title":"0.1.0 November 5, 2024","text":""},{"location":"fundamentaldives/DesignPatterns/AbstractFactory/","title":"Abstract Factory","text":""},{"location":"fundamentaldives/DesignPatterns/AbstractFactory/#what","title":"What ?","text":"

The Abstract Factory Pattern is a creational design pattern that provides an interface for creating families of related or dependent objects without specifying their concrete classes. It promotes loose coupling between client code and the actual implementations, allowing the code to be more flexible and scalable.

The Abstract Factory pattern works as a super-factory that creates other factories. Each factory produced by the abstract factory is responsible for creating a family of related objects.

Key Characteristics

Class Diagram

AbstractFactory\n\u251c\u2500\u2500 createProductA()\n\u2514\u2500\u2500 createProductB()\n\nConcreteFactory1 \u2500\u2500\u2500\u2500> ProductA1, ProductB1\nConcreteFactory2 \u2500\u2500\u2500\u2500> ProductA2, ProductB2\n\nClient \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500> AbstractFactory, AbstractProduct\n
"},{"location":"fundamentaldives/DesignPatterns/AbstractFactory/#when-to-use","title":"When to Use ?","text":""},{"location":"fundamentaldives/DesignPatterns/AbstractFactory/#advantages","title":"Advantages","text":""},{"location":"fundamentaldives/DesignPatterns/AbstractFactory/#disadvantages","title":"Disadvantages","text":""},{"location":"fundamentaldives/DesignPatterns/AbstractFactory/#how-to-implement","title":"How to Implement ?","text":"Simple Example

Let\u2019s go with an example: imagine you are creating a UI component factory. Your application can switch between two themes, Dark Theme and Light Theme. Both themes provide the same types of components (buttons, text fields) but with different appearances.

Step-1: Define the Abstract Products
// Abstract Product: Button\npublic interface Button {\n    void render();\n}\n\n// Abstract Product: TextField\npublic interface TextField {\n    void render();\n}\n
Step-2: Create Concrete Products
// Concrete Product: Light Button\npublic class LightButton implements Button {\n    @Override\n    public void render() {\n        System.out.println(\"Rendering a Light Button\");\n    }\n}\n\n// Concrete Product: Dark Button\npublic class DarkButton implements Button {\n    @Override\n    public void render() {\n        System.out.println(\"Rendering a Dark Button\");\n    }\n}\n\n// Concrete Product: Light TextField\npublic class LightTextField implements TextField {\n    @Override\n    public void render() {\n        System.out.println(\"Rendering a Light Text Field\");\n    }\n}\n\n// Concrete Product: Dark TextField\npublic class DarkTextField implements TextField {\n    @Override\n    public void render() {\n        System.out.println(\"Rendering a Dark Text Field\");\n    }\n}\n
Step-3: Define the Abstract Factory Interface
public interface UIFactory {\n    Button createButton();\n    TextField createTextField();\n}\n
Step-4: Implement Concrete Factories
// Concrete Factory for Light Theme\npublic class LightUIFactory implements UIFactory {\n    @Override\n    public Button createButton() {\n        return new LightButton();\n    }\n\n    @Override\n    public TextField createTextField() {\n        return new LightTextField();\n    }\n}\n\n// Concrete Factory for Dark Theme\npublic class DarkUIFactory implements UIFactory {\n    @Override\n    public Button createButton() {\n        return new DarkButton();\n    }\n\n    @Override\n    public TextField createTextField() {\n        return new DarkTextField();\n    }\n}\n
Step-5: Using the Abstract Factory in a Client
public class Application {\n    private Button button;\n    private TextField textField;\n\n    public Application(UIFactory factory) {\n        this.button = factory.createButton();\n        this.textField = factory.createTextField();\n    }\n\n    public void renderUI() {\n        button.render();\n        textField.render();\n    }\n\n    public static void main(String[] args) {\n        // Client can choose between different factories.\n        UIFactory factory = new DarkUIFactory(); // Could be switched to LightUIFactory\n        Application app = new Application(factory);\n        app.renderUI();\n    }\n}\n

Output:

Rendering a Dark Button\nRendering a Dark Text Field\n

Spring Boot Example

In Spring Boot, the Abstract Factory pattern can complement dependency injection (DI) by delegating object creation logic to the factory. Here\u2019s how to implement it with Spring Boot.

Step-1: Define Factory Beans
@Configuration\npublic class UIFactoryConfig {\n\n    @Bean\n    public UIFactory uiFactory(@Value(\"${app.theme}\") String theme) {\n        if (\"dark\".equalsIgnoreCase(theme)) {\n            return new DarkUIFactory();\n        } else {\n            return new LightUIFactory();\n        }\n    }\n}\n
Step-2: Use the Factory in a Controller
@RestController\n@RequestMapping(\"/ui\")\npublic class UIController {\n\n    private final UIFactory uiFactory;\n\n    @Autowired\n    public UIController(UIFactory uiFactory) {\n        this.uiFactory = uiFactory;\n    }\n\n    @GetMapping(\"/render\")\n    public void renderUI() {\n        Button button = uiFactory.createButton();\n        TextField textField = uiFactory.createTextField();\n\n        button.render();\n        textField.render();\n    }\n}\n
Step-3: Configure Application Properties
# application.properties\napp.theme=dark\n

In this example, the theme is configured through the application.properties file, and the factory selection is handled by the Spring context.

"},{"location":"fundamentaldives/DesignPatterns/AbstractFactory/#factory-method-comparison","title":"Factory Method Comparison","text":"Aspect Factory Method Abstract Factory Purpose Create one type of product. Create families of related products. Complexity Less complex. More complex, involves multiple classes. Client Knowledge Client knows about individual products. Client works with factories, not specific products. Usage Simple use-cases. Complex, multi-product scenarios."},{"location":"fundamentaldives/DesignPatterns/AbstractFactory/#summary","title":"Summary","text":"

The Abstract Factory Pattern is a powerful tool when designing systems that need to create multiple families of related objects. While it adds complexity, the benefits include extensibility, maintainability, and loose coupling. In a Spring Boot application, it works well alongside dependency injection, especially when configurations like themes or environments vary.

"},{"location":"fundamentaldives/DesignPatterns/Adapter/","title":"Adapter Design Pattern","text":""},{"location":"fundamentaldives/DesignPatterns/Adapter/#what","title":"What ?","text":"

The Adapter Pattern is a structural design pattern in software development that allows objects with incompatible interfaces to work together. It acts as a bridge between two incompatible interfaces, providing a wrapper or a mediator to enable their interaction without changing their existing code.

This Pattern converts the interface of a class into another interface that a client expects. This helps integrate two systems with different interfaces so they can work together without altering their code. It is often used when a legacy system needs to be integrated with new components or when third-party APIs are integrated into an existing codebase.

Analogy

Think of a power plug adapter: you have an appliance with a US plug (two flat pins), but you need to connect it to a European socket (two round holes). The adapter lets the two incompatible interfaces (US and European plugs) work together without modifying either.

"},{"location":"fundamentaldives/DesignPatterns/Adapter/#when-to-use","title":"When to Use ?","text":""},{"location":"fundamentaldives/DesignPatterns/Adapter/#when-not-to-use","title":"When Not to Use ?","text":""},{"location":"fundamentaldives/DesignPatterns/Adapter/#ways-to-implement","title":"Ways to Implement","text":"Simple Example

There are two common ways to implement the Adapter Pattern:

  1. Class Adapter (Inheritance-based)
  2. Object Adapter (Composition-based)
Class Adapter Pattern (via Inheritance)

In this approach, the adapter class extends the adaptee (the class that has the incompatible interface) and implements the interface that the client expects.

Class Adapter Java Example
// Target Interface - The desired interface that client expects\ninterface MediaPlayer {\n    void play(String audioType, String fileName);\n}\n\n// Adaptee - Incompatible interface that needs adaptation\nclass AdvancedMediaPlayer {\n    void playMp3(String fileName) {\n        System.out.println(\"Playing mp3 file: \" + fileName);\n    }\n\n    void playMp4(String fileName) {\n        System.out.println(\"Playing mp4 file: \" + fileName);\n    }\n}\n\n// Class Adapter - Adapts AdvancedMediaPlayer to MediaPlayer\nclass MediaAdapter extends AdvancedMediaPlayer implements MediaPlayer {\n    @Override\n    public void play(String audioType, String fileName) {\n        if (audioType.equalsIgnoreCase(\"mp3\")) {\n            playMp3(fileName);\n        } else if (audioType.equalsIgnoreCase(\"mp4\")) {\n            playMp4(fileName);\n        }\n    }\n}\n\n// Client Code\npublic class AudioPlayer {\n    public static void main(String[] args) {\n        MediaPlayer player = new MediaAdapter();\n        player.play(\"mp3\", \"song.mp3\");\n        player.play(\"mp4\", \"video.mp4\");\n    }\n}\n
Explanation

MediaAdapter extends AdvancedMediaPlayer (inheriting the original functionality) and implements the MediaPlayer interface (adapting it to what the client expects).

Object Adapter Pattern (via Composition)

In this approach, the adapter contains an instance of the adaptee class and delegates calls to the appropriate methods.

Object Adapter Java Example
// Target Interface\ninterface MediaPlayer {\n    void play(String audioType, String fileName);\n}\n\n// Adaptee\nclass AdvancedMediaPlayer {\n    void playMp3(String fileName) {\n        System.out.println(\"Playing mp3 file: \" + fileName);\n    }\n\n    void playMp4(String fileName) {\n        System.out.println(\"Playing mp4 file: \" + fileName);\n    }\n}\n\n// Object Adapter\nclass MediaAdapter implements MediaPlayer {\n    private AdvancedMediaPlayer advancedPlayer;\n\n    public MediaAdapter(AdvancedMediaPlayer advancedPlayer) {\n        this.advancedPlayer = advancedPlayer;\n    }\n\n    @Override\n    public void play(String audioType, String fileName) {\n        if (audioType.equalsIgnoreCase(\"mp3\")) {\n            advancedPlayer.playMp3(fileName);\n        } else if (audioType.equalsIgnoreCase(\"mp4\")) {\n            advancedPlayer.playMp4(fileName);\n        }\n    }\n}\n\n// Client Code\npublic class AudioPlayer {\n    public static void main(String[] args) {\n        AdvancedMediaPlayer advancedPlayer = new AdvancedMediaPlayer();\n        MediaPlayer adapter = new MediaAdapter(advancedPlayer);\n        adapter.play(\"mp3\", \"song.mp3\");\n        adapter.play(\"mp4\", \"video.mp4\");\n    }\n}\n
Explanation

In this version, MediaAdapter holds a reference to the AdvancedMediaPlayer instance and delegates method calls instead of extending the class.

Spring Boot Example

In a Spring Boot context, the Adapter Pattern can be used to integrate an external or legacy service with your application's service layer.

Integrating a Legacy Payment Service
// Legacy Payment Service - Adaptee\nclass LegacyPaymentService {\n    public void payWithCreditCard(String cardNumber) {\n        System.out.println(\"Payment made using Legacy Credit Card: \" + cardNumber);\n    }\n}\n\n// Target Interface\ninterface PaymentService {\n    void processPayment(String cardNumber);\n}\n\n// Adapter Implementation - Integrating LegacyPaymentService with PaymentService\n@Component\nclass PaymentServiceAdapter implements PaymentService {\n    private final LegacyPaymentService legacyService;\n\n    // Constructor injection\n    public PaymentServiceAdapter(LegacyPaymentService legacyService) {\n        this.legacyService = legacyService;\n    }\n\n    @Override\n    public void processPayment(String cardNumber) {\n        legacyService.payWithCreditCard(cardNumber);\n    }\n}\n\n// Spring Boot Controller\n@RestController\n@RequestMapping(\"/payments\")\npublic class PaymentController {\n\n    private final PaymentService paymentService;\n\n    @Autowired\n    public PaymentController(PaymentService paymentService) {\n        this.paymentService = paymentService;\n    }\n\n    @PostMapping\n    public String makePayment(@RequestParam String cardNumber) {\n        paymentService.processPayment(cardNumber);\n        return \"Payment Successful\";\n    }\n}\n
Explanation "},{"location":"fundamentaldives/DesignPatterns/Adapter/#summary","title":"Summary","text":"

The Adapter Pattern enhances flexibility by decoupling client code from specific implementations, promotes reusability by enabling compatibility between systems, improves maintainability by isolating legacy or third-party code, and simplifies testing through easy mock or stub usage.

"},{"location":"fundamentaldives/DesignPatterns/Bridge/","title":"Bridge","text":""},{"location":"fundamentaldives/DesignPatterns/Bridge/#what","title":"What ?","text":"

The Bridge Pattern is a structural design pattern that helps to decouple an abstraction from its implementation so that both can vary independently. This pattern is especially useful when you need to manage complex class hierarchies or have multiple dimensions of variations.

When both the abstraction (interface) and its implementation (how it works internally) need to evolve, the code becomes complex and hard to manage. Bridge helps to separate these concerns. The pattern separates the abstraction (interface) from the actual implementation and lets them evolve independently by delegating the concrete work to another interface.

"},{"location":"fundamentaldives/DesignPatterns/Bridge/#structure","title":"Structure ?","text":"

The structure involves two key parts:

The Abstraction contains a reference to the Implementor (interface or class). This lets the abstraction delegate the implementation details to the concrete implementations.

Class Diagram

Abstraction --> Implementor\n    |                |\nRefinedAbstraction   ConcreteImplementor\n
"},{"location":"fundamentaldives/DesignPatterns/Bridge/#when-to-use","title":"When to Use ?","text":""},{"location":"fundamentaldives/DesignPatterns/Bridge/#where-not-to-use","title":"Where Not to Use ?","text":""},{"location":"fundamentaldives/DesignPatterns/Bridge/#advantages","title":"Advantages","text":""},{"location":"fundamentaldives/DesignPatterns/Bridge/#disadvantages","title":"Disadvantages","text":""},{"location":"fundamentaldives/DesignPatterns/Bridge/#how-to-implement","title":"How to Implement ?","text":"Simple Example

Let\u2019s look at a real-world example: rendering shapes on different platforms. The rendering logic could vary depending on the platform (Windows, Linux, etc.), but the shape (e.g., Circle, Rectangle) stays the same.

Spring Boot Example

In a Spring Boot application, you can use the Bridge pattern to switch between different implementations of a service dynamically, such as switching between multiple ways of sending notifications (e.g., Email, SMS). This is helpful when services have multiple implementations, and you need to inject them dynamically without changing the client code.

"},{"location":"fundamentaldives/DesignPatterns/Bridge/#rendering-shapes-on-different-platforms","title":"Rendering Shapes on Different Platforms","text":"Step-1: Define the Implementor interface
interface Renderer {\n    void render(String shape);\n}\n
Step-2: Create Concrete Implementor classes
class VectorRenderer implements Renderer {\n    @Override\n    public void render(String shape) {\n        System.out.println(\"Rendering \" + shape + \" as vectors.\");\n    }\n}\n\nclass RasterRenderer implements Renderer {\n    @Override\n    public void render(String shape) {\n        System.out.println(\"Rendering \" + shape + \" as pixels.\");\n    }\n}\n
Step-3: Define the Abstraction
abstract class Shape {\n    protected Renderer renderer;\n\n    public Shape(Renderer renderer) {\n        this.renderer = renderer;\n    }\n\n    public abstract void draw();\n}\n
Step-4: Create Refined Abstraction classes
class Circle extends Shape {\n    public Circle(Renderer renderer) {\n        super(renderer);\n    }\n\n    @Override\n    public void draw() {\n        renderer.render(\"Circle\");\n    }\n}\n\nclass Rectangle extends Shape {\n    public Rectangle(Renderer renderer) {\n        super(renderer);\n    }\n\n    @Override\n    public void draw() {\n        renderer.render(\"Rectangle\");\n    }\n}\n
Step-5: Client Code
public class BridgePatternDemo {\n    public static void main(String[] args) {\n        Shape circle = new Circle(new VectorRenderer());\n        circle.draw(); // Output: Rendering Circle as vectors.\n\n        Shape rectangle = new Rectangle(new RasterRenderer());\n        rectangle.draw(); // Output: Rendering Rectangle as pixels.\n    }\n}\n
"},{"location":"fundamentaldives/DesignPatterns/Bridge/#notification-system-in-spring-boot","title":"Notification System in Spring Boot","text":"Step-1: Create the Implementor Interface (NotificationSender)
public interface NotificationSender {\n    void send(String message);\n}\n
Step-2. Implement Concrete Implementors (Email and SMS)
@Component\npublic class EmailSender implements NotificationSender {\n    @Override\n    public void send(String message) {\n        System.out.println(\"Sending Email: \" + message);\n    }\n}\n\n@Component\npublic class SmsSender implements NotificationSender {\n    @Override\n    public void send(String message) {\n        System.out.println(\"Sending SMS: \" + message);\n    }\n}\n
Step-3. Create the Abstraction (Notification)
public abstract class Notification {\n    protected NotificationSender sender;\n\n    public Notification(NotificationSender sender) {\n        this.sender = sender;\n    }\n\n    public abstract void notify(String message);\n}\n
Step-4. Create Refined Abstraction (UrgentNotification)
@Component\npublic class UrgentNotification extends Notification {\n\n    @Autowired\n    public UrgentNotification(NotificationSender sender) {\n        super(sender);\n    }\n\n    @Override\n    public void notify(String message) {\n        System.out.println(\"Urgent Notification:\");\n        sender.send(message);\n    }\n}\n
Step-5. Use the Bridge Pattern in a Controller
@RestController\n@RequestMapping(\"/notifications\")\npublic class NotificationController {\n\n    private final UrgentNotification notification;\n\n    @Autowired\n    public NotificationController(UrgentNotification notification) {\n        this.notification = notification;\n    }\n\n    @PostMapping(\"/send\")\n    public String sendNotification(@RequestBody String message) {\n        notification.notify(message);\n        return \"Notification sent!\";\n    }\n}\n

In this example, the Bridge Pattern allows you to switch between different ways of sending notifications (email or SMS) without changing the client code (the NotificationController).

"},{"location":"fundamentaldives/DesignPatterns/Bridge/#summary","title":"Summary","text":"

The Bridge Pattern is an essential design pattern to consider when your class hierarchy is growing unmanageable due to multiple dimensions of variations. Use this pattern when you need to decouple abstraction from implementation and allow them to evolve independently, but avoid it when simpler solutions can suffice.

"},{"location":"fundamentaldives/DesignPatterns/Builder/","title":"Builder Design","text":""},{"location":"fundamentaldives/DesignPatterns/Builder/#what","title":"What ?","text":"

The Builder Pattern is a creational design pattern that allows the construction of complex objects step by step. It separates the construction process from the actual object, giving more control over the construction process.

This Pattern simplifies the creation of complex objects with many optional fields by enabling incremental construction through method chaining, avoiding constructors with numerous parameters. It's ideal for objects requiring various configurations or optional parameters.

"},{"location":"fundamentaldives/DesignPatterns/Builder/#when-to-use","title":"When to Use ?","text":""},{"location":"fundamentaldives/DesignPatterns/Builder/#why-use","title":"Why Use ?","text":""},{"location":"fundamentaldives/DesignPatterns/Builder/#where-not-to-use","title":"Where Not to Use ?","text":""},{"location":"fundamentaldives/DesignPatterns/Builder/#advantages","title":"Advantages","text":""},{"location":"fundamentaldives/DesignPatterns/Builder/#how-to-implement","title":"How to Implement ?","text":"Simple Example

Below is a basic Java example demonstrating the pattern. Assume we need to build a Car object with several optional fields.

Simple Builder Implementation
public class Car {\n    // Required fields\n    private final String make;\n    private final String model;\n\n    // Optional fields\n    private final String color;\n    private final int year;\n    private final boolean automatic;\n\n    // Private constructor accessible only through Builder\n    private Car(Builder builder) {\n        this.make = builder.make;\n        this.model = builder.model;\n        this.color = builder.color;\n        this.year = builder.year;\n        this.automatic = builder.automatic;\n    }\n\n    // Getters (optional, based on your needs)\n    public String getMake() { return make; }\n    public String getModel() { return model; }\n    public String getColor() { return color; }\n    public int getYear() { return year; }\n    public boolean isAutomatic() { return automatic; }\n\n    // Static inner Builder class\n    public static class Builder {\n        // Required fields\n        private final String make;\n        private final String model;\n\n        // Optional fields initialized to default values\n        private String color = \"White\";\n        private int year = 2020;\n        private boolean automatic = true;\n\n        // Builder constructor with required fields\n        public Builder(String make, String model) {\n            this.make = make;\n            this.model = model;\n        }\n\n        // Setter-like methods for optional fields, returning the builder object\n        public Builder color(String color) {\n            this.color = color;\n            return this;\n        }\n\n        public Builder year(int year) {\n            this.year = year;\n            return this;\n        }\n\n        public Builder automatic(boolean automatic) {\n            this.automatic = automatic;\n            return this;\n        }\n\n        // Build method to create the final Car object\n        public Car build() {\n            return new Car(this);\n        }\n    }\n\n    @Override\n    public String toString() {\n        return \"Car [make=\" + make + \", model=\" + model + \n            \", color=\" + color + \", year=\" + year + \n            \", automatic=\" + automatic + \"]\";\n    }\n}\n
Usage of the Builder Pattern
public class Main {\n    public static void main(String[] args) {\n        // Using the builder to create a Car object\n        Car car = new Car.Builder(\"Tesla\", \"Model S\")\n                            .color(\"Red\")\n                            .year(2023)\n                            .automatic(true)\n                            .build();\n\n        System.out.println(car);\n    }\n}\n

Output:

Car [make=Tesla, model=Model S, color=Red, year=2023, automatic=true]\n

Spring Boot Example

In Spring Boot, you often need to build objects like DTOs, configurations, or entities with complex structures. Using the Builder Pattern can make object construction more manageable, especially when working with REST APIs.

Using Builder Pattern for DTOs in Spring Boot
// Let's assume a UserDTO object for API responses.\npublic class UserDTO {\n    private final String username;\n    private final String email;\n    private final String role;\n\n    private UserDTO(Builder builder) {\n        this.username = builder.username;\n        this.email = builder.email;\n        this.role = builder.role;\n    }\n\n    public static class Builder {\n        private String username;\n        private String email;\n        private String role;\n\n        public Builder username(String username) {\n            this.username = username;\n            return this;\n        }\n\n        public Builder email(String email) {\n            this.email = email;\n            return this;\n        }\n\n        public Builder role(String role) {\n            this.role = role;\n            return this;\n        }\n\n        public UserDTO build() {\n            return new UserDTO(this);\n        }\n    }\n}\n
Controller Example with Builder Pattern in Spring Boot
@RestController\n@RequestMapping(\"/api/users\")\npublic class UserController {\n\n    @GetMapping(\"/{id}\")\n    public UserDTO getUserById(@PathVariable Long id) {\n        // Simulate fetching user details from a database\n        return new UserDTO.Builder()\n                .username(\"johndoe\")\n                .email(\"john.doe@example.com\")\n                .role(\"ADMIN\")\n                .build();\n    }\n}\n

This approach ensures that the object returned from the API is constructed cleanly with only the necessary fields set.

Alternative Ways

Telescoping Constructors: multiple overloaded constructors for different parameter combinations; not ideal for readability and maintainability.

public Car(String make, String model) { ... }\npublic Car(String make, String model, String color) { ... }\npublic Car(String make, String model, String color, int year) { ... }\n

Setter Methods: useful for mutable objects, but they don\u2019t guarantee immutability and are less readable when constructing objects with many attributes.

Car car = new Car();\ncar.setMake(\"Tesla\");\ncar.setModel(\"Model S\");\ncar.setColor(\"Red\");\n
"},{"location":"fundamentaldives/DesignPatterns/Builder/#summary","title":"Summary","text":"

The Builder Pattern is an elegant way to handle object creation, especially when dealing with many fields or optional parameters. It ensures code readability, immutability, and flexibility while avoiding the need for numerous constructors. However, it should be used only when necessary, as simple objects may not benefit from it.

Note

In Spring Boot, the Builder Pattern can be effectively used for creating DTOs and other complex objects, improving both code readability and maintenance. This pattern fits well when dealing with REST API responses or configuration settings, ensuring your objects are built in a clear, consistent manner.

"},{"location":"fundamentaldives/DesignPatterns/CircuitBreakers/","title":"Circuit Breakers","text":""},{"location":"fundamentaldives/DesignPatterns/CircuitBreakers/#what","title":"What ?","text":"

A circuit breaker is a design pattern used to prevent cascading failures and manage service availability. If a service call repeatedly fails, the circuit breaker \"trips\" and prevents further attempts, allowing the system to recover gracefully. This pattern mimics the behavior of electrical circuit breakers.

"},{"location":"fundamentaldives/DesignPatterns/CircuitBreakers/#why-is-it-needed","title":"Why is it Needed ?","text":"

Real-world scenario

If Service A depends on Service B but Service B becomes unavailable, Service A will receive failures continuously. A circuit breaker prevents Service A from overloading itself and Service B by failing fast.

"},{"location":"fundamentaldives/DesignPatterns/CircuitBreakers/#types-of-circuit-breakers","title":"Types of Circuit Breakers","text":"

There are multiple models of circuit breakers to choose from, depending on the use case:

Count-based Circuit Breaker - Trips if a predefined number of failures occur, e.g., if there are 3 consecutive failed requests, the breaker opens.

Time-based Circuit Breaker - Monitors failures within a window of time and trips if the failure threshold is met, e.g., if 5 requests out of 10 fail within 1 minute, it opens.

Sliding Window Circuit Breaker - A rolling window of requests over time determines whether the circuit trips; useful when failure patterns are sporadic. A configuration sketch follows below.
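As a rough illustration of how the count-based and time-based variants map onto configuration, here is a sketch using Resilience4j (the same library used in the implementation section below); the window sizes and thresholds are illustrative assumptions, not recommendations:
CircuitBreakerConfig countBased = CircuitBreakerConfig.custom()\n    .slidingWindowType(CircuitBreakerConfig.SlidingWindowType.COUNT_BASED)\n    .slidingWindowSize(10)     // judge the last 10 calls\n    .failureRateThreshold(50)  // open if 50% of them fail\n    .build();\n\nCircuitBreakerConfig timeBased = CircuitBreakerConfig.custom()\n    .slidingWindowType(CircuitBreakerConfig.SlidingWindowType.TIME_BASED)\n    .slidingWindowSize(60)     // judge calls from the last 60 seconds\n    .failureRateThreshold(50)\n    .build();\n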

"},{"location":"fundamentaldives/DesignPatterns/CircuitBreakers/#how-does-it-work","title":"How Does it Work?","text":"

The basic mechanics of a circuit breaker involve three states:

Closed State - Requests flow normally; failures are counted against the threshold.

Open State - Requests fail fast without calling the downstream service, until a timeout elapses.

Half-Open State - A limited number of trial requests are let through; success closes the circuit again, failure reopens it.

State Transition Flow

Closed -> (failure threshold reached) -> Open -> (timeout) -> Half-Open -> (success) -> Closed\n
"},{"location":"fundamentaldives/DesignPatterns/CircuitBreakers/#use-cases","title":"Use Cases","text":""},{"location":"fundamentaldives/DesignPatterns/CircuitBreakers/#implementation","title":"Implementation","text":"

Several popular libraries and frameworks make circuit breaker implementations simple. Below are code examples in Java and Python.

Java with Resilience4j / Python with PyBreaker
// Resilience4j is a library providing circuit breaker implementations.\nimport io.github.resilience4j.circuitbreaker.CircuitBreaker;\nimport io.github.resilience4j.circuitbreaker.CircuitBreakerConfig;\nimport io.github.resilience4j.circuitbreaker.CircuitBreakerRegistry;\n\nimport java.time.Duration;\n\npublic class Example {\n    public static void main(String[] args) {\n        // Configuration\n        CircuitBreakerConfig config = CircuitBreakerConfig.custom()\n            .failureRateThreshold(50)  // Open if 50% of requests fail\n            .waitDurationInOpenState(Duration.ofSeconds(5))  // Wait 5 seconds before Half-Open\n            .build();\n\n        CircuitBreakerRegistry registry = CircuitBreakerRegistry.of(config);\n        CircuitBreaker circuitBreaker = registry.circuitBreaker(\"myService\");\n\n        // Wrap a call in the circuit breaker\n        String response = circuitBreaker.executeSupplier(() -> makeHttpRequest());\n\n        System.out.println(response);\n    }\n\n    private static String makeHttpRequest() {\n        // Simulate an HTTP request here\n        return \"Success!\";\n    }\n}\n
# PyBreaker is a library implementing the Circuit Breaker pattern for Python applications.\n\nfrom pybreaker import CircuitBreaker, CircuitBreakerError\nimport requests\n\n# Define a circuit breaker\nbreaker = CircuitBreaker(fail_max=3, reset_timeout=5)\n\n@breaker\ndef fetch_data(url):\n    response = requests.get(url)\n    if response.status_code != 200:\n        raise Exception(\"Service unavailable\")\n    return response.json()\n\ntry:\n    data = fetch_data('https://api.example.com/data')\n    print(data)\nexcept CircuitBreakerError:\n    print(\"Circuit is open. Service unavailable.\")\n
"},{"location":"fundamentaldives/DesignPatterns/CircuitBreakers/#advanced-topics","title":"Advanced Topics","text":""},{"location":"fundamentaldives/DesignPatterns/CircuitBreakers/#monitoring-and-metrics","title":"Monitoring and Metrics","text":"

Circuit breakers need to be monitored to ensure they perform correctly. You can integrate them with monitoring tools like Prometheus or Grafana. Many libraries offer hooks to capture metrics such as state transitions, failure rates, and call counts.
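One illustrative hook, staying with the Resilience4j circuitBreaker instance from the Java example above (the println logging is just an illustration, not a prescribed setup):
// Subscribe to state-transition events\ncircuitBreaker.getEventPublisher()\n    .onStateTransition(event ->\n        System.out.println(\"State change: \" + event.getStateTransition()));\n\n// Point-in-time numbers are also available directly\nfloat failureRate = circuitBreaker.getMetrics().getFailureRate();\n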

"},{"location":"fundamentaldives/DesignPatterns/CircuitBreakers/#tuning-the-circuit-breaker","title":"Tuning the Circuit Breaker","text":""},{"location":"fundamentaldives/DesignPatterns/CircuitBreakers/#testing-circuit-breakers","title":"Testing Circuit Breakers","text":""},{"location":"fundamentaldives/DesignPatterns/CircuitBreakers/#summary","title":"Summary","text":"

Circuit breakers are crucial in modern, distributed systems, preventing unnecessary retries and protecting systems from cascading failures. As systems grow in complexity, using the circuit breaker pattern helps maintain high availability and resilience.

"},{"location":"fundamentaldives/DesignPatterns/Composite/","title":"Composite","text":""},{"location":"fundamentaldives/DesignPatterns/Composite/#what","title":"What ?","text":"

The Composite Pattern is a structural design pattern used in software design to represent part-whole hierarchies. It enables you to build complex object structures by treating both individual objects and compositions of objects uniformly.

This pattern allows you to treat a group of objects in the same way as a single object. This is especially useful when building tree structures (like directories or UI components).

Key Concepts

The idea is to define a common interface for all the objects, whether simple or complex, so they can be treated uniformly.

Basic Structure UML

Component (Interface or Abstract class)\n\u251c\u2500\u2500 Leaf (Concrete class)\n\u2514\u2500\u2500 Composite (Concrete class containing Components)\n
"},{"location":"fundamentaldives/DesignPatterns/Composite/#when","title":"When ?","text":""},{"location":"fundamentaldives/DesignPatterns/Composite/#where-not-to-use","title":"Where Not to Use ?","text":""},{"location":"fundamentaldives/DesignPatterns/Composite/#why-use","title":"Why Use ?","text":""},{"location":"fundamentaldives/DesignPatterns/Composite/#ways-to-create","title":"Ways to Create ?","text":""},{"location":"fundamentaldives/DesignPatterns/Composite/#how-to-use-effectively","title":"How to Use Effectively?","text":""},{"location":"fundamentaldives/DesignPatterns/Composite/#how-to-apply","title":"How to Apply ?","text":"Simple Example
// 1. Component Interface\ninterface Component {\n    void showDetails();  // Common operation for both Leaf and Composite\n}\n\n// 2. Leaf Class (Single object)\nclass Employee implements Component {\n    private String name;\n    private String position;\n\n    public Employee(String name, String position) {\n        this.name = name;\n        this.position = position;\n    }\n\n    @Override\n    public void showDetails() {\n        System.out.println(name + \" works as \" + position);\n    }\n}\n\n// 3. Composite Class (Composite of Components)\nclass Department implements Component {\n    private List<Component> employees = new ArrayList<>();\n\n    public void addEmployee(Component employee) {\n        employees.add(employee);\n    }\n\n    public void removeEmployee(Component employee) {\n        employees.remove(employee);\n    }\n\n    @Override\n    public void showDetails() {\n        for (Component employee : employees) {\n            employee.showDetails();\n        }\n    }\n}\n\n// 4. Client Code\npublic class CompositePatternDemo {\n    public static void main(String[] args) {\n        // Create individual employees\n        Component emp1 = new Employee(\"John\", \"Developer\");\n        Component emp2 = new Employee(\"Doe\", \"Tester\");\n\n        // Create a department and add employees to it\n        Department department = new Department();\n        department.addEmployee(emp1);\n        department.addEmployee(emp2);\n\n        // Show details\n        System.out.println(\"Department Details:\");\n        department.showDetails();\n    }\n}\n
Output
Department Details:\nJohn works as Developer\nDoe works as Tester\n
Spring Boot Example

In Spring Boot, the Composite pattern can fit into cases where you model tree-based structures in your business logic, such as:

  1. Entity Relationships in JPA: Modeling nested categories, departments, or menus.
  2. Business Service Layer: Creating a unified API to handle both individual and composite objects.
"},{"location":"fundamentaldives/DesignPatterns/Composite/#product-category-service-in-spring-boot","title":"Product Category Service in Spring Boot","text":"
// 1. Component Interface\npublic interface ProductCategory {\n    String getName();\n    void showCategoryDetails();\n}\n\n// 2. Leaf Class\npublic class Product implements ProductCategory {\n    private String name;\n\n    public Product(String name) {\n        this.name = name;\n    }\n\n    @Override\n    public String getName() {\n        return name;\n    }\n\n    @Override\n    public void showCategoryDetails() {\n        System.out.println(\"Product: \" + name);\n    }\n}\n\n// 3. Composite Class\npublic class Category implements ProductCategory {\n    private String name;\n    private List<ProductCategory> children = new ArrayList<>();\n\n    public Category(String name) {\n        this.name = name;\n    }\n\n    public void add(ProductCategory category) {\n        children.add(category);\n    }\n\n    public void remove(ProductCategory category) {\n        children.remove(category);\n    }\n\n    @Override\n    public String getName() {\n        return name;\n    }\n\n    @Override\n    public void showCategoryDetails() {\n        System.out.println(\"Category: \" + name);\n        for (ProductCategory child : children) {\n            child.showCategoryDetails();\n        }\n    }\n}\n\n// 4. Controller in Spring Boot\n@RestController\n@RequestMapping(\"/categories\")\npublic class CategoryController {\n\n    @GetMapping(\"/example\")\n    public void example() {\n        // Creating products\n        ProductCategory p1 = new Product(\"Laptop\");\n        ProductCategory p2 = new Product(\"Phone\");\n\n        // Creating a category and adding products\n        Category electronics = new Category(\"Electronics\");\n        electronics.add(p1);\n        electronics.add(p2);\n\n        // Display details\n        electronics.showCategoryDetails();\n    }\n}\n
Sample Output when calling /categories/example
Category: Electronics\nProduct: Laptop\nProduct: Phone\n
Spring Boot Considerations "},{"location":"fundamentaldives/DesignPatterns/Composite/#summary","title":"Summary","text":"

The Composite Pattern is a powerful structural pattern for managing hierarchical, tree-like structures. It allows uniform handling of individual and composite objects, making it ideal for UI elements, filesystems, or business domains with nested elements. When integrating with Spring Boot, it works well in controllers, services, or JPA entities for modeling hierarchical data. However, avoid using it when there\u2019s no hierarchy or when performance is critical (deep recursion). Use it wisely, and it can help you reduce complexity and simplify your code.

"},{"location":"fundamentaldives/DesignPatterns/Decorator/","title":"Decorator","text":""},{"location":"fundamentaldives/DesignPatterns/Decorator/#what","title":"What ?","text":"

The Decorator Pattern is a structural design pattern that allows behavior to be added to individual objects, either statically or dynamically, without affecting the behavior of other objects from the same class. This pattern is particularly useful when you need to add functionality to objects without subclassing and in scenarios where multiple combinations of behaviors are required.

This pattern is used to attach additional responsibilities or behaviors to an object dynamically. It wraps the original object, adding new behavior while keeping the object\u2019s interface intact. A decorator class has a reference to the original object and implements the same interface.

Key Concepts

"},{"location":"fundamentaldives/DesignPatterns/Decorator/#when-to-use","title":"When to Use ?","text":""},{"location":"fundamentaldives/DesignPatterns/Decorator/#where-not-to-use","title":"Where Not to Use ?","text":""},{"location":"fundamentaldives/DesignPatterns/Decorator/#advantages","title":"Advantages","text":""},{"location":"fundamentaldives/DesignPatterns/Decorator/#disadvantages","title":"Disadvantages","text":""},{"location":"fundamentaldives/DesignPatterns/Decorator/#how-to-apply","title":"How to Apply ?","text":"Simple Example

Let's consider an example where we are building a coffee shop. Different types of coffees can be enhanced with add-ons like milk, sugar, etc. Using the decorator pattern, we can apply these add-ons dynamically without subclassing.

// Step 1: Component Interface\npublic interface Coffee {\n    String getDescription();\n    double getCost();\n}\n\n// Step 2: ConcreteComponent (Basic Coffee)\npublic class BasicCoffee implements Coffee {\n    @Override\n    public String getDescription() {\n        return \"Basic Coffee\";\n    }\n\n    @Override\n    public double getCost() {\n        return 2.0;\n    }\n}\n\n// Step 3: Decorator (Abstract)\npublic abstract class CoffeeDecorator implements Coffee {\n    protected Coffee coffee; // The object being decorated\n\n    public CoffeeDecorator(Coffee coffee) {\n        this.coffee = coffee;\n    }\n\n    public String getDescription() {\n        return coffee.getDescription();\n    }\n\n    public double getCost() {\n        return coffee.getCost();\n    }\n}\n\n// Step 4: Concrete Decorators (e.g., Milk, Sugar)\npublic class MilkDecorator extends CoffeeDecorator {\n    public MilkDecorator(Coffee coffee) {\n        super(coffee);\n    }\n\n    @Override\n    public String getDescription() {\n        return coffee.getDescription() + \", Milk\";\n    }\n\n    @Override\n    public double getCost() {\n        return coffee.getCost() + 0.5;\n    }\n}\n\npublic class SugarDecorator extends CoffeeDecorator {\n    public SugarDecorator(Coffee coffee) {\n        super(coffee);\n    }\n\n    @Override\n    public String getDescription() {\n        return coffee.getDescription() + \", Sugar\";\n    }\n\n    @Override\n    public double getCost() {\n        return coffee.getCost() + 0.2;\n    }\n}\n\n// Step 5: Usage\npublic class CoffeeShop {\n    public static void main(String[] args) {\n        Coffee coffee = new BasicCoffee();\n        System.out.println(coffee.getDescription() + \" $\" + coffee.getCost());\n\n        coffee = new MilkDecorator(coffee);\n        System.out.println(coffee.getDescription() + \" $\" + coffee.getCost());\n\n        coffee = new SugarDecorator(coffee);\n        System.out.println(coffee.getDescription() + \" $\" + coffee.getCost());\n    }\n}\n
Output
Basic Coffee $2.0\nBasic Coffee, Milk $2.5\nBasic Coffee, Milk, Sugar $2.7\n
Spring Boot Example

In Spring Boot, the decorator pattern can be used in scenarios such as logging, monitoring, or security checks. You can implement a decorator pattern to enhance service classes without changing their core logic. Here's an example where we decorate a service class to add logging functionality.

Component Interface (Service Layer)
public interface UserService {\n    String getUserDetails(String userId);\n}\n
Concrete Component
@Service\npublic class UserServiceImpl implements UserService {\n    @Override\n    public String getUserDetails(String userId) {\n        return \"User details for \" + userId;\n    }\n}\n
Decorator
// Not annotated with @Service: this decorator is wired manually in the\n// configuration below, avoiding an ambiguity between two UserService beans.\npublic class LoggingUserService implements UserService {\n\n    private final UserService userService;\n\n    public LoggingUserService(UserService userService) {\n        this.userService = userService;\n    }\n\n    @Override\n    public String getUserDetails(String userId) {\n        System.out.println(\"Fetching details for user: \" + userId);\n        return userService.getUserDetails(userId);\n    }\n}\n
Configuration to Use Decorator
@Configuration\npublic class ServiceConfig {\n\n    @Bean\n    public UserService userService(UserServiceImpl userServiceImpl) {\n        return new LoggingUserService(userServiceImpl);\n    }\n}\n
How it Works in Spring Boot "},{"location":"fundamentaldives/DesignPatterns/Decorator/#summary","title":"Summary","text":"

The Decorator Pattern is a powerful and flexible way to enhance objects with additional behaviors dynamically without altering their structure. It shines in scenarios requiring combinations of behaviors and helps maintain clean, modular code.

In Spring Boot, it can be used for decorating services with additional features like logging, security, or metrics, allowing these aspects to remain separate from the core business logic.

This pattern should be used thoughtfully, since excessive use can introduce complexity and make debugging difficult. However, when applied correctly, it ensures that code remains extensible and adheres to the Single Responsibility and Open/Closed Principles.

"},{"location":"fundamentaldives/DesignPatterns/Facade/","title":"Facade","text":"

The Facade Pattern is a structural design pattern commonly used to provide a simple, unified interface to a complex subsystem of classes, libraries, or frameworks. This pattern makes a complex library or system easier to use by hiding the underlying complexities and exposing only the functionality that is relevant for the client.

"},{"location":"fundamentaldives/DesignPatterns/Facade/#what","title":"What ?","text":""},{"location":"fundamentaldives/DesignPatterns/Facade/#how-to-create","title":"How to Create ?","text":""},{"location":"fundamentaldives/DesignPatterns/Facade/#when-to-use","title":"When to Use ?","text":""},{"location":"fundamentaldives/DesignPatterns/Facade/#where-not-to-use","title":"Where Not to Use ?","text":""},{"location":"fundamentaldives/DesignPatterns/Facade/#advantages","title":"Advantages","text":""},{"location":"fundamentaldives/DesignPatterns/Facade/#disadvantages","title":"Disadvantages","text":""},{"location":"fundamentaldives/DesignPatterns/Facade/#how-to-apply","title":"How to Apply ?","text":"Simple Example

Let's go through a simple Java example demonstrating the Facade pattern for a Home Theater System:

// Subsystem classes\nclass Amplifier {\n    public void on() { System.out.println(\"Amplifier is ON.\"); }\n    public void off() { System.out.println(\"Amplifier is OFF.\"); }\n}\n\nclass DVDPlayer {\n    public void play() { System.out.println(\"Playing movie.\"); }\n    public void stop() { System.out.println(\"Stopping movie.\"); }\n}\n\nclass Projector {\n    public void on() { System.out.println(\"Projector is ON.\"); }\n    public void off() { System.out.println(\"Projector is OFF.\"); }\n}\n\n// Facade class\nclass HomeTheaterFacade {\n    private Amplifier amplifier;\n    private DVDPlayer dvdPlayer;\n    private Projector projector;\n\n    public HomeTheaterFacade(Amplifier amp, DVDPlayer dvd, Projector proj) {\n        this.amplifier = amp;\n        this.dvdPlayer = dvd;\n        this.projector = proj;\n    }\n\n    public void watchMovie() {\n        System.out.println(\"Setting up movie...\");\n        amplifier.on();\n        projector.on();\n        dvdPlayer.play();\n    }\n\n    public void endMovie() {\n        System.out.println(\"Shutting down movie...\");\n        dvdPlayer.stop();\n        projector.off();\n        amplifier.off();\n    }\n}\n\n// Client code\npublic class FacadePatternDemo {\n    public static void main(String[] args) {\n        Amplifier amp = new Amplifier();\n        DVDPlayer dvd = new DVDPlayer();\n        Projector proj = new Projector();\n\n        HomeTheaterFacade homeTheater = new HomeTheaterFacade(amp, dvd, proj);\n\n        homeTheater.watchMovie();\n        homeTheater.endMovie();\n    }\n}\n
Output
Setting up movie...\nAmplifier is ON.\nProjector is ON.\nPlaying movie.\nShutting down movie...\nStopping movie.\nProjector is OFF.\nAmplifier is OFF.\n
Explanation: Spring Boot Example

In Spring Boot, the Facade pattern can be applied to services or controllers to hide the complexity of business logic or external systems. For example, a facade class can wrap multiple service calls or integrate external APIs to provide a simplified interface to the client (like a REST controller).

Let's go through an example of how to apply the Facade pattern in a Spring Boot application.

Example Scenario: A Payment System interacts with several services (like PaymentGatewayService, NotificationService, and OrderService). We create a PaymentFacade to simplify the interaction.

Step-1: Subsystem Services
@Service\npublic class PaymentGatewayService {\n    public void processPayment(String orderId) {\n        System.out.println(\"Processing payment for order: \" + orderId);\n    }\n}\n\n@Service\npublic class NotificationService {\n    public void sendNotification(String message) {\n        System.out.println(\"Sending notification: \" + message);\n    }\n}\n\n@Service\npublic class OrderService {\n    public void completeOrder(String orderId) {\n        System.out.println(\"Completing order: \" + orderId);\n    }\n}\n
Step-2: Facade Class
@Service\npublic class PaymentFacade {\n\n    private final PaymentGatewayService paymentGatewayService;\n    private final NotificationService notificationService;\n    private final OrderService orderService;\n\n    @Autowired\n    public PaymentFacade(PaymentGatewayService paymentGatewayService,\n                        NotificationService notificationService,\n                        OrderService orderService) {\n        this.paymentGatewayService = paymentGatewayService;\n        this.notificationService = notificationService;\n        this.orderService = orderService;\n    }\n\n    public void makePayment(String orderId) {\n        System.out.println(\"Initiating payment process...\");\n        paymentGatewayService.processPayment(orderId);\n        orderService.completeOrder(orderId);\n        notificationService.sendNotification(\"Payment completed for order: \" + orderId);\n    }\n}\n
Step-3: Controller
@RestController\n@RequestMapping(\"/api/payment\")\npublic class PaymentController {\n\n    private final PaymentFacade paymentFacade;\n\n    @Autowired\n    public PaymentController(PaymentFacade paymentFacade) {\n        this.paymentFacade = paymentFacade;\n    }\n\n    @PostMapping(\"/pay/{orderId}\")\n    public ResponseEntity<String> pay(@PathVariable String orderId) {\n        paymentFacade.makePayment(orderId);\n        return ResponseEntity.ok(\"Payment successful for order: \" + orderId);\n    }\n}\n
Explanation "},{"location":"fundamentaldives/DesignPatterns/Facade/#summary","title":"Summary","text":"

The Facade Pattern is a powerful tool for simplifying interactions with complex systems. It is especially useful when working with large subsystems or external APIs, as it encapsulates the internal workings and provides a simple interface to clients. In a Spring Boot application, you can use it to manage complex business logic or interactions with multiple services within a single, cohesive facade. However, it should be used judiciously to avoid over-abstraction or unnecessary complexity.

Note

This makes the Facade pattern a valuable asset in both object-oriented design and modern frameworks like Spring Boot.

"},{"location":"fundamentaldives/DesignPatterns/Facade/#this-pattern-is-best-used-when","title":"This pattern is best used when:","text":"
- You need to simplify client interactions.\n- You want to decouple the client from a complex subsystem.\n- You aim to improve maintainability and reduce dependencies.\n
"},{"location":"fundamentaldives/DesignPatterns/Facade/#avoid-using-it-when","title":"Avoid using it when:","text":"
- The system is already simple.\n- Performance is a key concern.\n
"},{"location":"fundamentaldives/DesignPatterns/FactoryMethod/","title":"Factory Method","text":""},{"location":"fundamentaldives/DesignPatterns/FactoryMethod/#what","title":"What ?","text":"

Factory Method is a creational design pattern that provides an interface for creating objects in a superclass, but allows subclasses to alter the type of objects that will be created (i.e., to decide which class to instantiate). This pattern delegates the responsibility of object creation to subclasses rather than using a direct constructor call. It is one of the most widely used creational design patterns because it enables the creation of objects without specifying the exact class of the object that will be created.

You provide a \"factory\" method that the client code calls to get the object, but the actual object that gets created is determined at runtime (based on some logic).

"},{"location":"fundamentaldives/DesignPatterns/FactoryMethod/#when-to-use","title":"When to Use ?","text":""},{"location":"fundamentaldives/DesignPatterns/FactoryMethod/#when-not-to-use","title":"When Not to Use ?","text":""},{"location":"fundamentaldives/DesignPatterns/FactoryMethod/#where-it-shines","title":"Where it Shines ?","text":""},{"location":"fundamentaldives/DesignPatterns/FactoryMethod/#advantages","title":"Advantages","text":""},{"location":"fundamentaldives/DesignPatterns/FactoryMethod/#structure","title":"Structure","text":"
  1. Product Interface: Defines the interface for the object being created.
  2. Concrete Product: Implements the product interface.
  3. Creator: Declares the factory method which returns an object of type Product.
  4. Concrete Creator: Overrides the factory method to return a specific product instance.
"},{"location":"fundamentaldives/DesignPatterns/FactoryMethod/#how-to-implement","title":"How To Implement ?","text":"Structured Example Step-1: Define a Product Interface
public interface Notification {\n    void notifyUser();\n}\n
Step-2: Create Concrete Implementations of the Product
public class SMSNotification implements Notification {\n    @Override\n    public void notifyUser() {\n        System.out.println(\"Sending an SMS notification.\");\n    }\n}\n\npublic class EmailNotification implements Notification {\n    @Override\n    public void notifyUser() {\n        System.out.println(\"Sending an Email notification.\");\n    }\n}\n
Step-3: Create an Abstract Factory Class
public abstract class NotificationFactory {\n    public abstract Notification createNotification();\n}\n
Step-4: Implement Concrete Factory Classes
public class SMSNotificationFactory extends NotificationFactory {\n    @Override\n    public Notification createNotification() {\n        return new SMSNotification();\n    }\n}\n\npublic class EmailNotificationFactory extends NotificationFactory {\n    @Override\n    public Notification createNotification() {\n        return new EmailNotification();\n    }\n}\n
Step-5: Usage in Client Code
public class Client {\n    public static void main(String[] args) {\n        NotificationFactory factory = new SMSNotificationFactory();\n        Notification notification = factory.createNotification();\n        notification.notifyUser();\n\n        factory = new EmailNotificationFactory();\n        notification = factory.createNotification();\n        notification.notifyUser();\n    }\n}\n
Spring Boot Example

Spring Boot relies heavily on dependency injection (DI) and Inversion of Control (IoC), which means Spring beans can act as factory classes to produce the desired objects.

"},{"location":"fundamentaldives/DesignPatterns/FactoryMethod/#example-notification-factory-with-spring-boot","title":"Example: Notification Factory with Spring Boot","text":"Define the Product Interface and Implementations (Same as Before)
public interface Notification {\n    void notifyUser();\n}\n\npublic class SMSNotification implements Notification {\n    @Override\n    public void notifyUser() {\n        System.out.println(\"Sending an SMS notification.\");\n    }\n}\n\npublic class EmailNotification implements Notification {\n    @Override\n    public void notifyUser() {\n        System.out.println(\"Sending an Email notification.\");\n    }\n}\n
Create a Spring Factory Class
import org.springframework.stereotype.Service;\n\n// This class acts as the Factory. Declaring it as a Spring @Service (or @Component) bean lets Spring manage it.\n@Service\npublic class NotificationFactory {\n    public Notification createNotification(String type) {\n        if (type.equalsIgnoreCase(\"SMS\")) {\n            return new SMSNotification();\n        } else if (type.equalsIgnoreCase(\"Email\")) {\n            return new EmailNotification();\n        }\n        throw new IllegalArgumentException(\"Unknown notification type: \" + type);\n    }\n}\n
Use the Factory Class in a Spring Controller
import org.springframework.beans.factory.annotation.Autowired;\nimport org.springframework.web.bind.annotation.GetMapping;\nimport org.springframework.web.bind.annotation.PathVariable;\nimport org.springframework.web.bind.annotation.RestController;\n\n@RestController\npublic class NotificationController {\n\n    @Autowired\n    private NotificationFactory notificationFactory;\n\n    @GetMapping(\"/notify/{type}\")\n    public String sendNotification(@PathVariable String type) {\n        Notification notification = notificationFactory.createNotification(type);\n        notification.notifyUser();\n        return \"Notification sent: \" + type;\n    }\n}\n

Once the Spring Boot application is running, accessing /notify/SMS or /notify/Email dynamically creates and sends the corresponding notification.

"},{"location":"fundamentaldives/DesignPatterns/FactoryMethod/#summary","title":"Summary","text":"

The Factory Pattern enables dynamic object creation without specifying exact classes, reducing coupling and improving maintainability. It uses a factory method to determine which class to instantiate. In Spring Boot, this pattern integrates seamlessly through factory beans and dependency injection, providing flexible and condition-based object creation.

Note

Factory Method is especially helpful in modular systems where new functionalities might be added frequently, and we want to minimize the impact of changes to existing code.

"},{"location":"fundamentaldives/DesignPatterns/Iterator/","title":"Iterator","text":"

The Iterator pattern is a behavioral design pattern that allows sequential access to elements of a collection without exposing its underlying structure. The goal is to provide a way to access the elements of an aggregate object (such as an array, list, or set) one by one, without needing to understand how the collection is implemented.

"},{"location":"fundamentaldives/DesignPatterns/Iterator/#what","title":"What ?","text":"

Iterator is a behavioral design pattern that lets you traverse elements of a collection without exposing its underlying representation (list, stack, tree, etc.).

Key Components

"},{"location":"fundamentaldives/DesignPatterns/Iterator/#when-to-use","title":"When to Use ?","text":""},{"location":"fundamentaldives/DesignPatterns/Iterator/#when-not-to-use","title":"When Not to Use ?","text":""},{"location":"fundamentaldives/DesignPatterns/Iterator/#advantages","title":"Advantages","text":""},{"location":"fundamentaldives/DesignPatterns/Iterator/#disadvantages","title":"DisAdvantages","text":""},{"location":"fundamentaldives/DesignPatterns/Iterator/#how-to-implement","title":"How to Implement ?","text":"Sample Example

Let's go through a simple Java example of a custom iterator for a list of strings.

Defining the Iterator Interface
interface Iterator<T> {\n    boolean hasNext();\n    T next();\n}\n
Creating a Concrete Iterator
class NameIterator implements Iterator<String> {\n    private String[] names;\n    private int index;\n\n    public NameIterator(String[] names) {\n        this.names = names;\n    }\n\n    @Override\n    public boolean hasNext() {\n        return index < names.length;\n    }\n\n    @Override\n    public String next() {\n        if (this.hasNext()) {\n            return names[index++];\n        }\n        return null;\n    }\n}\n
Defining the Aggregate (Collection) Interface
interface NameCollection {\n    Iterator<String> getIterator();\n}\n
Implementing the Concrete Aggregate (Collection)
class NameRepository implements NameCollection {\n    private String[] names = {\"John\", \"Alice\", \"Robert\", \"Michael\"};\n\n    @Override\n    public Iterator<String> getIterator() {\n        return new NameIterator(names);\n    }\n}\n
Usage Example in Java
public class IteratorPatternDemo {\n    public static void main(String[] args) {\n        NameRepository namesRepository = new NameRepository();\n        Iterator<String> iterator = namesRepository.getIterator();\n\n        while (iterator.hasNext()) {\n            String name = iterator.next();\n            System.out.println(\"Name: \" + name);\n        }\n    }\n}\n
Using Java\u2019s Built-in Iterators

Java already provides built-in iterators for its collection framework (Iterator, ListIterator, and Spliterator).

Example with Java\u2019s Built-in Iterator
import java.util.ArrayList;\nimport java.util.Iterator;\nimport java.util.List;\n\npublic class BuiltInIteratorExample {\n    public static void main(String[] args) {\n        List<String> names = new ArrayList<>();\n        names.add(\"John\");\n        names.add(\"Alice\");\n        names.add(\"Robert\");\n\n        Iterator<String> iterator = names.iterator();\n        while (iterator.hasNext()) {\n            System.out.println(\"Name: \" + iterator.next());\n        }\n    }\n}\n
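Since ListIterator was mentioned above, here is a minimal sketch of it as well (the class name is illustrative): it adds in-place modification and backward traversal on top of Iterator.
import java.util.ArrayList;\nimport java.util.List;\nimport java.util.ListIterator;\n\npublic class ListIteratorExample {\n    public static void main(String[] args) {\n        List<String> names = new ArrayList<>(List.of(\"John\", \"Alice\", \"Robert\"));\n\n        ListIterator<String> it = names.listIterator();\n        while (it.hasNext()) {\n            if (it.next().equals(\"Alice\")) {\n                it.set(\"Alice (updated)\"); // modify in place while iterating\n            }\n        }\n\n        // The cursor is now at the end, so we can walk backwards\n        while (it.hasPrevious()) {\n            System.out.println(\"Name: \" + it.previous());\n        }\n    }\n}\n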
Spring Boot Example

Spring Boot doesn\u2019t explicitly use the Iterator pattern as part of its core framework, but it does utilize Iterators internally (e.g., when dealing with ApplicationContext beans or data access layers). You can integrate the Iterator pattern within Spring Boot to handle collections of objects such as configurations, database entities, or APIs.

Create a Model Class
public class Book {\n    private String title;\n\n    public Book(String title) {\n        this.title = title;\n    }\n\n    public String getTitle() {\n        return title;\n    }\n}\n
Create a Repository to Hold Books
import java.util.ArrayList;\nimport java.util.Iterator;\nimport java.util.List;\n\npublic class BookRepository {\n    private List<Book> books = new ArrayList<>();\n\n    public BookRepository() {\n        books.add(new Book(\"Spring in Action\"));\n        books.add(new Book(\"Java 8 in Action\"));\n        books.add(new Book(\"Microservices Patterns\"));\n    }\n\n    public Iterator<Book> getIterator() {\n        return books.iterator();\n    }\n}\n
Service to Fetch Books Using Iterator
import org.springframework.stereotype.Service;\nimport java.util.Iterator;\n\n@Service\npublic class BookService {\n    private final BookRepository bookRepository = new BookRepository();\n\n    public void printBooks() {\n        Iterator<Book> iterator = bookRepository.getIterator();\n        while (iterator.hasNext()) {\n            Book book = iterator.next();\n            System.out.println(\"Book: \" + book.getTitle());\n        }\n    }\n}\n
Controller to Trigger the Book Listing
import org.springframework.beans.factory.annotation.Autowired;\nimport org.springframework.web.bind.annotation.GetMapping;\nimport org.springframework.web.bind.annotation.RequestMapping;\nimport org.springframework.web.bind.annotation.RestController;\n\n@RestController\n@RequestMapping(\"/books\")\npublic class BookController {\n\n    @Autowired\n    private BookService bookService;\n\n    @GetMapping\n    public void listBooks() {\n        bookService.printBooks();\n    }\n}\n

Once the application is running, accessing http://localhost:8080/books prints the list of books using the iterator.

"},{"location":"fundamentaldives/DesignPatterns/Iterator/#summary","title":"Summary","text":"

The Iterator pattern is a powerful way to decouple iteration logic from collections, ensuring a clean separation of concerns. It\u2019s useful in Java projects where complex or multiple ways of traversal are required. When integrated into Spring Boot, the pattern can be applied to iterate over configurations, data models, or APIs efficiently.

"},{"location":"fundamentaldives/DesignPatterns/Prototype/","title":"Prototype","text":""},{"location":"fundamentaldives/DesignPatterns/Prototype/#what","title":"What ?","text":"

The Prototype Pattern is a creational design pattern used when the cost of creating a new object is expensive or complicated. Instead of creating new instances from scratch, this pattern suggests cloning existing objects to produce new ones.

The pattern allows cloning or copying existing instances to create new ones, ensuring that new objects are created without going through the expensive or complex instantiation process repeatedly.

Key Characteristics

"},{"location":"fundamentaldives/DesignPatterns/Prototype/#when-to-use","title":"When to Use ?","text":""},{"location":"fundamentaldives/DesignPatterns/Prototype/#where-not-to-use","title":"Where Not to Use ?","text":""},{"location":"fundamentaldives/DesignPatterns/Prototype/#how-to-implement","title":"How to Implement ?","text":"Simple Example

In Java, the Prototype Pattern is implemented by making the class implement the Cloneable interface and overriding the clone() method from the Object class.

Example of Prototype in Java
class Address {\n    String street;\n    String city;\n\n    public Address(String street, String city) {\n        this.street = street;\n        this.city = city;\n    }\n\n    // Deep Copy\n    public Address(Address address) {\n        this.street = address.street;\n        this.city = address.city;\n    }\n\n    @Override\n    public String toString() {\n        return street + \", \" + city;\n    }\n}\n\nclass Employee implements Cloneable {\n    String name;\n    Address address;\n\n    public Employee(String name, Address address) {\n        this.name = name;\n        this.address = address;\n    }\n\n    // Shallow Copy\n    @Override\n    protected Object clone() throws CloneNotSupportedException {\n        return super.clone();\n    }\n\n    // Deep Copy\n    public Employee deepClone() {\n        return new Employee(this.name, new Address(this.address));\n    }\n\n    @Override\n    public String toString() {\n        return \"Employee: \" + name + \", Address: \" + address;\n    }\n}\n\npublic class PrototypeExample {\n    public static void main(String[] args) throws CloneNotSupportedException {\n        Employee emp1 = new Employee(\"Alice\", new Address(\"123 Street\", \"New York\"));\n\n        // Shallow Clone\n        Employee emp2 = (Employee) emp1.clone();\n\n        // Deep Clone\n        Employee emp3 = emp1.deepClone();\n\n        System.out.println(\"Original: \" + emp1);\n        System.out.println(\"Shallow Copy: \" + emp2);\n        System.out.println(\"Deep Copy: \" + emp3);\n\n        // Modify the original object to see the effect on shallow vs deep copy\n        emp1.address.street = \"456 Avenue\";\n\n        System.out.println(\"After modifying the original object:\");\n        System.out.println(\"Original: \" + emp1);\n        System.out.println(\"Shallow Copy: \" + emp2);\n        System.out.println(\"Deep Copy: \" + emp3);\n    }\n}\n
Output
Original: Employee: Alice, Address: 123 Street, New York\nShallow Copy: Employee: Alice, Address: 123 Street, New York\nDeep Copy: Employee: Alice, Address: 123 Street, New York\n\nAfter modifying the original object:\nOriginal: Employee: Alice, Address: 456 Avenue, New York\nShallow Copy: Employee: Alice, Address: 456 Avenue, New York\nDeep Copy: Employee: Alice, Address: 123 Street, New York\n
Explanation Spring Boot Example

Spring Framework allows defining prototype-scoped beans. Each time you request a bean with the prototype scope, Spring returns a new instance, effectively following the Prototype Pattern.

"},{"location":"fundamentaldives/DesignPatterns/Prototype/#how-to-use-prototype-scope-in-spring-boot","title":"How to Use Prototype Scope in Spring Boot","text":"
  1. Add @Scope annotation to the bean definition.
  2. Use prototype scope to ensure each request gets a new object.
"},{"location":"fundamentaldives/DesignPatterns/Prototype/#simple-prototype-scope-example-in-spring-boot","title":"Simple Prototype Scope Example in Spring Boot","text":"Bean Definition
import org.springframework.context.annotation.Scope;\nimport org.springframework.stereotype.Component;\n\n@Component\n@Scope(\"prototype\")\npublic class Employee {\n    public Employee() {\n        System.out.println(\"New Employee instance created.\");\n    }\n}\n
Controller
import org.springframework.beans.factory.annotation.Autowired;\nimport org.springframework.web.bind.annotation.GetMapping;\nimport org.springframework.web.bind.annotation.RestController;\n\n@RestController\npublic class EmployeeController {\n\n    @Autowired\n    private Employee employee1;\n\n    @Autowired\n    private Employee employee2;\n\n    @GetMapping(\"/employees\")\n    public String getEmployees() {\n        return \"Employee 1: \" + employee1 + \" | Employee 2: \" + employee2;\n    }\n}\n
Output

When you start this application, each of the two @Autowired fields in the controller receives its own prototype instance, so in the startup logs you will see:

New Employee instance created.\nNew Employee instance created.\n

This shows that a new instance is created each time a prototype-scoped bean is injected.

"},{"location":"fundamentaldives/DesignPatterns/Prototype/#summary","title":"Summary","text":"

The Prototype Pattern offers an elegant way to clone existing objects, saving the overhead of complex object creation. It fits well when objects are expensive to create or share the same initial configuration. With Java's cloning mechanisms and Spring Boot's prototype scope, it is easy to implement. However, care must be taken when handling deep versus shallow copies, and the pattern should be avoided when objects are inexpensive to create.

"},{"location":"fundamentaldives/DesignPatterns/Singleton/","title":"Singleton","text":""},{"location":"fundamentaldives/DesignPatterns/Singleton/#what","title":"What ?","text":"

The Singleton Pattern is a creational design pattern that ensures a class has only one instance and provides a global access point to that instance.

Singleton is useful when exactly one instance of a class is needed across the system, like for logging, configuration, database connection pools, etc.

"},{"location":"fundamentaldives/DesignPatterns/Singleton/#when-to-use","title":"When to Use ?","text":""},{"location":"fundamentaldives/DesignPatterns/Singleton/#when-not-to-use","title":"When Not to Use ?","text":""},{"location":"fundamentaldives/DesignPatterns/Singleton/#why-use","title":"Why Use ?","text":""},{"location":"fundamentaldives/DesignPatterns/Singleton/#how-to-implement","title":"How to Implement ?","text":""},{"location":"fundamentaldives/DesignPatterns/Singleton/#eager-initialization","title":"Eager Initialization","text":"

The instance is created when the class is loaded. This is the simplest way, but it doesn\u2019t support lazy loading.

Eager Initialization Implementation
public class EagerSingleton {\n    private static final EagerSingleton INSTANCE = new EagerSingleton();\n\n    // Private constructor to prevent instantiation\n    private EagerSingleton() {}\n\n    public static EagerSingleton getInstance() {\n        return INSTANCE;\n    }\n}\n

When to Use Eager

When the instance is required throughout the application, and we are okay with it being created at startup.

"},{"location":"fundamentaldives/DesignPatterns/Singleton/#lazy-initialization","title":"Lazy Initialization","text":"

The instance is created only when needed (on first access). But this version is not thread-safe.

Lazy Initialization Implementation
public class LazySingleton {\n    private static LazySingleton instance;\n\n    private LazySingleton() {}\n\n    public static LazySingleton getInstance() {\n        if (instance == null) {\n            instance = new LazySingleton();\n        }\n        return instance;\n    }\n}\n

Issue with Lazy

Lazy Initialization is not suitable for multithreaded environments.
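To see why, here is a minimal, non-deterministic sketch (the demo class name is ours; it reuses the LazySingleton above): with unlucky timing, both threads pass the null check before either assignment happens, and two different instances are created.
public class LazySingletonRaceDemo {\n    public static void main(String[] args) throws InterruptedException {\n        Runnable task = () -> System.out.println(\n                \"Instance: \" + System.identityHashCode(LazySingleton.getInstance()));\n\n        Thread t1 = new Thread(task);\n        Thread t2 = new Thread(task);\n        t1.start();\n        t2.start();\n        t1.join();\n        t2.join();\n        // If both threads evaluated 'instance == null' together,\n        // the two printed identity hash codes will differ.\n    }\n}\n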

"},{"location":"fundamentaldives/DesignPatterns/Singleton/#using-synchronized","title":"Using Synchronized","text":"

This solves the issue of thread safety by synchronizing the access method.

Synchronized Implementation
public class ThreadSafeSingleton {\n    private static ThreadSafeSingleton instance;\n\n    private ThreadSafeSingleton() {}\n\n    public static synchronized ThreadSafeSingleton getInstance() {\n        if (instance == null) {\n            instance = new ThreadSafeSingleton();\n        }\n        return instance;\n    }\n}\n

Issue with Synchronized

Performance overhead due to synchronization.

"},{"location":"fundamentaldives/DesignPatterns/Singleton/#double-checked-locking","title":"Double-Checked Locking","text":"

This improves performance by entering the synchronized block only when no instance exists yet; the volatile keyword ensures that a partially constructed instance is never visible to other threads.

Double-Checked Locking Implementation
public class DoubleCheckedLockingSingleton {\n    private static volatile DoubleCheckedLockingSingleton instance;\n\n    private DoubleCheckedLockingSingleton() {}\n\n    public static DoubleCheckedLockingSingleton getInstance() {\n        if (instance == null) {\n            synchronized (DoubleCheckedLockingSingleton.class) {\n                if (instance == null) {\n                    instance = new DoubleCheckedLockingSingleton();\n                }\n            }\n        }\n        return instance;\n    }\n}\n
"},{"location":"fundamentaldives/DesignPatterns/Singleton/#bill-pugh-singleton","title":"Bill Pugh Singleton","text":"

This approach leverages static inner classes, which ensures thread safety and lazy loading without synchronization overhead.

Bill Pugh Singleton Implementation
public class BillPughSingleton {\n    private BillPughSingleton() {}\n\n    // Static inner class responsible for holding the instance\n    private static class SingletonHelper {\n        private static final BillPughSingleton INSTANCE = new BillPughSingleton();\n    }\n\n    public static BillPughSingleton getInstance() {\n        return SingletonHelper.INSTANCE;\n    }\n}\n

Best Practice

"},{"location":"fundamentaldives/DesignPatterns/Singleton/#enum-singleton","title":"Enum Singleton","text":"

This approach is the most concise and prevents issues with serialization and reflection attacks.

Enum Singleton Implementation
public enum EnumSingleton {\n    INSTANCE;\n\n    public void someMethod() {\n        System.out.println(\"Enum Singleton Instance\");\n    }\n}\n

Recommended

"},{"location":"fundamentaldives/DesignPatterns/Singleton/#in-spring-boot","title":"In Spring Boot","text":"

In Spring Boot, Spring's IoC (Inversion of Control) container creates each bean as a singleton by default, so you don't need to implement the Singleton pattern explicitly. Instead, you annotate the class with @Component or @Service, and Spring ensures that only one instance is created and managed.

Spring Boot Example How to init in a Spring Boot application
import org.springframework.stereotype.Component;\n\n@Component\npublic class MySingletonService {\n    public void doSomething() {\n        System.out.println(\"Singleton service is working\");\n    }\n}\n
How to use in a Spring Boot application
import org.springframework.beans.factory.annotation.Autowired;\nimport org.springframework.web.bind.annotation.GetMapping;\nimport org.springframework.web.bind.annotation.RestController;\n\n@RestController\npublic class MyController {\n\n    private final MySingletonService singletonService;\n\n    @Autowired\n    public MyController(MySingletonService singletonService) {\n        this.singletonService = singletonService;\n    }\n\n    @GetMapping(\"/test\")\n    public String test() {\n        singletonService.doSomething();\n        return \"Check logs for Singleton Service\";\n    }\n}\n

Note

Spring manages lifecycle and thread safety for you, ensuring it behaves like a Singleton without extra code.

"},{"location":"fundamentaldives/DesignPatterns/Singleton/#comparison","title":"Comparison","text":"Implementation Thread Safety Lazy Initialization Serialization Safe Ease of Implementation Eager Initialization Yes No No Easy Lazy Initialization No Yes No Easy Thread-safe Singleton (Synchronized) Yes Yes No Moderate Double-Checked Locking Singleton Yes Yes No Moderate Bill Pugh Singleton Yes Yes No Best Practice Enum Singleton Yes Yes Yes Recommended"},{"location":"fundamentaldives/DesignPatterns/Singleton/#potential-issues","title":"Potential Issues","text":""},{"location":"fundamentaldives/DesignPatterns/Singleton/#summary","title":"Summary","text":"

The Singleton Pattern is a powerful tool when used appropriately. However, misuse can lead to tightly coupled code, concurrency issues, and testing difficulties.

Note

If you are working with Spring Boot, rely on Spring\u2019s built-in singleton beans instead of implementing your own singleton logic. Where thread safety, serialization, or distributed behavior is required, choose the appropriate Singleton implementation like Enum Singleton or Bill Pugh Singleton.

By default, a single instance of the bean is created and shared across the entire application (singleton scope). If two or more components use the same bean, they will refer to the same instance. However, if you need a new instance every time a bean is requested, you can change the scope to prototype. But be mindful: Spring's singleton scope simplifies things like caching and state consistency, while prototype beans may introduce complexity.

"},{"location":"fundamentaldives/DesignPatterns/Strategy/","title":"Strategy","text":""},{"location":"fundamentaldives/DesignPatterns/Strategy/#what","title":"What ?","text":"

The Strategy Pattern is a behavioral design pattern that allows you to define a family of algorithms, encapsulate each one, and make them interchangeable. It lets the algorithm vary independently from clients that use it, promoting flexibility, scalability, and separation of concerns.

In simpler terms, It enables selecting a specific algorithm at runtime based on the context, without modifying the client code.

"},{"location":"fundamentaldives/DesignPatterns/Strategy/#when-to-use","title":"When to Use?","text":""},{"location":"fundamentaldives/DesignPatterns/Strategy/#why-to-use","title":"Why to Use ?","text":""},{"location":"fundamentaldives/DesignPatterns/Strategy/#when-not-to-use","title":"When Not to Use?","text":""},{"location":"fundamentaldives/DesignPatterns/Strategy/#how-to-implement","title":"How to Implement ?","text":"Simple Example

Let\u2019s implement a payment system that allows different payment methods using the Strategy Pattern. We will define multiple payment strategies (like Credit Card and PayPal) and switch between them dynamically.

Step-1: Create the Strategy Interface
// PaymentStrategy.java\npublic interface PaymentStrategy {\n    void pay(int amount);\n}\n
Step-2: Implement Concrete Strategies
// CreditCardStrategy.java\npublic class CreditCardStrategy implements PaymentStrategy {\n    private String cardNumber;\n    private String name;\n\n    public CreditCardStrategy(String cardNumber, String name) {\n        this.cardNumber = cardNumber;\n        this.name = name;\n    }\n\n    @Override\n    public void pay(int amount) {\n        System.out.println(amount + \" paid with credit card.\");\n    }\n}\n\n// PayPalStrategy.java\npublic class PayPalStrategy implements PaymentStrategy {\n    private String email;\n\n    public PayPalStrategy(String email) {\n        this.email = email;\n    }\n\n    @Override\n    public void pay(int amount) {\n        System.out.println(amount + \" paid using PayPal.\");\n    }\n}\n
Step-3: Create a Context Class
// PaymentContext.java\npublic class PaymentContext {\n    private PaymentStrategy strategy;\n\n    public PaymentContext(PaymentStrategy strategy) {\n        this.strategy = strategy;\n    }\n\n    public void setPaymentStrategy(PaymentStrategy strategy) {\n        this.strategy = strategy;\n    }\n\n    public void pay(int amount) {\n        strategy.pay(amount);\n    }\n}\n
Step-4: Test the Strategy Pattern
public class StrategyPatternDemo {\n    public static void main(String[] args) {\n        PaymentContext context = new PaymentContext(new CreditCardStrategy(\"1234-5678-9012\", \"John Doe\"));\n        context.pay(100);\n\n        // Switch strategy at runtime\n        context.setPaymentStrategy(new PayPalStrategy(\"john.doe@example.com\"));\n        context.pay(200);\n    }\n}\n
Spring Boot Example

In a Spring Boot application, the Strategy Pattern can be applied by injecting different strategy implementations using Spring\u2019s dependency injection.

Let's build a simple notification service where the user can choose between sending notifications via Email or SMS.

Create the Strategy Interface
// NotificationStrategy.java\npublic interface NotificationStrategy {\n    void sendNotification(String message);\n}\n
Implement the Concrete Strategies
// EmailNotification.java\nimport org.springframework.stereotype.Service;\n\n@Service(\"email\")\npublic class EmailNotification implements NotificationStrategy {\n    @Override\n    public void sendNotification(String message) {\n        System.out.println(\"Sending Email: \" + message);\n    }\n}\n\n// SMSNotification.java\nimport org.springframework.stereotype.Service;\n\n@Service(\"sms\")\npublic class SMSNotification implements NotificationStrategy {\n    @Override\n    public void sendNotification(String message) {\n        System.out.println(\"Sending SMS: \" + message);\n    }\n}\n
Create a Context Class to Use the Strategy
// NotificationContext.java\nimport java.util.Map;\n\nimport org.springframework.stereotype.Component;\n\n@Component\npublic class NotificationContext {\n\n    private final Map<String, NotificationStrategy> strategies;\n\n    public NotificationContext(Map<String, NotificationStrategy> strategies) {\n        this.strategies = strategies;\n    }\n\n    public void send(String type, String message) {\n        NotificationStrategy strategy = strategies.get(type);\n        if (strategy == null) {\n            throw new IllegalArgumentException(\"No such notification type\");\n        }\n        strategy.sendNotification(message);\n    }\n}\n
Create the Controller to Use the Notification Service
// NotificationController.java\nimport org.springframework.web.bind.annotation.*;\n\n@RestController\n@RequestMapping(\"/notify\")\npublic class NotificationController {\n\n    private final NotificationContext context;\n\n    public NotificationController(NotificationContext context) {\n        this.context = context;\n    }\n\n    @PostMapping(\"/{type}\")\n    public void sendNotification(@PathVariable String type, @RequestBody String message) {\n        context.send(type, message);\n    }\n}\n
Application Configuration and Running
// Application.java\nimport org.springframework.boot.SpringApplication;\nimport org.springframework.boot.autoconfigure.SpringBootApplication;\n\n@SpringBootApplication\npublic class Application {\n    public static void main(String[] args) {\n        SpringApplication.run(Application.class, args);\n    }\n}\n
How It Works Alternative Ways

Using Enum-based Strategies: If the algorithms are simple and limited, you can use an enum with methods for strategy logic.

public enum Operation {\n    ADD {\n        @Override\n        public int execute(int a, int b) {\n            return a + b;\n        }\n    },\n    SUBTRACT {\n        @Override\n        public int execute(int a, int b) {\n            return a - b;\n        }\n    };\n\n    public abstract int execute(int a, int b);\n}\n
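A quick usage sketch for this enum-based variant (the demo class name is illustrative):
public class EnumStrategyDemo {\n    public static void main(String[] args) {\n        // The enum constant itself carries the algorithm\n        System.out.println(Operation.ADD.execute(5, 3));      // 8\n        System.out.println(Operation.SUBTRACT.execute(5, 3)); // 2\n    }\n}\n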

Using Java 8 Lambdas: Since Java 8, you can use lambdas to avoid creating multiple strategy classes.

import java.util.function.Consumer;\n\npublic class LambdaStrategyDemo {\n    public static void main(String[] args) {\n        Consumer<String> emailStrategy = message -> System.out.println(\"Email: \" + message);\n        Consumer<String> smsStrategy = message -> System.out.println(\"SMS: \" + message);\n\n        emailStrategy.accept(\"Hello via Email!\");\n        smsStrategy.accept(\"Hello via SMS!\");\n    }\n}\n
"},{"location":"fundamentaldives/DesignPatterns/Strategy/#summary","title":"Summary","text":"

The Strategy Pattern is a powerful way to manage dynamic behavior selection in a clean and decoupled way. In a Spring Boot application, you can easily integrate it by using dependency injection. However, it\u2019s essential to use the pattern wisely to avoid unnecessary complexity or overhead. Use it when multiple behaviors or algorithms need to vary independently without modifying client code. Avoid using it if the added complexity is not justified.

"},{"location":"fundamentaldives/FundamentalConcepts/ConcurrencyParallelism/","title":"Concurrency and Parallelism","text":"

Both concurrency and parallelism refer to ways a computer performs multiple tasks, but they differ in how the tasks are executed. In this article, let's go through both, along with related concepts like threads, processes, and programs.

"},{"location":"fundamentaldives/FundamentalConcepts/ConcurrencyParallelism/#what-is-process","title":"What is Process ?","text":"

A process is an instance of a running program (executable code). Each process runs in isolation and gets its own memory space, e.g., opening a browser or a text editor creates a process.

Characteristics

"},{"location":"fundamentaldives/FundamentalConcepts/ConcurrencyParallelism/#what-is-thread","title":"What is Thread ?","text":"

A thread is the smallest unit of execution within a process. Threads run within a process and share the same memory space, e.g., a web browser might use multiple threads: one for rendering pages, one for handling user input, and another for downloading files.

Characteristics

"},{"location":"fundamentaldives/FundamentalConcepts/ConcurrencyParallelism/#threads-vs-processes","title":"Threads vs Processes","text":"Aspect Threads Processes Memory Space Shared within the same process Separate for each process Overhead Low (lightweight) High (needs its own resources) Communication Easy (shared memory) Complex (requires IPC) Execution Within a single process Each process runs independently Parallelism Can run on multiple cores Can run on multiple cores"},{"location":"fundamentaldives/FundamentalConcepts/ConcurrencyParallelism/#what-is-concurrency","title":"What is Concurrency ?","text":"

Concurrency is when multiple tasks make progress within the same time frame, but not necessarily at the same exact moment. Tasks switch back and forth, sharing resources like CPU time.

Analogy

It\u2019s like a chef preparing multiple dishes, working on one dish for a few minutes, switching to another, and then returning to the previous dish.

"},{"location":"fundamentaldives/FundamentalConcepts/ConcurrencyParallelism/#concurrency-with-threads","title":"Concurrency with Threads","text":""},{"location":"fundamentaldives/FundamentalConcepts/ConcurrencyParallelism/#concurrency-with-processes","title":"Concurrency with Processes","text":"

Note

Concurrency focuses on dealing with multiple tasks by time-sharing resources.

"},{"location":"fundamentaldives/FundamentalConcepts/ConcurrencyParallelism/#issues-in-concurrency","title":"Issues in Concurrency","text":"

Below are key issues associated with concurrency.

Race Conditions

When two or more threads/processes try to access and modify shared data simultaneously, the final result may depend on the sequence of execution, leading to unpredictable outcomes.

Example: Two bank transactions updating the same account balance at the same time might result in lost updates.

How to Mitigate

Use synchronization mechanisms like locks or mutexes to control access to shared resources.
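As a minimal sketch of that mitigation (class and method names are illustrative), a shared counter can be guarded so that only one thread performs the read-modify-write at a time:
public class SafeCounter {\n    private int count = 0;\n\n    // Without 'synchronized', two threads could read the same value\n    // and both write back value + 1, losing one update.\n    public synchronized void increment() {\n        count++;\n    }\n\n    public synchronized int get() {\n        return count;\n    }\n}\n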

Deadlocks

Occurs when two or more threads/processes block each other by holding resources and waiting for resources held by the other. Example: Process A holds Resource 1 and waits for Resource 2, which is held by Process B, and vice versa.

How to Mitigate

Use techniques like resource ordering, deadlock detection, or timeouts to avoid deadlocks.
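Resource ordering can be sketched as follows (names are illustrative): every thread acquires the two locks in the same global order, by account id, so the circular wait can never form.
class Account {\n    private final int id;\n    private int balance;\n\n    Account(int id, int balance) { this.id = id; this.balance = balance; }\n\n    int getId() { return id; }\n    void withdraw(int amount) { balance -= amount; }\n    void deposit(int amount) { balance += amount; }\n}\n\npublic class OrderedTransfer {\n    // Locks are always taken lower-id first, so two concurrent\n    // transfers A->B and B->A cannot hold them in opposite order.\n    public static void transfer(Account from, Account to, int amount) {\n        Account first = from.getId() < to.getId() ? from : to;\n        Account second = (first == from) ? to : from;\n\n        synchronized (first) {\n            synchronized (second) {\n                from.withdraw(amount);\n                to.deposit(amount);\n            }\n        }\n    }\n}\n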

Livelock

In livelock, threads are constantly changing states to respond to each other but never make actual progress. It\u2019s similar to two people trying to step aside but always stepping in each other\u2019s way. Example: Two threads repeatedly yield control to avoid conflict, but neither progresses.

How to Mitigate

Add randomness or back-off mechanisms to break the cycle.
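One way to sketch this (names are illustrative) is tryLock plus a small randomized sleep, which breaks the symmetric retry cycle that keeps both threads from progressing:
import java.util.concurrent.ThreadLocalRandom;\nimport java.util.concurrent.locks.ReentrantLock;\n\npublic class BackoffWorker {\n    private final ReentrantLock lockA = new ReentrantLock();\n    private final ReentrantLock lockB = new ReentrantLock();\n\n    public void doWork() throws InterruptedException {\n        while (true) {\n            if (lockA.tryLock()) {\n                try {\n                    if (lockB.tryLock()) {\n                        try {\n                            return; // both locks held: make progress\n                        } finally {\n                            lockB.unlock();\n                        }\n                    }\n                } finally {\n                    lockA.unlock();\n                }\n            }\n            // Random back-off desynchronizes the two contenders\n            Thread.sleep(ThreadLocalRandom.current().nextInt(1, 10));\n        }\n    }\n}\n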

Starvation

A thread or process may be blocked indefinitely because other higher-priority tasks consume all the resources. Example: A low-priority thread never gets CPU time because high-priority threads always take precedence.

How to Mitigate

Use fair scheduling algorithms to ensure all tasks eventually get a chance to execute.
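In Java, one concrete knob for this is a fair ReentrantLock (a sketch; fairness trades some throughput for ordering guarantees):
import java.util.concurrent.locks.ReentrantLock;\n\npublic class FairLockExample {\n    // 'true' requests fairness: waiting threads acquire the lock\n    // roughly in arrival order instead of being barged past.\n    private final ReentrantLock lock = new ReentrantLock(true);\n\n    public void criticalSection() {\n        lock.lock();\n        try {\n            System.out.println(Thread.currentThread().getName() + \" got the lock\");\n        } finally {\n            lock.unlock();\n        }\n    }\n}\n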

Context Switching Overhead

Switching between multiple threads or processes incurs a cost, as the CPU saves and restores the state of each thread. Excessive context switching can reduce performance. Example: An overloaded web server with too many threads may spend more time switching contexts than doing actual work.

How to Mitigate

Minimize the number of threads and optimize the task scheduling.
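A simple way to keep switching in check is to bound the thread count to the hardware, e.g. a fixed pool sized to the available cores instead of one thread per task (a sketch; names are illustrative):
import java.util.concurrent.ExecutorService;\nimport java.util.concurrent.Executors;\n\npublic class BoundedPoolExample {\n    public static void main(String[] args) {\n        int cores = Runtime.getRuntime().availableProcessors();\n        ExecutorService pool = Executors.newFixedThreadPool(cores);\n\n        for (int i = 0; i < 100; i++) {\n            final int taskId = i;\n            // 100 tasks are queued, but only 'cores' threads ever run,\n            // so context switching stays bounded.\n            pool.submit(() -> System.out.println(\"Task \" + taskId\n                    + \" on \" + Thread.currentThread().getName()));\n        }\n        pool.shutdown();\n    }\n}\n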

"},{"location":"fundamentaldives/FundamentalConcepts/ConcurrencyParallelism/#what-is-parallelism","title":"What is Parallelism ?","text":"

Parallelism is when multiple tasks are executed simultaneously, usually on different processors or cores.

Analogy

It\u2019s like having multiple chefs, each cooking one dish at the same time.

"},{"location":"fundamentaldives/FundamentalConcepts/ConcurrencyParallelism/#parallelism-with-threads","title":"Parallelism with Threads","text":""},{"location":"fundamentaldives/FundamentalConcepts/ConcurrencyParallelism/#parallelism-with-processes","title":"Parallelism with Processes","text":"

Note

Parallelism requires multiple CPUs or cores for real simultaneous execution.
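For a concrete taste in Java, parallel streams split a workload across the cores of the common ForkJoinPool (a sketch; the speedup depends on core count and workload size):
import java.util.stream.LongStream;\n\npublic class ParallelSumExample {\n    public static void main(String[] args) {\n        // The range is split into chunks that run simultaneously on\n        // different cores; the partial sums are then combined.\n        long sum = LongStream.rangeClosed(1, 10_000_000L)\n                             .parallel()\n                             .sum();\n        System.out.println(\"Sum = \" + sum);\n    }\n}\n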

"},{"location":"fundamentaldives/FundamentalConcepts/ConcurrencyParallelism/#issues-in-parallelism","title":"Issues in Parallelism","text":"

Below are key issues associated with parallelism.

Load Imbalance

If the workload is not evenly distributed across threads or processes, some cores might remain underutilized while others are overloaded. Example: In a matrix multiplication task, if one thread processes a large chunk and another a small chunk, the first thread might take longer, slowing down the whole task.

How to Mitigate

Use dynamic load balancing or work stealing techniques to distribute the workload effectively.
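Java's ForkJoinPool implements work stealing out of the box: idle workers take queued subtasks from busy ones, smoothing uneven splits. A minimal sketch (the class name and threshold are illustrative):
import java.util.Arrays;\nimport java.util.concurrent.ForkJoinPool;\nimport java.util.concurrent.RecursiveTask;\n\npublic class WorkStealingSum extends RecursiveTask<Long> {\n    private static final int THRESHOLD = 10_000;\n    private final long[] data;\n    private final int start, end;\n\n    public WorkStealingSum(long[] data, int start, int end) {\n        this.data = data;\n        this.start = start;\n        this.end = end;\n    }\n\n    @Override\n    protected Long compute() {\n        if (end - start <= THRESHOLD) {\n            long sum = 0;\n            for (int i = start; i < end; i++) sum += data[i];\n            return sum;\n        }\n        int mid = (start + end) / 2;\n        WorkStealingSum left = new WorkStealingSum(data, start, mid);\n        WorkStealingSum right = new WorkStealingSum(data, mid, end);\n        left.fork();                      // queued; an idle worker may steal it\n        return right.compute() + left.join();\n    }\n\n    public static void main(String[] args) {\n        long[] data = new long[1_000_000];\n        Arrays.fill(data, 1L);\n        long total = ForkJoinPool.commonPool().invoke(\n                new WorkStealingSum(data, 0, data.length));\n        System.out.println(\"Total = \" + total); // 1000000\n    }\n}\n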

Scalability Bottlenecks

As more threads or processes are added, the overhead of synchronization and communication increases, limiting performance improvements. Example: A program may scale well with 4 threads but show diminishing returns with 16 threads due to synchronization overhead.

How to Mitigate

Optimize algorithms for scalability and minimize shared resources to reduce synchronization costs.

False Sharing

Occurs when multiple threads on different cores modify variables that are close in memory, leading to unnecessary cache invalidations and reduced performance. Example: Two threads updating variables in the same cache line can cause frequent cache synchronization, slowing execution.

How to Mitigate

Align data properly in memory to avoid false sharing.
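One mitigation sketch is manual padding so two hot fields land on different cache lines (this assumes the typical 64-byte line and that field layout is preserved; the JDK-internal @Contended annotation achieves the same more robustly but requires special JVM flags):
public class PaddedCounters {\n    // Seven extra longs (56 bytes) plus the value itself roughly fill a\n    // 64-byte cache line, so counterA and counterB do not share one.\n    static final class PaddedLong {\n        volatile long value;\n        long p1, p2, p3, p4, p5, p6, p7; // padding, never read\n    }\n\n    private final PaddedLong counterA = new PaddedLong();\n    private final PaddedLong counterB = new PaddedLong();\n\n    // Intended usage: one thread writes only counterA, another only counterB\n    public void incrementA() { counterA.value++; }\n    public void incrementB() { counterB.value++; }\n}\n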

Communication Overhead

In parallel systems, threads or processes may need to communicate with each other, which adds overhead. Example: In distributed computing, passing messages between nodes can slow down the computation.

How to Mitigate

Reduce communication frequency or use message batching techniques to minimize overhead.

Debugging and Testing Complexity

Debugging concurrent or parallel programs is harder because issues like race conditions or deadlocks may only appear under specific conditions, making them difficult to reproduce. Example: A race condition might only occur when threads execute in a specific order, which is hard to detect in testing.

How to Mitigate

Use debugging tools like thread analyzers and log events to trace execution paths.

"},{"location":"fundamentaldives/FundamentalConcepts/ConcurrencyParallelism/#example-scenarios","title":"Example Scenarios","text":"

Concurrent Programming (e.g., in Java, Python)

Parallel Programming (e.g., using Python's multiprocessing or CUDA for GPU computation)

"},{"location":"fundamentaldives/FundamentalConcepts/ConcurrencyParallelism/#when-to-use","title":"When to Use ?","text":""},{"location":"fundamentaldives/FundamentalConcepts/ConcurrencyParallelism/#common-issues","title":"Common Issues","text":"Data Consistency and Synchronization

Ensuring that shared data remains consistent when accessed by multiple threads or processes is challenging. Example: If multiple threads increment the same counter, the final result may be incorrect without proper synchronization.

How to Mitigate

Use locks, semaphores, or atomic operations to ensure data consistency.
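For the shared-counter case specifically, java.util.concurrent.atomic offers lock-free atomic operations; a small sketch (the class name is illustrative):
import java.util.concurrent.atomic.AtomicInteger;\n\npublic class AtomicCounterExample {\n    public static void main(String[] args) throws InterruptedException {\n        AtomicInteger counter = new AtomicInteger(0);\n\n        Runnable task = () -> {\n            for (int i = 0; i < 10_000; i++) {\n                counter.incrementAndGet(); // atomic read-modify-write\n            }\n        };\n\n        Thread t1 = new Thread(task);\n        Thread t2 = new Thread(task);\n        t1.start(); t2.start();\n        t1.join(); t2.join();\n\n        System.out.println(\"Count = \" + counter.get()); // always 20000\n    }\n}\n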

Performance Trade-offs

Parallel or concurrent execution does not always lead to better performance. In some cases, overhead from synchronization, communication, and context switching can negate performance gains. Example: A parallel algorithm may run slower on a small dataset due to the overhead of managing multiple threads.

How to Mitigate

Assess whether the overhead is justified and use profiling tools to analyze performance.

Non-Deterministic Behavior

In concurrent and parallel systems, the order of execution is not guaranteed, leading to non-deterministic results. Example: Running the same multi-threaded program twice may produce different outcomes, making testing and debugging difficult.

How to Mitigate

Use locks and barriers carefully, and design programs to tolerate or avoid non-determinism where possible.

Resource Contention

Threads and processes compete for shared resources, such as memory, I/O, and network bandwidth, leading to bottlenecks. Example: Multiple processes writing to the same disk simultaneously may degrade performance.

How to Mitigate

Optimize resource usage and avoid unnecessary contention by reducing shared resources.

"},{"location":"fundamentaldives/FundamentalConcepts/ConcurrencyParallelism/#summary","title":"Summary","text":"

Concurrency deals with multiple tasks making progress within the same period (which may or may not happen simultaneously), whereas parallelism deals with tasks running simultaneously on different cores or processors.

Processes and threads are core to both concurrency and parallelism, with threads sharing memory within a process and processes running independently with isolated memory.

While concurrency and parallelism offer significant benefits, they also come with substantial challenges. Managing issues such as race conditions, deadlocks, false sharing, and debugging complexity requires thoughtful design and appropriate use of synchronization techniques. Additionally, scalability bottlenecks and communication overhead can limit the effectiveness of parallel systems.

To mitigate these issues, some fixes are:

"},{"location":"fundamentaldives/FundamentalPrinciples/DRY/","title":"DRY Principle","text":""},{"location":"fundamentaldives/FundamentalPrinciples/DRY/#what","title":"What ?","text":"

The DRY Principle stands for Don\u2019t Repeat Yourself. It is a fundamental software development principle aimed at reducing repetition of code and logic. The main idea is that duplication introduces risk: if you need to update the same logic in multiple places, you might miss some, leading to bugs and inconsistencies. When applied well, it improves maintainability, scalability, and clarity, something many codebases miss.

\"Every piece of knowledge must have a single, unambiguous, authoritative representation within a system.\"

In other words, the DRY principle encourages developers to write modular, reusable code and avoid duplicating the same functionality in multiple places. The aim is to minimize redundancy and write code that does one thing well, making our lives (and the lives of those who maintain our code) much easier.

"},{"location":"fundamentaldives/FundamentalPrinciples/DRY/#when-to-use","title":"When to Use ?","text":""},{"location":"fundamentaldives/FundamentalPrinciples/DRY/#why-to-use","title":"Why to Use ?","text":""},{"location":"fundamentaldives/FundamentalPrinciples/DRY/#when-not-to-use","title":"When Not to Use ?","text":""},{"location":"fundamentaldives/FundamentalPrinciples/DRY/#how-to-use-example","title":"How to Use Example ?","text":"

Let's cover an example where we apply the DRY principle to refactor duplicated logic into reusable methods.

Without DRY (Code Duplication) Example
public class OrderService {\n\n    public void placeOrder(int productId, int quantity) {\n        if (quantity <= 0) {\n            throw new IllegalArgumentException(\"Quantity must be greater than zero.\");\n        }\n        // Logic to place order\n        System.out.println(\"Order placed for product: \" + productId);\n    }\n\n    public void cancelOrder(int orderId) {\n        if (orderId <= 0) {\n            throw new IllegalArgumentException(\"Order ID must be greater than zero.\");\n        }\n        // Logic to cancel order\n        System.out.println(\"Order cancelled: \" + orderId);\n    }\n\n    public void updateOrder(int orderId, int quantity) {\n        if (orderId <= 0) {\n            throw new IllegalArgumentException(\"Order ID must be greater than zero.\");\n        }\n        if (quantity <= 0) {\n            throw new IllegalArgumentException(\"Quantity must be greater than zero.\");\n        }\n        // Logic to update order\n        System.out.println(\"Order updated with new quantity: \" + quantity);\n    }\n}\n
Explanation

We can extract the common logic into a reusable private method to apply the DRY principle.

With DRY (Refactored Code) Example
public class OrderService {\n\n    public void placeOrder(int productId, int quantity) {\n        validateQuantity(quantity);\n        // Logic to place order\n        System.out.println(\"Order placed for product: \" + productId);\n    }\n\n    public void cancelOrder(int orderId) {\n        validateOrderId(orderId);\n        // Logic to cancel order\n        System.out.println(\"Order cancelled: \" + orderId);\n    }\n\n    public void updateOrder(int orderId, int quantity) {\n        validateOrderId(orderId);\n        validateQuantity(quantity);\n        // Logic to update order\n        System.out.println(\"Order updated with new quantity: \" + quantity);\n    }\n\n    // Reusable validation methods\n    private void validateOrderId(int orderId) {\n        if (orderId <= 0) {\n            throw new IllegalArgumentException(\"Order ID must be greater than zero.\");\n        }\n    }\n\n    private void validateQuantity(int quantity) {\n        if (quantity <= 0) {\n            throw new IllegalArgumentException(\"Quantity must be greater than zero.\");\n        }\n    }\n}\n
Explanation "},{"location":"fundamentaldives/FundamentalPrinciples/DRY/#summary","title":"Summary","text":"

The DRY principle ensures that code duplication is minimized for easier maintenance and improved consistency. In our example, we extracted common validation logic to private methods, adhering to DRY and making the code more maintainable. However, always be careful to avoid over-abstraction, as not all code repetition is bad. The goal is to achieve a balance between simplicity and reusability.

"},{"location":"fundamentaldives/FundamentalPrinciples/KISS/","title":"KISS Principle","text":""},{"location":"fundamentaldives/FundamentalPrinciples/KISS/#what","title":"What ?","text":"

The KISS Principle stands for \"Keep It Simple, Stupid\". It\u2019s a design principle that emphasizes simplicity, stating that systems and code work best if they are kept simple rather than made unnecessarily complex. The main idea is to avoid over-engineering and unnecessary complications, which can introduce more bugs, make the code harder to maintain, and increase development time.

KISS encourages developers to create code or solutions that are easy to understand, maintain, and modify. The idea is not to use complicated approaches or unnecessary abstractions when a simpler, more straightforward approach will do.

\u201cSimple\u201d here doesn\u2019t mean incomplete or simplistic; it means clear, focused, and straightforward.

"},{"location":"fundamentaldives/FundamentalPrinciples/KISS/#when-to-use","title":"When to Use ?","text":""},{"location":"fundamentaldives/FundamentalPrinciples/KISS/#why-to-use","title":"Why to Use ?","text":""},{"location":"fundamentaldives/FundamentalPrinciples/KISS/#where-not-to-use","title":"Where Not to Use ?","text":"

While simplicity is valuable, there are situations where the KISS principle might not apply fully. Over-simplifying can sometimes lead to problems.

"},{"location":"fundamentaldives/FundamentalPrinciples/KISS/#how-to-use-in-practice","title":"How to Use in Practice ?","text":""},{"location":"fundamentaldives/FundamentalPrinciples/KISS/#how-to-use-example","title":"How to Use Example ?","text":"

Let's consider an example where we want to check if a number is even or odd.

Non-KISS Complex Code
public class NumberUtils {\n    public static boolean isEven(int number) {\n        return isDivisibleByTwo(number);\n    }\n\n    private static boolean isDivisibleByTwo(int number) {\n        if (number % 2 == 0) {\n            return true;\n        } else {\n            return false;\n        }\n    }\n\n    public static void main(String[] args) {\n        System.out.println(\"Is 4 even? \" + isEven(4));\n    }\n}\n
Explanation KISS Simpler Code
public class NumberUtils {\n    public static boolean isEven(int number) {\n        return number % 2 == 0;\n    }\n\n    public static void main(String[] args) {\n        System.out.println(\"Is 4 even? \" + isEven(4));\n    }\n}\n
Explanation "},{"location":"fundamentaldives/FundamentalPrinciples/KISS/#summary","title":"Summary","text":"

The KISS principle encourages developers to create simple, maintainable, and readable code by avoiding unnecessary complexity. However, KISS must be balanced: some projects or scenarios require complexity to meet performance, modularity, or security needs. In the end, applying KISS is about striking the right balance, making the solution as simple as possible, but not simpler than necessary.

"},{"location":"fundamentaldives/FundamentalPrinciples/SOLID/","title":"SOLID Principles","text":""},{"location":"fundamentaldives/FundamentalPrinciples/SOLID/#single-responsibility-principle","title":"Single Responsibility Principle","text":"

A class should have only one reason to change. This means that each class should focus on a single responsibility or feature.

Violation Example
// Violates SRP: User class has multiple responsibilities.\npublic class User {\n    private String name;\n    private String email;\n\n    public void sendEmail(String message) {\n        // Sending email logic here...\n        System.out.println(\"Email sent to \" + email);\n    }\n\n    public void saveUser() {\n        // Save user to the database\n        System.out.println(\"User saved to DB\");\n    }\n}\n
Fixed Example
// Separate responsibilities into different classes.\npublic class User {\n    private String name;\n    private String email;\n\n    // Getters and setters...\n}\n\npublic class UserRepository {\n    public void save(User user) {\n        System.out.println(\"User saved to DB\");\n    }\n}\n\npublic class EmailService {\n    public void sendEmail(User user, String message) {\n        System.out.println(\"Email sent to \" + user.getEmail());\n    }\n}\n
"},{"location":"fundamentaldives/FundamentalPrinciples/SOLID/#open-closed-principle","title":"Open Closed Principle","text":"

Software components (classes, functions, etc.) should be open for extension but closed for modification. You shouldn\u2019t modify existing code to add new behavior instead, extend it.

Violation Example
// Violates OCP: PaymentProcessor needs to be modified for new payment types.\npublic class PaymentProcessor {\n    public void pay(String type) {\n        if (type.equals(\"credit\")) {\n            System.out.println(\"Processing credit card payment...\");\n        } else if (type.equals(\"paypal\")) {\n            System.out.println(\"Processing PayPal payment...\");\n        }\n    }\n}\n
Fixed Example
// Use an interface for extensibility.\ninterface PaymentMethod {\n    void pay();\n}\n\npublic class CreditCardPayment implements PaymentMethod {\n    public void pay() {\n        System.out.println(\"Processing credit card payment...\");\n    }\n}\n\npublic class PayPalPayment implements PaymentMethod {\n    public void pay() {\n        System.out.println(\"Processing PayPal payment...\");\n    }\n}\n\npublic class PaymentProcessor {\n    public void processPayment(PaymentMethod paymentMethod) {\n        paymentMethod.pay();\n    }\n}\n
"},{"location":"fundamentaldives/FundamentalPrinciples/SOLID/#liskov-substitution-principle","title":"Liskov Substitution Principle","text":"

Subclasses should be substitutable for their base class without altering the correctness of the program.

Violation Example
// Violates LSP: Square changes the behavior of Rectangle.\nclass Rectangle {\n    protected int width, height;\n\n    public void setWidth(int width) {\n        this.width = width;\n    }\n\n    public void setHeight(int height) {\n        this.height = height;\n    }\n\n    public int getArea() {\n        return width * height;\n    }\n}\n\nclass Square extends Rectangle {\n    @Override\n    public void setWidth(int width) {\n        this.width = width;\n        this.height = width;  // Violates LSP: Unexpected behavior.\n    }\n\n    @Override\n    public void setHeight(int height) {\n        this.width = height;\n        this.height = height;\n    }\n}\n
Fixed Example
// Use separate classes to maintain correct behavior.\nclass Shape {\n    public int getArea() {\n        return 0;\n    }\n}\n\nclass Rectangle extends Shape {\n    protected int width, height;\n\n    public Rectangle(int width, int height) {\n        this.width = width;\n        this.height = height;\n    }\n\n    @Override\n    public int getArea() {\n        return width * height;\n    }\n}\n\nclass Square extends Shape {\n    private int side;\n\n    public Square(int side) {\n        this.side = side;\n    }\n\n    @Override\n    public int getArea() {\n        return side * side;\n    }\n}\n
"},{"location":"fundamentaldives/FundamentalPrinciples/SOLID/#interface-segregation-principle","title":"Interface Segregation Principle","text":"

A client should not be forced to implement interfaces that it does not use. Instead, smaller, more specific interfaces should be preferred.

Violation Example
// Violates ISP: Car needs to implement unnecessary methods.\ninterface Vehicle {\n    void drive();\n    void fly();\n}\n\nclass Car implements Vehicle {\n    @Override\n    public void drive() {\n        System.out.println(\"Car is driving...\");\n    }\n\n    @Override\n    public void fly() {\n        // Car can't fly! This method is unnecessary.\n        throw new UnsupportedOperationException(\"Car can't fly\");\n    }\n}\n
Fixed Example
// Use separate interfaces for each capability.\ninterface Drivable {\n    void drive();\n}\n\ninterface Flyable {\n    void fly();\n}\n\nclass Car implements Drivable {\n    @Override\n    public void drive() {\n        System.out.println(\"Car is driving...\");\n    }\n}\n\nclass Plane implements Drivable, Flyable {\n    @Override\n    public void drive() {\n        System.out.println(\"Plane is taxiing...\");\n    }\n\n    @Override\n    public void fly() {\n        System.out.println(\"Plane is flying...\");\n    }\n}\n
"},{"location":"fundamentaldives/FundamentalPrinciples/SOLID/#dependency-inversion-principle","title":"Dependency Inversion Principle","text":"

High-level modules should not depend on low-level modules. Both should depend on abstractions.

Violation Example
// Violates DIP: High-level class depends on a specific implementation.\nclass SQLDatabase {\n    public void connect() {\n        System.out.println(\"Connected to SQL Database\");\n    }\n}\n\nclass Application {\n    private SQLDatabase database;\n\n    public Application() {\n        database = new SQLDatabase();  // Tight coupling to SQLDatabase.\n    }\n\n    public void start() {\n        database.connect();\n    }\n}\n
Fixed Example
// Depend on an abstraction instead of a specific implementation.\ninterface Database {\n    void connect();\n}\n\nclass SQLDatabase implements Database {\n    public void connect() {\n        System.out.println(\"Connected to SQL Database\");\n    }\n}\n\nclass NoSQLDatabase implements Database {\n    public void connect() {\n        System.out.println(\"Connected to NoSQL Database\");\n    }\n}\n\nclass Application {\n    private Database database;\n\n    public Application(Database database) {\n        this.database = database;\n    }\n\n    public void start() {\n        database.connect();\n    }\n}\n
"},{"location":"fundamentaldives/FundamentalPrinciples/SOLID/#summary","title":"Summary","text":"Principle Definition Violation Example Fixed Example Single Responsibility A class should have only one reason to change. User class manages both data and emails. Separate User, EmailService, UserRepository. Open Closed Open for extension, closed for modification. Modify PaymentProcessor for new methods. Use PaymentMethod interface and extend classes. Liskov Substitution Subtypes should behave like their base type. Square modifies behavior of Rectangle. Separate Square and Rectangle classes. Interface Segregation Use small, specific interfaces. Car implements unnecessary fly() method. Split into Drivable and Flyable interfaces. Dependency Inversion Depend on abstractions, not implementations. App depends on SQLDatabase directly. Use Database interface for loose coupling."},{"location":"fundamentaldives/FundamentalPrinciples/YAGNI/","title":"YAGNI Principle","text":""},{"location":"fundamentaldives/FundamentalPrinciples/YAGNI/#what","title":"What ?","text":"

YAGNI stands for \"You Aren\u2019t Gonna Need It.\" It is one of the core principles of Extreme Programming (XP) and Agile development. The principle advises developers not to add any functionality or code until it is truly needed. Essentially, YAGNI promotes simplicity and avoids speculative development.

Always implement things when you actually need them, never when you just foresee you might need them.

"},{"location":"fundamentaldives/FundamentalPrinciples/YAGNI/#why-use","title":"Why Use ?","text":""},{"location":"fundamentaldives/FundamentalPrinciples/YAGNI/#when-to-use","title":"When to Use ?","text":""},{"location":"fundamentaldives/FundamentalPrinciples/YAGNI/#when-not-to-use","title":"When Not to Use ?","text":""},{"location":"fundamentaldives/FundamentalPrinciples/YAGNI/#how-to-apply","title":"How to Apply ?","text":""},{"location":"fundamentaldives/FundamentalPrinciples/YAGNI/#advantages","title":"Advantages","text":""},{"location":"fundamentaldives/FundamentalPrinciples/YAGNI/#how-to-use-example","title":"How to Use Example ?","text":"

Let's go through a simple example to illustrate YAGNI in practice.

Without YAGNI Example Adding Unnecessary Functionality
public class UserService {\n\n    // Current functionality: Retrieve a user by ID\n    public User getUserById(int id) {\n        // Logic to retrieve user\n        return new User(id, \"John Doe\");\n    }\n\n    // Unnecessary feature: Speculating that we may need a \"deleteUser\" method in the future\n    public void deleteUser(int id) {\n        // Logic to delete user (unimplemented)\n        System.out.println(\"User deleted: \" + id);\n    }\n\n    // Another unnecessary feature: Thinking we might need email notifications\n    public void sendEmailNotification(User user) {\n        // Logic to send email (unimplemented)\n        System.out.println(\"Email sent to: \" + user.getEmail());\n    }\n}\n\nclass User {\n    private int id;\n    private String name;\n\n    public User(int id, String name) {\n        this.id = id;\n        this.name = name;\n    }\n\n    public String getEmail() {\n        return name.toLowerCase() + \"@example.com\";\n    }\n}\n
Explanation Applying YAGNI Only Implement What is Needed Now
public class UserService {\n\n    // Only add the necessary method for now\n    public User getUserById(int id) {\n        // Logic to retrieve user\n        return new User(id, \"John Doe\");\n    }\n}\n\nclass User {\n    private int id;\n    private String name;\n\n    public User(int id, String name) {\n        this.id = id;\n        this.name = name;\n    }\n}\n
Explanation "},{"location":"fundamentaldives/FundamentalPrinciples/YAGNI/#summary","title":"Summary","text":"

The YAGNI principle encourages developers to focus on delivering only the required features at a given point in time, avoiding speculative development that may never be used. This approach fosters simplicity, maintainability, and efficiency in the codebase. However, it should be applied carefully: there are scenarios (like architecture or security) where anticipating needs is necessary. When used properly, YAGNI helps teams build better software, faster, and with fewer headaches down the line.

"},{"location":"langdives/Java/4Pillars/","title":"4 Pillars - What, How, and Why ?","text":""},{"location":"langdives/Java/4Pillars/#encapsulation","title":"Encapsulation","text":"

What: Hiding the internal details of an object and only exposing necessary parts through public methods.

Why: It helps in data hiding and ensures controlled access to variables.

How: Use private variables to restrict direct access and provide getters and setters to access and modify the data.

Encapsulation Example
public class Person {\n    private String name; // Encapsulated field\n\n    // Getter\n    public String getName() {\n        return name;\n    }\n\n    // Setter\n    public void setName(String name) {\n        this.name = name;\n    }\n}\n
"},{"location":"langdives/Java/4Pillars/#inheritance","title":"Inheritance","text":"

What: Allows a class (child/subclass) to acquire the properties and behaviors of another class (parent/superclass).

How: Use the extends keyword.

Why: Promotes code reusability and establishes a parent-child relationship.

Inheritance Example
class Animal {\n    public void sound() {\n        System.out.println(\"Animals make sound\");\n    }\n}\n\nclass Dog extends Animal {\n    @Override\n    public void sound() {\n        System.out.println(\"Dog barks\");\n    }\n}\n
"},{"location":"langdives/Java/4Pillars/#polymorphism","title":"Polymorphism","text":"

What: Ability to process objects differently based on their data type or class.

Why: Increases flexibility and supports dynamic method invocation.

Polymorphism Example Method Overloading
class Calculator {\n    public int add(int a, int b) {\n        return a + b;\n    }\n\n    public double add(double a, double b) {\n        return a + b;\n    }\n}\n
Method Overriding
class Animal {\n    public void sound() {\n        System.out.println(\"Animal makes sound\");\n    }\n}\n\nclass Cat extends Animal {\n    @Override\n    public void sound() {\n        System.out.println(\"Cat meows\");\n    }\n}\n
"},{"location":"langdives/Java/4Pillars/#abstraction","title":"Abstraction","text":"

What: Hiding the complex implementation details and only exposing the essential features.

Why: Helps in achieving modularity and loose coupling between components.

How: Use abstract classes and interfaces.

Abstraction Example Abstract Class Example
abstract class Vehicle {\n    abstract void start();\n}\n\nclass Car extends Vehicle {\n    @Override\n    void start() {\n        System.out.println(\"Car starts with a key\");\n    }\n}\n
Interface Example
interface Animal {\n    void eat();\n}\n\nclass Dog implements Animal {\n    @Override\n    public void eat() {\n        System.out.println(\"Dog eats bones\");\n    }\n}\n
"},{"location":"langdives/Java/4Pillars/#summary","title":"Summary","text":"Aspect Encapsulation Inheritance Polymorphism Abstraction Definition Bundling data and methods together and restricting access to data. Mechanism for a subclass to acquire properties of a parent class. Allowing methods to take different forms (overloading/overriding). Hiding implementation details while showing only essential features. Focus Protecting data and providing controlled access. Code reuse and establishing a parent-child hierarchy. Dynamic behavior based on object type. Simplifying complex systems by exposing only key details. Achieved Through Using private fields, and public getters/setters. Using the extends keyword to derive subclasses. Overloading (compile-time) and overriding (runtime). Using abstract classes or interfaces. Key Benefit Data hiding and modular code. Reduces redundancy and promotes code reuse. Flexibility and extensibility of behavior. Promotes loose coupling and modularity. Access Modifiers Requires private, protected, or public. Involves all inheritance-accessible modifiers. Leverages method visibility across class hierarchies. Abstract methods can be protected or public (not private). Real-World Analogy A capsule with medicine inside it hides the internal components. A child inheriting traits from their parents. A shape object behaving differently as circle/square. A remote control exposing buttons without showing internal circuits. Code Dependency Independent within the class. Dependent on parent-child relationship. Involves multiple forms of a single method/class. Can work with unrelated classes sharing common behavior."},{"location":"langdives/Java/AccessModifPPPPP/","title":"Access modifiers","text":""},{"location":"langdives/Java/AccessModifPPPPP/#public","title":"Public","text":"

Keyword: public Access: Accessible from anywhere (inside/outside the class, package, or project). Usage: Typically used for classes, methods, and variables that need global access.

Public Example
public class MyClass {\n    public int value = 10;\n    public void display() {\n        System.out.println(\"Public method\");\n    }\n}\n
"},{"location":"langdives/Java/AccessModifPPPPP/#private","title":"Private","text":"

Keyword: private Access: Accessible only within the same class. Usage: Used to hide class fields or methods, following the principle of encapsulation.

Private Example
public class MyClass {\n    private int value = 10; // Not accessible outside this class\n\n    private void display() {\n        System.out.println(\"Private method\");\n    }\n}\n
"},{"location":"langdives/Java/AccessModifPPPPP/#protected","title":"Protected","text":"

Keyword: protected Access: Accessible within the same package and by subclasses (even if outside the package). Usage: Useful when extending classes across packages.

Protected Example
public class MyClass {\n    protected int value = 10;\n\n    protected void display() {\n        System.out.println(\"Protected method\");\n    }\n}\n

Note

If accessed by a subclass in a different package, it must be through inheritance (not directly via an instance).
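
A sketch of that rule, assuming two hypothetical packages pkg.a and pkg.b (two source files shown together):

Example
// File: pkg/a/MyClass.java\npackage pkg.a;\n\npublic class MyClass {\n    protected int value = 10;\n}\n\n// File: pkg/b/Child.java (different package)\npackage pkg.b;\n\nimport pkg.a.MyClass;\n\npublic class Child extends MyClass {\n    void show() {\n        System.out.println(value); // OK: accessed through inheritance\n        MyClass other = new MyClass();\n        // System.out.println(other.value); // Compile error: not via inheritance\n    }\n}\n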

"},{"location":"langdives/Java/AccessModifPPPPP/#package-private","title":"Package-Private","text":"

Keyword: No keyword (Default Access) Access: Accessible only within the same package. Usage: Used for classes and members that don\u2019t need to be accessed outside their package.

Package-Private Example
class MyClass { // No access modifier, so it's package-private\n    int value = 10;\n\n    void display() {\n        System.out.println(\"Package-private method\");\n    }\n}\n

Note

Package-private is the default if no modifier is specified.

"},{"location":"langdives/Java/AccessModifPPPPP/#access-summary","title":"Access Summary","text":"Modifier Same Class Same Package Subclass (Different Package) Other Packages public \u2705 \u2705 \u2705 \u2705 protected \u2705 \u2705 \u2705 (via inheritance) \u274c (default) \u2705 \u2705 \u274c \u274c private \u2705 \u274c \u274c \u274c"},{"location":"langdives/Java/Collections-JCF/","title":"Java Collections Framework","text":""},{"location":"langdives/Java/Collections-JCF/#categories-of-collections","title":"Categories of Collections","text":""},{"location":"langdives/Java/Collections-JCF/#list","title":"List","text":""},{"location":"langdives/Java/Collections-JCF/#arraylist","title":"ArrayList","text":"

A resizable array with fast random access. It's backed by an array; use it when random access is frequent and insertions are rare.

Operations & Complexities

Thread Safety: Not synchronized; use Collections.synchronizedList() for thread safety.

Example
List<String> list = new ArrayList<>();\nlist.add(\"Apple\");\nlist.get(0);  // Fast access\n
"},{"location":"langdives/Java/Collections-JCF/#linkedlist","title":"LinkedList","text":"

A doubly linked list, better at frequent insertions and deletions. Use it when insertion/deletion in the middle is frequent.

Operations & Complexities

Thread Safety: Not synchronized.

Example
List<String> list = new LinkedList<>();\nlist.add(\"Banana\");\nlist.addFirst(\"Apple\");  // O(1) insertion at head\n
"},{"location":"langdives/Java/Collections-JCF/#set","title":"Set","text":""},{"location":"langdives/Java/Collections-JCF/#hashset","title":"HashSet","text":"

Unordered, no duplicates, backed by a hash table. Use it when you need fast lookups and no duplicates.

Operations & Complexities

Thread Safety: Not synchronized; use Collections.synchronizedSet().

Example
Set<String> set = new HashSet<>();\nset.add(\"Cat\");\nset.add(\"Dog\");\n
"},{"location":"langdives/Java/Collections-JCF/#linkedhashset","title":"LinkedHashSet","text":"

This maintains insertion order, backed by a Hash Table + Linked List. You can use this when you need order-preserving behavior.

Operations & Complexities: Same as HashSet (O(1) operations) but with slightly higher overhead due to linked list maintenance.

Example
Set<String> set = new LinkedHashSet<>();\nset.add(\"Apple\");\nset.add(\"Banana\");\n
"},{"location":"langdives/Java/Collections-JCF/#treeset","title":"TreeSet","text":"

A sorted set, backed by a Red-Black Tree. Use it when you need sorted data.

Operations & Complexities

Thread Safety: Not synchronized.

Example
Set<Integer> set = new TreeSet<>();\nset.add(5);\nset.add(1);  // Sorted automatically\n
"},{"location":"langdives/Java/Collections-JCF/#queuedeque","title":"Queue/Deque","text":""},{"location":"langdives/Java/Collections-JCF/#priorityqueue","title":"PriorityQueue","text":"

Elements are ordered by their natural ordering or a custom comparator. It's backed by a binary heap; use it when priority-based retrieval is needed.

Operations & Complexities

Example
Queue<Integer> queue = new PriorityQueue<>();\nqueue.add(10);\nqueue.add(5);  // 5 will be at the top\n
"},{"location":"langdives/Java/Collections-JCF/#arraydeque","title":"ArrayDeque","text":"

A resizable-array-backed deque that allows adding/removing from both ends. Use it when you need both stack and queue operations.

Operations & Complexities

Example
Deque<String> deque = new ArrayDeque<>();\ndeque.addFirst(\"First\");\ndeque.addLast(\"Last\");\n
"},{"location":"langdives/Java/Collections-JCF/#map","title":"Map","text":""},{"location":"langdives/Java/Collections-JCF/#hashmap","title":"HashMap","text":"

Stores key-value pairs, backed by a hash table. Use it for fast key-value lookups.

Operations & Complexities

Thread Safety: Not synchronized; use ConcurrentHashMap for thread-safe operations.

Example
Map<String, Integer> map = new HashMap<>();\nmap.put(\"Apple\", 1);\nmap.put(\"Banana\", 2);\n
"},{"location":"langdives/Java/Collections-JCF/#linkedhashmap","title":"LinkedHashMap","text":"

Maintains insertion order, backed by a hash table plus a linked list. Use it when the ordering of entries matters.

Example
Map<String, Integer> map = new LinkedHashMap<>();\nmap.put(\"Apple\", 1);\nmap.put(\"Banana\", 2);  // Maintains insertion order\n
"},{"location":"langdives/Java/Collections-JCF/#treemap","title":"TreeMap","text":"

A sorted map, backed by a Red-Black Tree. Use it when you need a sorted key-value store.

Operations & Complexities: Get/Put/Remove: O(log n)

Example
Map<Integer, String> map = new TreeMap<>();\nmap.put(3, \"Three\");\nmap.put(1, \"One\");  // Sorted by key\n
"},{"location":"langdives/Java/Collections-JCF/#synchronized-collections","title":"Synchronized Collections","text":"

Synchronized Wrappers: Use Collections.synchronizedList() or Collections.synchronizedSet() to make collections thread-safe.

Example
List<String> list = Collections.synchronizedList(new ArrayList<>());\n

Concurrent Collections: Use ConcurrentHashMap, CopyOnWriteArrayList, or BlockingQueue for better thread-safe alternatives.
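
A minimal ConcurrentHashMap sketch in the same snippet style as above; merge and computeIfAbsent perform atomic per-key updates without external locking:

Example
Map<String, Integer> counts = new ConcurrentHashMap<>();\ncounts.merge(\"Apple\", 1, Integer::sum);      // atomic increment per key\ncounts.computeIfAbsent(\"Banana\", k -> 0);    // atomic insert-if-missing\n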

"},{"location":"langdives/Java/Collections-JCF/#summary","title":"Summary","text":"Collection Type Backed By Access Time Insertion Time Deletion Time Thread Safety Use Case ArrayList List Resizable Array O(1) O(1) (amortized) O(n) No Fast random access LinkedList List Doubly Linked List O(n) O(1) O(1) No Frequent insertions/deletions HashSet Set Hash Table - O(1) O(1) No Unique elements LinkedHashSet Set Hash Table + Linked List - O(1) O(1) No Unique elements with insertion order TreeSet Set Red-Black Tree - O(log n) O(log n) No Sorted elements PriorityQueue Queue Binary Heap - O(log n) O(log n) No Priority-based retrieval ArrayDeque Deque Resizable Array - O(1) O(1) No Both stack and queue operations HashMap Map Hash Table - O(1) O(1) No Fast key-value lookups LinkedHashMap Map Hash Table + Linked List - O(1) O(1) No Key-value lookups with insertion order TreeMap Map Red-Black Tree - O(log n) O(log n) No Sorted key-value pairs ConcurrentHashMap Concurrent Map Segmented Hash Table O(1) O(1) O(1) Yes Thread-safe map CopyOnWriteArrayList Concurrent List Array O(n) O(1) O(1) Yes Thread-safe list BlockingQueue Concurrent Queue Queue/Linked Nodes Depends on impl. O(1) O(1) Yes Thread-safe queue"},{"location":"langdives/Java/GarbageCollection/","title":"Garbage Collection","text":"

Garbage collection (GC) in Java is essential to automatic memory management, ensuring that objects no longer needed by an application are reclaimed, and the memory they occupied is freed. This allows Java developers to avoid memory leaks and other resource-management issues.

"},{"location":"langdives/Java/GarbageCollection/#basics","title":"Basics","text":"

Heap Memory is divided into several areas, mainly

How GC Works ? Simply put, GC works by going through phases.

"},{"location":"langdives/Java/GarbageCollection/#phases","title":"Phases","text":""},{"location":"langdives/Java/GarbageCollection/#types-of-collectors","title":"Types of Collectors","text":""},{"location":"langdives/Java/GarbageCollection/#serial-garbage-collector","title":"Serial Garbage Collector","text":""},{"location":"langdives/Java/GarbageCollection/#parallel-garbage-collector","title":"Parallel Garbage Collector","text":""},{"location":"langdives/Java/GarbageCollection/#cms-collector","title":"CMS Collector","text":""},{"location":"langdives/Java/GarbageCollection/#g1-garbage-collector","title":"G1 Garbage Collector","text":""},{"location":"langdives/Java/GarbageCollection/#z-garbage-collector-zgc","title":"Z Garbage Collector (ZGC)","text":""},{"location":"langdives/Java/GarbageCollection/#shenandoah-gc","title":"Shenandoah GC","text":""},{"location":"langdives/Java/GarbageCollection/#comparing-collectors","title":"Comparing Collectors","text":"Collector Use Case Pause Time Heap Size Parallelism Serial Small apps, single-threaded High Small (<1 GB) Single-threaded Parallel Throughput-heavy apps Moderate Medium to large Multi-threaded CMS Low-latency (deprecated) Low Medium to large Concurrent G1 Balanced throughput & latency Predictable Large Mixed ZGC Ultra-low latency, huge heaps Sub-millisecond Multi-TB Highly concurrent Shenandoah Latency-sensitive, large heaps Sub-millisecond Multi-TB Highly concurrent

Note

ZGC and Shenandoah use advanced algorithms that perform incremental marking, remapping, and concurrent sweeping. They avoid long pauses by offloading most GC work to concurrent threads.

"},{"location":"langdives/Java/GarbageCollection/#garbage-collection-concepts","title":"Garbage Collection Concepts","text":""},{"location":"langdives/Java/GarbageCollection/#generational-collection","title":"Generational Collection","text":"

Java heap is divided into:

Garbage collection types:

"},{"location":"langdives/Java/GarbageCollection/#advanced-memory-layout","title":"Advanced Memory Layout","text":"

Dynamic Region Management:

"},{"location":"langdives/Java/GarbageCollection/#safepoints","title":"Safepoints","text":"

Java threads stop only at specific safepoints for GC (or other JVM activities). A safepoint is a checkpoint that all threads must reach before GC can start, e.g., while executing bytecode that triggers an allocation failure, at method calls, or at back branches in loops.

The JVM injects safepoint polls into running code to ensure threads hit these safepoints regularly; too many safepoint pauses indicate GC tuning issues or excessive thread blocking.

The JVM may delay triggering GC while waiting for all threads to reach a safepoint, which introduces unpredictable latency. This is critical in low-latency systems like trading applications.

"},{"location":"langdives/Java/GarbageCollection/#stop-the-world-stw","title":"Stop the World (STW)","text":"

A Stop-The-World (STW) event occurs when the garbage collector (GC) halts all application threads to perform critical tasks like marking live objects, reclaiming memory, and compacting the heap. These pauses, necessary to prevent heap inconsistencies, impact application performance, especially in latency-sensitive environments.

The duration of STW events depends on heap size, the number of live objects, and the GC algorithm. Traditional collectors like Serial GC and Parallel GC have longer STW pauses, while CMS reduces them with concurrent marking but still requires short pauses for initial and final marking. Modern GCs like G1 GC, ZGC, and Shenandoah GC minimize pauses by performing most work concurrently with application threads, achieving millisecond-range STW durations.

Optimizations include using low-latency collectors, tuning GC settings, reducing allocation pressure, and monitoring GC behavior with tools like JFR or VisualVM. For latency-critical applications, advanced collectors and careful memory management are essential to mitigate the impact of STW events.

"},{"location":"langdives/Java/GarbageCollection/#barriers-tables-fences","title":"Barriers, Tables & Fences","text":""},{"location":"langdives/Java/GarbageCollection/#write-barriers","title":"Write Barriers","text":""},{"location":"langdives/Java/GarbageCollection/#card-tables","title":"Card Tables","text":""},{"location":"langdives/Java/GarbageCollection/#memory-fences","title":"Memory Fences","text":""},{"location":"langdives/Java/GarbageCollection/#hierarchy","title":"Hierarchy","text":""},{"location":"langdives/Java/GarbageCollection/#allocation","title":"Allocation","text":"

New Object Creation

Minor GC (Young Generation Collection)

"},{"location":"langdives/Java/GarbageCollection/#thresholds-promotions","title":"Thresholds & Promotions","text":"

Max Tenuring Threshold

Promotion Failures

"},{"location":"langdives/Java/GarbageCollection/#major-gc-old-generation","title":"Major GC & Old Generation","text":"

When Old Generation Fills Up

Concurrent Collectors (CMS, G1, ZGC, Shenandoah)

"},{"location":"langdives/Java/GarbageCollection/#full-gc-stop-the-world-event","title":"Full GC (Stop-the-World Event)","text":"

What Causes Full GC?

What Happens During Full GC

"},{"location":"langdives/Java/GarbageCollection/#safepoints-write-barriers","title":"Safepoints & Write Barriers","text":"

Safepoints

Write Barriers

"},{"location":"langdives/Java/GarbageCollection/#finalization-reference-types","title":"Finalization & Reference Types","text":"

Soft, Weak, and Phantom References
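
A small sketch of weak-reference behavior (System.gc() is only a hint, so the printed result is not guaranteed):

Example
import java.lang.ref.WeakReference;\n\npublic class WeakRefDemo {\n    public static void main(String[] args) {\n        Object strong = new Object();\n        WeakReference<Object> weak = new WeakReference<>(strong);\n\n        strong = null; // drop the only strong reference\n        System.gc();   // request a collection (a hint, not a guarantee)\n\n        // If a collection ran, the weakly referenced object has been cleared\n        System.out.println(weak.get()); // typically prints null\n    }\n}\n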

"},{"location":"langdives/Java/GarbageCollection/#gc-flow-structure","title":"GC Flow Structure","text":"

GC Flow

  1. Object Creation

    • Allocated in Eden Space (Young Gen).
  2. Eden Full \u2192 Trigger Minor GC

    • Mark live objects and move them to Survivor Spaces.
    • Objects surviving multiple cycles move to the Old Generation.
  3. Old Gen Full \u2192 Trigger Major GC

    • Mark and sweep objects in the Old Gen.
    • If heap is fragmented, trigger Full GC.
  4. Concurrent Collections (G1, ZGC)

    • Perform marking and sweeping concurrently without stopping the world.
  5. Full GC (Stop-the-World)

    • When all else fails, Full GC freezes all threads, marks, sweeps, and compacts memory.
"},{"location":"langdives/Java/GarbageCollection/#fragmentation","title":"Fragmentation","text":"

Fragmentation refers to the inefficient use of memory that occurs when free memory is split into small, non-contiguous blocks, making it difficult to allocate larger contiguous blocks even if the total free memory is sufficient. In Java, fragmentation can occur in both the young and old generations of the heap.

"},{"location":"langdives/Java/GarbageCollection/#types","title":"Types","text":"

Internal Fragmentation: Occurs when a block of memory is larger than what is actually needed. For example, if an object requires 10 bytes but is allocated a 16-byte block, the remaining 6 bytes are wasted.

External Fragmentation: Happens when free memory is scattered in small chunks across the heap. This can lead to a situation where there isn\u2019t enough contiguous space available to fulfill a large allocation request, even if the total free memory is sufficient.

"},{"location":"langdives/Java/GarbageCollection/#causes","title":"Causes","text":"

Object Lifetimes: Short-lived objects are frequently allocated and deallocated, especially in the young generation. This can create gaps in memory as these objects are collected, leading to external fragmentation.

Promotion of Objects: When objects in the young generation are promoted to the old generation, if the old generation is already fragmented, it may become difficult to allocate new objects.

Full GCs: In collectors like CMS (Concurrent Mark-Sweep), memory is reclaimed but not compacted, leaving fragmented free spaces.

"},{"location":"langdives/Java/GarbageCollection/#effects","title":"Effects","text":"

OutOfMemoryError: Fragmentation can cause allocation failures, leading to OutOfMemoryError if there isn\u2019t enough contiguous memory available for new object allocations.

Increased GC Overhead: The JVM may spend more time during GC cycles trying to find suitable spaces for object allocation, which can degrade performance.

Heap Fragmentation: Some collectors (like CMS) suffer from heap fragmentation since they don\u2019t compact memory after reclaiming space.

Pinned Objects: Sometimes, objects cannot be moved during GC (e.g., JNI references or thread-local objects). This can lead to fragmentation.

"},{"location":"langdives/Java/GarbageCollection/#mitigating","title":"Mitigating","text":"

Using G1 GC or ZGC: These collectors are designed to handle fragmentation better than older collectors. They manage memory in regions and perform compaction as part of their regular operations.

Heap Size Adjustments: Increasing the size of the old generation can help reduce the frequency of fragmentation issues.

Monitoring and Tuning: Regularly monitor memory usage and GC logs to identify fragmentation patterns. Tuning the JVM parameters can help alleviate fragmentation issues.

Object Pooling: Reusing objects instead of frequently allocating and deallocating them can help reduce fragmentation.
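
A minimal, non-thread-safe pooling sketch (SimplePool is a hypothetical helper, not a library class) that reuses released objects instead of reallocating them:

Example
import java.util.ArrayDeque;\nimport java.util.Deque;\nimport java.util.function.Supplier;\n\nclass SimplePool<T> {\n    private final Deque<T> free = new ArrayDeque<>();\n    private final Supplier<T> factory;\n\n    SimplePool(Supplier<T> factory) {\n        this.factory = factory;\n    }\n\n    T acquire() {\n        // Reuse a released object if one exists, otherwise create a new one\n        return free.isEmpty() ? factory.get() : free.pop();\n    }\n\n    void release(T obj) {\n        free.push(obj); // return to the pool instead of leaving it for GC\n    }\n}\n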

"},{"location":"langdives/Java/GarbageCollection/#configuring-garbage-collection","title":"Configuring garbage collection","text":"

Configuring garbage collection and its parameters in Java is primarily done through JVM (Java Virtual Machine) options when starting your application.

"},{"location":"langdives/Java/GarbageCollection/#how-to-configure-params","title":"How to Configure Params","text":"

Command-Line Options: You can specify GC options when you start your Java application using the java command.

Example
java -Xms512m -Xmx4g -XX:+UseG1GC -XX:MaxGCPauseMillis=100 -jar my-application.jar\n

Environment Variables: For containerized applications (like those running in Docker or Kubernetes), you can set JVM options through environment variables or directly in the configuration file.

Example in Docker
ENV JAVA_OPTS=\"-Xms512m -Xmx4g -XX:+UseG1GC\"\nCMD java $JAVA_OPTS -jar your-application.jar\n

Configuration Files: Some applications allow you to specify JVM options in a configuration file, which can be helpful for managing multiple parameters in one place.

"},{"location":"langdives/Java/GarbageCollection/#common-gc-options","title":"Common GC Options","text":"

Basic Heap Size Configuration

Choosing a Garbage Collector

G1 GC
-XX:+UseG1GC\n
Parallel GC
-XX:+UseParallelGC\n
CMS (Concurrent Mark-Sweep)
-XX:+UseConcMarkSweepGC\n
ZGC
-XX:+UseZGC\n
Shenandoah
-XX:+UseShenandoahGC\n

Tuning G1 GC

Tuning Parallel GC

Monitoring and Logging

Controlling Object Promotion

Metaspace Configuration (for class metadata)

"},{"location":"langdives/Java/GarbageCollection/#example-configuration","title":"Example Configuration","text":"

Here\u2019s an example of a command to start a Java application with G1 GC and some tuning parameters

Example
java -Xms1g -Xmx8g -XX:+UseG1GC \\\n   -XX:MaxGCPauseMillis=200 \\\n   -XX:G1HeapRegionSize=16m \\\n   -XX:InitiatingHeapOccupancyPercent=30 \\\n   -XX:ConcGCThreads=2 \\\n   -Xlog:gc*:file=gc.log \\\n   -jar my-application.jar\n
"},{"location":"langdives/Java/GarbageCollection/#deep-tuning-techniques","title":"Deep Tuning Techniques","text":"

Heap Size and GC Frequency

GC Latency and Response Times

Application Throughput vs Latency

Survivor Space Tuning

Tuning G1 GC

Young GC Tuning

Tuning Java GC for High Performance

Handling Multi-Terabyte Heaps

GC in Cloud and Microservices

Latency Monitoring

"},{"location":"langdives/Java/GarbageCollection/#tools-for-analysis","title":"Tools for Analysis","text":"

GC Logs: Capture GC details by adding -Xlog:gc* or -XX:+PrintGCDetails JVM options.

Sample GC Log
[GC (Allocation Failure) [PSYoungGen: 2048K->512K(2560K)] 4096K->2048K(8192K), 0.0103451 secs]\n

VisualVM: A monitoring tool bundled with the JDK for real-time JVM performance monitoring.

Java Flight Recorder (JFR): An advanced profiling tool that collects detailed JVM metrics, including GC data.

JConsole: Visualize JVM statistics and monitor heap usage.

"},{"location":"langdives/Java/GarbageCollection/#diagnosing-troubleshooting","title":"Diagnosing & Troubleshooting","text":"

OutOfMemoryError (OOM): Common causes

Heap Dump Analysis:

Detecting Leaks: Look for large objects kept reachable by static references, or for collections that keep growing (e.g., a large HashMap or ArrayList), as sketched below.
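
A sketch of that classic pattern: a static root keeps every added element strongly reachable, so GC can never reclaim it.

Example
import java.util.ArrayList;\nimport java.util.List;\n\npublic class LeakExample {\n    // Static root: everything added here stays reachable for the JVM's lifetime\n    private static final List<byte[]> CACHE = new ArrayList<>();\n\n    public static void handleRequest() {\n        CACHE.add(new byte[1024 * 1024]); // 1 MB per call, never removed\n    }\n}\n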

Java Flight Recorder (JFR): JFR provides detailed memory profiling without heavy overhead. Collect a recording and analyze it for object lifetimes, GC events, and thread behavior.

"},{"location":"langdives/Java/GarbageCollection/#summary","title":"Summary","text":"

Choose the Right GC Collector

Monitor and Analyze

Avoid Full GC

Pre-tuning Advice

"},{"location":"langdives/Java/Gradle/","title":"Gradle","text":""},{"location":"langdives/Java/Gradle/#what-is-gradle","title":"What is Gradle ?","text":"

Gradle is a modern, powerful build automation tool used for building, testing, and deploying applications. It is particularly popular in Java and Android projects due to its flexibility and performance. Gradle uses a Groovy/Kotlin-based DSL to configure and manage builds, allowing for easy customization.

"},{"location":"langdives/Java/Gradle/#how-gradle-works","title":"How Gradle Works ?","text":"

Gradle organizes builds using a Directed Acyclic Graph (DAG) of tasks, ensuring that tasks are executed only when necessary.

The build process has three phases

Initialization Phase

Configuration Phase

Execution Phase

"},{"location":"langdives/Java/Gradle/#gradle-build","title":"Gradle Build","text":"

Gradle build configuration is done in the build.gradle file. This file uses Groovy or Kotlin DSL to describe:

build.gradle Example

plugins {\n    id 'java'        // Apply Java plugin\n    id 'application' // Allow running the app from CLI\n}\n\nrepositories {\n    mavenCentral() // Use Maven Central for dependencies\n}\n\ndependencies {\n    implementation 'org.apache.commons:commons-lang3:3.12.0'  // Runtime dependency\n    testImplementation 'junit:junit:4.13.2'  // Test dependency\n}\n\napplication {\n    mainClass = 'com.example.UnderTheHood'  // Entry point of the app\n}\n
"},{"location":"langdives/Java/Gradle/#understanding-buildgradle","title":"Understanding build.gradle","text":"

Plugins Section

plugins {\n    id 'java'\n    id 'application'\n}\n

Repositories Section

repositories {\n    mavenCentral()\n}\n

Dependencies Section

dependencies {\n    implementation 'org.apache.commons:commons-lang3:3.12.0'\n    testImplementation 'junit:junit:4.13.2'\n}\n

Application Configuration

application {\n    mainClass = 'com.example.UnderTheHood'\n}\n
"},{"location":"langdives/Java/Gradle/#dependency-management","title":"Dependency Management","text":"

Gradle allows automatic dependency management. Dependencies (like libraries and frameworks) are fetched from repositories such as:

Gradle resolves dependencies in the following order:

Offline Mode
# Forces Gradle to use only the local cache \n# and does not try to access remote repositories.\ngradle build --offline\n
"},{"location":"langdives/Java/Gradle/#custom-tasks","title":"Custom Tasks","text":"

Gradle allows developers to create custom tasks to automate specific workflows.

Custom Tasks Example Create Custom Task
task uth {\n    doLast {\n        println 'Hello, UnderTheHood ;)'\n    }\n}\n
Run the Custom task
gradle uth\n
Output
Hello, UnderTheHood ;)\n

Custom tasks can be chained and made dependent on other tasks:

task compileCode {\n    dependsOn clean\n    doLast {\n        println 'Compiling code...'\n    }\n}\n
"},{"location":"langdives/Java/Gradle/#publishing-artifacts","title":"Publishing Artifacts","text":"

You can publish your project\u2019s artifacts (e.g., JARs) to Maven Local or Remote repositories using the maven-publish plugin.

Local Maven Publish Example

Apply Maven Publish Plugin
plugins {\n    id 'maven-publish'\n}\n
Configure Publishing in build.gradle
publishing {\n    publications {\n        mavenJava(MavenPublication) {\n            from components.java\n        }\n    }\n    repositories {\n        mavenLocal()  // Publish to local Maven repository (~/.m2/repository)\n    }\n}\n
Publish the Artifact
# This will install the JAR into your local Maven repository.\ngradle publishToMavenLocal\n
"},{"location":"langdives/Java/Gradle/#gradle-project-structure","title":"Gradle Project Structure","text":"Gradle recommended standard directory structure
/my-project\n\u2502\n\u251c\u2500\u2500 build.gradle          # Build configuration file\n\u251c\u2500\u2500 settings.gradle       # Project settings file\n\u251c\u2500\u2500 src\n\u2502   \u2514\u2500\u2500 main\n\u2502       \u2514\u2500\u2500 java          # Source code\n\u2502   \u2514\u2500\u2500 test\n\u2502       \u2514\u2500\u2500 java          # Unit tests\n\u2514\u2500\u2500 build                 # Output directory (JAR, WAR)\n

Gradle supports multi-module projects where different modules are part of the same build.

Example Multi-Project Structure
/root-project\n\u2502\n\u251c\u2500\u2500 build.gradle            # Root project configuration\n\u251c\u2500\u2500 settings.gradle         # Lists sub-projects\n\u251c\u2500\u2500 module-1/\n\u2502   \u2514\u2500\u2500 build.gradle        # Configuration for module 1\n\u2514\u2500\u2500 module-2/\n    \u2514\u2500\u2500 build.gradle        # Configuration for module 2\n
settings.gradle
rootProject.name = 'multi-project-example'\ninclude 'module-1', 'module-2'\n
Running the build
# This will build all modules in the correct order.\ngradle build\n
"},{"location":"langdives/Java/Gradle/#gradle-wrapper","title":"Gradle Wrapper","text":"

The Gradle Wrapper is a feature that allows a project to include a specific Gradle version along with scripts to execute builds. This ensures that anyone working on the project uses the same Gradle version without requiring a manual installation.

The Gradle Wrapper consists of:

"},{"location":"langdives/Java/Gradle/#why-use-wrapper","title":"Why Use Wrapper ?","text":""},{"location":"langdives/Java/Gradle/#gradle-commands","title":"Gradle Commands","text":"

Here are some essential Gradle commands for working with projects:

Command Description gradle init Initializes a new Gradle project. gradle build Compiles, tests, and packages the project. gradle run Runs the application (if using the Application plugin). gradle clean Removes the build/ directory for a fresh build. gradle tasks Lists all available tasks. gradle test Runs all tests in the project. gradle publish Publishes artifacts to Maven repositories."},{"location":"langdives/Java/Gradle/#performance-benefits","title":"Performance Benefits","text":"

Gradle is designed for speed and efficiency

"},{"location":"langdives/Java/Gradle/#summary","title":"Summary","text":"

Gradle provides several advantages for modern projects

"},{"location":"langdives/Java/JDK-JRE-JVM/","title":"Java","text":"

Java is a high-level, object-oriented programming language designed for portability, security, and ease of use. It is known for its \"write once, run anywhere\" capability, allowing developers to create software that can run on any device with a Java Virtual Machine (JVM).

"},{"location":"langdives/Java/JDK-JRE-JVM/#architecture","title":"Architecture","text":"

The Java architecture is composed of three main components:

"},{"location":"langdives/Java/JDK-JRE-JVM/#jdk","title":"JDK","text":"

The JDK (Java Development Kit) is a comprehensive development environment for building Java applications. It provides all the tools necessary for Java developers to create, compile, and package Java applications.

Components

"},{"location":"langdives/Java/JDK-JRE-JVM/#jre","title":"JRE","text":"

The JRE (Java Runtime Environment) provides the libraries, Java Virtual Machine (JVM), and other components necessary for running Java applications. It does not include development tools, making it ideal for end-users who only need to run Java applications.

Components

"},{"location":"langdives/Java/JDK-JRE-JVM/#jvm","title":"JVM","text":"

The JVM (Java Virtual Machine) is an abstract computing machine that enables a computer to run Java programs. It is responsible for interpreting and executing the bytecode generated by the Java compiler.

Functions

"},{"location":"langdives/Java/JDK-JRE-JVM/#hierarchical-structure","title":"Hierarchical Structure","text":"

Hierarchical Structure

JDK (includes javac, JRE, Tools)\n\u2514\u2500\u2500 JRE (includes JVM and libraries)\n    \u2514\u2500\u2500 JVM (executes bytecode)\n
"},{"location":"langdives/Java/JDK-JRE-JVM/#how-java-executes","title":"How Java Executes ?","text":"

The execution of Java code involves several steps, transitioning through the JDK, JRE, and JVM before reaching the machine code that the computer's CPU executes.

Code Writing: Java developers write source code in plain text files using the .java extension. This code defines classes and methods that make up the Java application.

Code Compilation: The developer uses the Java compiler (javac), which is part of the JDK, to compile the .java file. This process translates the human-readable Java code into an intermediate form known as bytecode. The output of this step is a .class file containing the bytecode.

Running the Application: To run the Java application, the developer executes a command using the Java runtime environment (e.g., java ClassName), which triggers the JRE. The JRE includes the JVM, which performs the following steps:

Machine Code Execution: The machine code generated by the JVM is executed by the host operating system's CPU. This process allows Java applications to be platform-independent, as the same bytecode can run on any system that has a compatible JVM.
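
A minimal end-to-end illustration of that flow, with the compile and run commands shown as comments:

Example
// HelloWorld.java\npublic class HelloWorld {\n    public static void main(String[] args) {\n        System.out.println(\"Hello from the JVM\");\n    }\n}\n// Compile to bytecode:  javac HelloWorld.java  -> produces HelloWorld.class\n// Run anywhere:         java HelloWorld        (JVM loads, verifies, executes)\n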

"},{"location":"langdives/Java/Java8vs11vs17vs21/","title":"Java 8 vs 11 vs 17 vs 21","text":"

A detailed comparison of Java 8, Java 11, Java 17, and Java 21, summarizing the key differences, improvements, and deprecations introduced across these versions:

"},{"location":"langdives/Java/Java8vs11vs17vs21/#java-8-released-march-2014","title":"Java 8 (Released March 2014)","text":"

Java 8 is a long-term support (LTS) release, bringing significant new features:

"},{"location":"langdives/Java/Java8vs11vs17vs21/#major-features","title":"Major Features","text":""},{"location":"langdives/Java/Java8vs11vs17vs21/#performance-security","title":"Performance & Security","text":""},{"location":"langdives/Java/Java8vs11vs17vs21/#drawbacks","title":"Drawbacks","text":""},{"location":"langdives/Java/Java8vs11vs17vs21/#java-11-released-sept-2018","title":"Java 11 (Released Sept 2018)","text":"

Java 11 is also LTS and a significant milestone since it removed many outdated APIs and modularized the runtime.

"},{"location":"langdives/Java/Java8vs11vs17vs21/#major-features_1","title":"Major Features","text":""},{"location":"langdives/Java/Java8vs11vs17vs21/#removals-deprecations","title":"Removals & Deprecations","text":""},{"location":"langdives/Java/Java8vs11vs17vs21/#performance-security_1","title":"Performance & Security","text":""},{"location":"langdives/Java/Java8vs11vs17vs21/#java-17-released-sept-2021","title":"Java 17 (Released Sept 2021)","text":"

Java 17 is an LTS release, refining many features introduced in Java 9-16 and stabilizing the platform.

"},{"location":"langdives/Java/Java8vs11vs17vs21/#major-features_2","title":"Major Features","text":""},{"location":"langdives/Java/Java8vs11vs17vs21/#removals-deprecations_1","title":"Removals & Deprecations","text":""},{"location":"langdives/Java/Java8vs11vs17vs21/#garbage-improvements","title":"Garbage Improvements","text":""},{"location":"langdives/Java/Java8vs11vs17vs21/#performance-security_2","title":"Performance & Security","text":""},{"location":"langdives/Java/Java8vs11vs17vs21/#java-21-released-sept-2023","title":"Java 21 (Released Sept 2023)","text":"

Java 21 is an LTS release. It introduces many experimental and innovative features.

"},{"location":"langdives/Java/Java8vs11vs17vs21/#major-features_3","title":"Major Features","text":""},{"location":"langdives/Java/Java8vs11vs17vs21/#removals-deprecations_2","title":"Removals & Deprecations:","text":""},{"location":"langdives/Java/Java8vs11vs17vs21/#performance-security_3","title":"Performance & Security:","text":""},{"location":"langdives/Java/Java8vs11vs17vs21/#key-versions-differences","title":"Key Versions Differences","text":"Feature / Change Java 8 Java 11 Java 17 Java 21 LTS Release Yes Yes Yes No Lambda Expressions Yes Yes Yes Yes HTTP Client No Yes (HTTP/2) Yes Yes Modular System (JPMS) No Yes Yes Yes Records No No Yes Yes Sealed Classes No No Yes Yes Text Blocks No No Yes Yes Pattern Matching (instanceof) No No Yes Yes Virtual Threads (Loom) No No No Yes Garbage Collectors G1 GC ZGC ZGC, Shenandoah Improved ZGC, Shenandoah String Enhancements Basic strip(), repeat() Text Blocks String Templates TLS Version 1.2 1.3 1.3 1.3 Security Manager Available Deprecated Deprecated Removed Nashorn JavaScript Engine Yes Deprecated Removed Removed"},{"location":"langdives/Java/Java8vs11vs17vs21/#summary","title":"Summary","text":"

For production systems, upgrading to Java 17 or Java 21 is generally recommended; both are LTS releases, with Java 21 adding newer features such as virtual threads.

"},{"location":"langdives/Java/JavaPassBy/","title":"Is Java Pass By Value or By Reference ?","text":"

Java is strictly pass by value, but let's go in depth.

"},{"location":"langdives/Java/JavaPassBy/#reference-vs-primitive","title":"Reference vs Primitive","text":"

Primitive Types: These are the basic data types in Java (e.g., int, char, boolean). When you pass a primitive type to a method, a copy of the value is made.

Reference Types: These include objects, arrays, and instances of classes. When you pass a reference type to a method, you pass a reference (or pointer) to the actual object in memory, not the object itself.

"},{"location":"langdives/Java/JavaPassBy/#pass-by-value","title":"Pass by Value","text":"

Java uses a mechanism called pass by value for method arguments, but it\u2019s important to clarify how this applies to primitive and reference types.

"},{"location":"langdives/Java/JavaPassBy/#primitive-types","title":"Primitive Types","text":"

When you pass a primitive type to a method, the method receives a copy of the variable's value. Any changes made to this copy do not affect the original variable.

Example
public class PassByValueExample {\n    public static void main(String[] args) {\n        int num = 10;\n        modifyValue(num); // Passing primitive\n        System.out.println(num); // Output: 10\n    }\n\n    public static void modifyValue(int value) {\n        value = 20; // Only modifies the copy\n    }\n}\n
"},{"location":"langdives/Java/JavaPassBy/#reference-types","title":"Reference Types","text":"

When you pass a reference type to a method, the reference itself is passed by value. This means the method receives a copy of the reference to the object. While you can change the object's properties, you cannot change the reference to point to a different object.

Example
class MyClass {\n    int value;\n\n    MyClass(int value) {\n        this.value = value;\n    }\n}\n\npublic class PassByReferenceExample {\n    public static void main(String[] args) {\n        MyClass obj = new MyClass(10);\n        modifyObject(obj); // Passing reference\n        System.out.println(obj.value); // Output: 20\n    }\n\n    public static void modifyObject(MyClass myObject) {\n        myObject.value = 20; // Modifies the object's property\n        // myObject = new MyClass(30); // This would not affect the original object reference only changes local myObject.\n    }\n}\n
"},{"location":"langdives/Java/JavaPassBy/#why","title":"Why?","text":"

When a method is called, a new stack frame is created, and local variables (including method parameters) are stored in this stack frame. Objects are stored in the heap, and the reference to these objects is passed to methods. When you modify the object\u2019s state inside the method, it reflects outside the method because both the original reference and the parameter reference point to the same object in memory.

"},{"location":"langdives/Java/JavaPassBy/#scope-and-lifetime","title":"Scope and Lifetime","text":""},{"location":"langdives/Java/JavaPassBy/#summary","title":"Summary","text":"

Java is pass-by-value:

Changes to the object through the reference affect the original object, but reassignment of the reference does not affect the original reference.

"},{"location":"langdives/Java/KeyWordsTerminolgies/","title":"Keywords and Terminolgies","text":""},{"location":"langdives/Java/KeyWordsTerminolgies/#class-object","title":"Class & Object","text":""},{"location":"langdives/Java/KeyWordsTerminolgies/#modifiers","title":"Modifiers","text":""},{"location":"langdives/Java/KeyWordsTerminolgies/#inheritance-polymorphism","title":"Inheritance & Polymorphism","text":""},{"location":"langdives/Java/KeyWordsTerminolgies/#control-flow","title":"Control Flow","text":""},{"location":"langdives/Java/KeyWordsTerminolgies/#exception-handling","title":"Exception Handling","text":""},{"location":"langdives/Java/KeyWordsTerminolgies/#memory-managementthreada","title":"Memory Management/Threada","text":""},{"location":"langdives/Java/KeyWordsTerminolgies/#types","title":"Types","text":""},{"location":"langdives/Java/KeyWordsTerminolgies/#others","title":"Others","text":""},{"location":"langdives/Java/Locking-Intrinsic/","title":"Locking","text":"

Locking is an essential concept in multithreaded programming to prevent race conditions and ensure thread safety. When multiple threads access shared resources, locks ensure that only one thread accesses the critical section at a time.

This article covers the synchronized keyword and intrinsic locks.

"},{"location":"langdives/Java/Locking-Intrinsic/#what-is-locking","title":"What is Locking?","text":"

Locking is a way to ensure that only one thread at a time executes a critical section or modifies a shared resource. Without proper locks, multiple threads may interfere with each other, leading to data inconsistency or unexpected behavior (race conditions).

Java offers various locking mechanisms, from synchronized blocks to explicit locks like ReentrantLock.

"},{"location":"langdives/Java/Locking-Intrinsic/#synchronized-and-intrinsic-locks","title":"Synchronized and Intrinsic Locks","text":"

Java\u2019s synchronized keyword is one of the primary ways to control access to shared resources in multithreaded programs. It ensures thread safety by providing mutual exclusion and visibility guarantees. Let's go further into every aspect of synchronized.

How synchronized Works ?

When a method or block is marked as synchronized, the JVM uses a monitor lock (intrinsic lock) for the associated object or class. The monitor is a synchronization construct provided by the JVM.

Two things happen when a thread enters a synchronized block or method:

Intrinsic Lock (mutual exclusion): Each Java object has an intrinsic lock (also called a monitor lock) associated with it. The thread that enters the synchronized block acquires the intrinsic lock; when it leaves the block, it releases the lock, allowing other threads to acquire it.

Memory Visibility: Releasing the monitor establishes a happens-before relationship with the next acquisition of the same lock, so writes made inside the block are visible to the next thread that enters it.

"},{"location":"langdives/Java/Locking-Intrinsic/#synchronized-methods","title":"Synchronized Methods","text":""},{"location":"langdives/Java/Locking-Intrinsic/#instance-level-locking","title":"Instance-Level Locking","text":"

When you synchronize a non-static method, the thread acquires the lock on the instance of the class (the this object).

public synchronized void increment() {\n    // Lock acquired on the current instance (this)\n    count++;\n}\n
Example with Instance-Level Locking
class Counter {\n    private int count = 0;\n\n    public synchronized void increment() {\n        count++;\n    }\n\n    public synchronized int getCount() {\n        return count;\n    }\n}\n\npublic class Main {\n    public static void main(String[] args) throws InterruptedException {\n        Counter counter = new Counter();\n\n        Thread t1 = new Thread(() -> {\n            for (int i = 0; i < 1000; i++) counter.increment();\n        });\n\n        Thread t2 = new Thread(() -> {\n            for (int i = 0; i < 1000; i++) counter.increment();\n        });\n\n        t1.start();\n        t2.start();\n        t1.join();\n        t2.join();\n\n        System.out.println(\"Final Count: \" + counter.getCount());  // Output: 2000\n    }\n}\n

Why does this work ?

Since both threads are operating on the same Counter object, only one thread at a time can execute the increment() method due to instance-level locking.

"},{"location":"langdives/Java/Locking-Intrinsic/#class-level-locking","title":"Class-Level Locking","text":"

A static synchronized method locks on the Class object (i.e., ClassName.class) rather than on an instance. This ensures that all threads calling static methods on the class are synchronized.

public synchronized static void staticIncrement() {\n    // Lock acquired on the class object (Counter.class)\n}\n
Example with Class-Level Locking
class Counter {\n    private static int count = 0;\n\n    public synchronized static void increment() {\n        count++;\n    }\n\n    public synchronized static int getCount() {\n        return count;\n    }\n}\n\npublic class Main {\n    public static void main(String[] args) throws InterruptedException {\n        Thread t1 = new Thread(() -> {\n            for (int i = 0; i < 1000; i++) Counter.increment();\n        });\n\n        Thread t2 = new Thread(() -> {\n            for (int i = 0; i < 1000; i++) Counter.increment();\n        });\n\n        t1.start();\n        t2.start();\n        t1.join();\n        t2.join();\n\n        System.out.println(\"Final Count: \" + Counter.getCount());  // Output: 2000\n    }\n}\n
"},{"location":"langdives/Java/Locking-Intrinsic/#synchronized-blocks","title":"Synchronized Blocks","text":"

A synchronized block provides more control than a synchronized method. You can choose which object\u2019s intrinsic lock to use, instead of locking the entire method.

public void increment() {\n    synchronized (this) {  // Locking on the current instance\n        count++;\n    }\n}\n
When to use ?
class Counter {\n    private int count = 0;\n    private final Object lock = new Object();\n\n    public void increment() {\n        synchronized (lock) {  // Locking on a custom object\n            count++;\n        }\n    }\n}\n
Example: Synchronized Block with Fine-Grained Control
public void updateBothCounters(Counter counter1, Counter counter2) {\n    synchronized (counter1) {  // Locking on the first Counter object\n        counter1.increment();\n    }\n    synchronized (counter2) {  // Locking on the second Counter object\n        counter2.increment();\n    }\n}\n
"},{"location":"langdives/Java/Locking-Intrinsic/#how-it-work-internally","title":"How it Work Internally","text":""},{"location":"langdives/Java/Locking-Intrinsic/#best-practices","title":"Best Practices","text":""},{"location":"langdives/Java/Locking-Intrinsic/#potential-issues","title":"Potential Issues","text":""},{"location":"langdives/Java/Locking-Issues-DeadLock/","title":"Issues with Locking - DeadLock","text":"

Locking mechanisms in Java, while essential for ensuring thread safety in multithreaded applications, can introduce various issues if not used properly.

In this article, we\u2019ll explore how deadlocks occur, how to prevent them, and practical examples of various techniques to detect and resolve deadlocks. A deadlock is a common concurrency issue in multithreaded programs and can severely impact performance.

"},{"location":"langdives/Java/Locking-Issues-DeadLock/#what-is-deadlock","title":"What is Deadlock ?","text":"

A deadlock occurs when two or more threads are blocked indefinitely, each waiting for a lock held by the other; neither can proceed.

This results in a circular wait, where no thread can release the locks it holds, leading to a deadlock condition.

"},{"location":"langdives/Java/Locking-Issues-DeadLock/#how-deadlock-occurs","title":"How Deadlock Occurs ?","text":"

Let\u2019s revisit the classic deadlock example.

Deadlock Example
class A {\n    public synchronized void methodA(B b) {\n        System.out.println(Thread.currentThread().getName() + \": Locked A, waiting for B...\");\n        try {\n            Thread.sleep(50);  // Simulate some work\n        } catch (InterruptedException e) {\n            e.printStackTrace();\n        }\n        b.last();  // Waiting for lock on object B\n    }\n\n    public synchronized void last() {\n        System.out.println(Thread.currentThread().getName() + \": Inside A.last()\");\n    }\n}\n\nclass B {\n    public synchronized void methodB(A a) {\n        System.out.println(Thread.currentThread().getName() + \": Locked B, waiting for A...\");\n        try {\n            Thread.sleep(50);  // Simulate some work\n        } catch (InterruptedException e) {\n            e.printStackTrace();\n        }\n        a.last();  // Waiting for lock on object A\n    }\n\n    public synchronized void last() {\n        System.out.println(Thread.currentThread().getName() + \": Inside B.last()\");\n    }\n}\n\npublic class DeadlockDemo {\n    public static void main(String[] args) {\n        A a = new A();\n        B b = new B();\n\n        Thread t1 = new Thread(() -> a.methodA(b), \"Thread 1\");\n        Thread t2 = new Thread(() -> b.methodB(a), \"Thread 2\");\n\n        t1.start();\n        t2.start();\n    }\n}\n

Flow Analysis

  1. Thread 1 starts and calls a.methodA(b). It acquires the lock on object A and prints:

    Thread 1: Locked A, waiting for B...\n

  2. Thread 2 starts and calls b.methodB(a). It acquires the lock on object B and prints:

    Thread 2: Locked B, waiting for A...\n

  3. Now:

    • Thread 1 holds the lock on A and waits for Thread 2 to release the lock on B.
    • Thread 2 holds the lock on B and waits for Thread 1 to release the lock on A.

Both threads are waiting indefinitely, resulting in a deadlock.

"},{"location":"langdives/Java/Locking-Issues-DeadLock/#how-to-avoid","title":"How to Avoid ?","text":""},{"location":"langdives/Java/Locking-Issues-DeadLock/#acquiring-locks-in-a-order","title":"Acquiring Locks in a Order","text":"

If all threads acquire locks in the same order, deadlock can be prevented.

Modified Example: Acquiring Locks in the Same Order
class A {\n    public void methodA(B b) {\n        synchronized (this) {\n            System.out.println(Thread.currentThread().getName() + \": Locked A, waiting for B...\");\n            synchronized (b) {\n                System.out.println(Thread.currentThread().getName() + \": Acquired lock on B\");\n                b.last();\n            }\n        }\n    }\n\n    public void last() {\n        System.out.println(Thread.currentThread().getName() + \": Inside A.last()\");\n    }\n}\n\nclass B {\n    public void methodB(A a) {\n        synchronized (a) {  // Lock A first, matching the order used in methodA\n            System.out.println(Thread.currentThread().getName() + \": Locked A, waiting for B...\");\n            synchronized (this) {\n                System.out.println(Thread.currentThread().getName() + \": Acquired lock on B\");\n                a.last();\n            }\n        }\n    }\n\n    public void last() {\n        System.out.println(Thread.currentThread().getName() + \": Inside B.last()\");\n    }\n}\n\npublic class DeadlockResolved {\n    public static void main(String[] args) {\n        A a = new A();\n        B b = new B();\n\n        Thread t1 = new Thread(() -> a.methodA(b), \"Thread 1\");\n        Thread t2 = new Thread(() -> b.methodB(a), \"Thread 2\");\n\n        t1.start();\n        t2.start();\n    }\n}\n

Explanation

Both threads now acquire locks in the same order (A \u2192 B). This ensures that deadlock cannot occur.

"},{"location":"langdives/Java/Locking-Issues-DeadLock/#using-trylock-with-timeout","title":"Using tryLock() with Timeout","text":"

The tryLock() method attempts to acquire a lock and fails gracefully if the lock is not available within a specified time.

Deadlock Prevention using tryLock() example
import java.util.concurrent.TimeUnit;\nimport java.util.concurrent.locks.ReentrantLock;\n\npublic class TryLockDemo {\n    private final ReentrantLock lockA = new ReentrantLock();\n    private final ReentrantLock lockB = new ReentrantLock();\n\n    public void methodA() {\n        try {\n            if (lockA.tryLock(1, TimeUnit.SECONDS)) {\n                System.out.println(Thread.currentThread().getName() + \": Locked A\");\n                Thread.sleep(50);  // Simulate some work\n\n                if (lockB.tryLock(1, TimeUnit.SECONDS)) {\n                    try {\n                        System.out.println(Thread.currentThread().getName() + \": Locked B\");\n                    } finally {\n                        lockB.unlock();\n                    }\n                } else {\n                    System.out.println(Thread.currentThread().getName() + \": Could not acquire lock B, releasing A\");\n                }\n\n                lockA.unlock();\n            }\n        } catch (InterruptedException e) {\n            e.printStackTrace();\n        }\n    }\n\n    public void methodB() {\n        try {\n            if (lockB.tryLock(1, TimeUnit.SECONDS)) {\n                System.out.println(Thread.currentThread().getName() + \": Locked B\");\n                Thread.sleep(50);  // Simulate some work\n\n                if (lockA.tryLock(1, TimeUnit.SECONDS)) {\n                    try {\n                        System.out.println(Thread.currentThread().getName() + \": Locked A\");\n                    } finally {\n                        lockA.unlock();\n                    }\n                } else {\n                    System.out.println(Thread.currentThread().getName() + \": Could not acquire lock A, releasing B\");\n                }\n\n                lockB.unlock();\n            }\n        } catch (InterruptedException e) {\n            e.printStackTrace();\n        }\n    }\n\n    public static void main(String[] args) {\n        TryLockDemo demo = new TryLockDemo();\n\n        Thread t1 = new Thread(demo::methodA, \"Thread 1\");\n        Thread t2 = new Thread(demo::methodB, \"Thread 2\");\n\n        t1.start();\n        t2.start();\n    }\n}\n

Explanation

If a thread fails to acquire a lock within the timeout, it releases any locks it holds, avoiding a deadlock.

"},{"location":"langdives/Java/Locking-Issues-DeadLock/#detecting-using-monitoring-tools","title":"Detecting Using Monitoring Tools","text":"

You can detect deadlocks using tools such as jstack thread dumps, JConsole, or VisualVM, all of which report monitor deadlocks, or programmatically through the JVM's ThreadMXBean, as sketched below.

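For programmatic detection, ThreadMXBean.findDeadlockedThreads() returns the IDs of deadlocked threads (or null when there are none); a minimal sketch that could run in a monitoring thread:
import java.lang.management.ManagementFactory;\nimport java.lang.management.ThreadInfo;\nimport java.lang.management.ThreadMXBean;\n\npublic class DeadlockDetector {\n    public static void main(String[] args) {\n        ThreadMXBean mxBean = ManagementFactory.getThreadMXBean();\n        long[] deadlockedIds = mxBean.findDeadlockedThreads();  // null when no deadlock exists\n        if (deadlockedIds != null) {\n            for (ThreadInfo info : mxBean.getThreadInfo(deadlockedIds)) {\n                System.out.println(\"Deadlocked: \" + info.getThreadName()\n                        + \" waiting on \" + info.getLockName());\n            }\n        } else {\n            System.out.println(\"No deadlocks detected.\");\n        }\n    }\n}\n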
"},{"location":"langdives/Java/Locking-Issues-DeadLock/#best-practices","title":"Best Practices","text":""},{"location":"langdives/Java/Locking-Issues-DeadLock/#summary","title":"Summary","text":"

Deadlocks are one of the most common and dangerous issues in multithreaded programming.

"},{"location":"langdives/Java/Locking-Issues-LiveLock/","title":"Issues with Locking - LiveLock","text":"

Locking mechanisms in Java, while essential for ensuring thread safety in multithreaded applications, can introduce various issues if not used properly.

In this article, we\u2019ll explore how livelocks occur, how to prevent them, and practical techniques to detect and resolve them.

"},{"location":"langdives/Java/Locking-Issues-LiveLock/#what-is-livelock","title":"What is Livelock ?","text":"

In a livelock, multiple threads remain active but unable to make progress because they keep responding to each other\u2019s actions. Unlike deadlock, where threads are stuck waiting for locks indefinitely, threads in a livelock keep changing their states in response to each other, but they fail to reach a final state or make useful progress.

Key difference from deadlock

In deadlock, threads are blocked waiting for each other, while in livelock, threads are not blocked, but they keep releasing and reacquiring locks or changing states in a way that prevents progress.

"},{"location":"langdives/Java/Locking-Issues-LiveLock/#example-of-livelock","title":"Example of Livelock","text":"

Consider two people trying to pick up a spoon to eat, but they keep yielding to each other in an attempt to be polite. Neither person can make progress because they\u2019re constantly checking and responding to each other\u2019s actions.

Livelock Example
class Spoon {\n    private boolean isAvailable = true;\n\n    public synchronized boolean pickUp() {\n        if (isAvailable) {\n            isAvailable = false;\n            return true;\n        }\n        return false;\n    }\n\n    public synchronized void putDown() {\n        isAvailable = true;\n    }\n}\n\npublic class LivelockDemo {\n    public static void main(String[] args) {\n        Spoon spoon = new Spoon();\n\n        Thread person1 = new Thread(() -> {\n            while (!spoon.pickUp()) {\n                System.out.println(\"Person 1: Waiting for spoon...\");\n                Thread.yield();  // Yield control to other threads\n            }\n            System.out.println(\"Person 1: Picked up spoon!\");\n        });\n\n        Thread person2 = new Thread(() -> {\n            while (!spoon.pickUp()) {\n                System.out.println(\"Person 2: Waiting for spoon...\");\n                Thread.yield();  // Yield control to other threads\n            }\n            System.out.println(\"Person 2: Picked up spoon!\");\n        });\n\n        person1.start();\n        person2.start();\n    }\n}\n

Explanation

"},{"location":"langdives/Java/Locking-Issues-LiveLock/#causes-of-livelock","title":"Causes of Livelock","text":""},{"location":"langdives/Java/Locking-Issues-LiveLock/#how-to-avoid-livelocks","title":"How to Avoid Livelocks","text":""},{"location":"langdives/Java/Locking-Issues-LiveLock/#use-timeouts-for-locking","title":"Use Timeouts for Locking","text":"

Using timeouts helps threads avoid indefinite waiting. If a thread cannot acquire the lock within a certain time, it can stop trying or take an alternative path.

Using tryLock() with Timeout
import java.util.concurrent.TimeUnit;\nimport java.util.concurrent.locks.ReentrantLock;\n\nclass Spoon {\n    private final ReentrantLock lock = new ReentrantLock();\n\n    public boolean pickUp() throws InterruptedException {\n        // Try to acquire the lock with a timeout\n        return lock.tryLock(1, TimeUnit.SECONDS);\n    }\n\n    public void putDown() {\n        lock.unlock();\n    }\n}\n\npublic class LivelockFixed {\n    public static void main(String[] args) {\n        Spoon spoon = new Spoon();\n\n        Thread person1 = new Thread(() -> {\n            try {\n                if (spoon.pickUp()) {\n                    System.out.println(\"Person 1: Picked up spoon!\");\n                    spoon.putDown();\n                } else {\n                    System.out.println(\"Person 1: Couldn't get the spoon in time.\");\n                }\n            } catch (InterruptedException e) {\n                Thread.currentThread().interrupt();\n            }\n        });\n\n        Thread person2 = new Thread(() -> {\n            try {\n                if (spoon.pickUp()) {\n                    System.out.println(\"Person 2: Picked up spoon!\");\n                    spoon.putDown();\n                } else {\n                    System.out.println(\"Person 2: Couldn't get the spoon in time.\");\n                }\n            } catch (InterruptedException e) {\n                Thread.currentThread().interrupt();\n            }\n        });\n\n        person1.start();\n        person2.start();\n    }\n}\n

Why it works ?

If a thread fails to acquire the lock within 1 second, it backs off instead of trying indefinitely.

"},{"location":"langdives/Java/Locking-Issues-LiveLock/#use-back-off-strategies","title":"Use Back-off Strategies","text":"

A back-off strategy makes threads wait for a random amount of time before retrying. This avoids a situation where two threads keep checking the same lock in sync.

Back-off Strategy Example
import java.util.Random;\nimport java.util.concurrent.locks.ReentrantLock;\n\nclass Spoon {\n    private final ReentrantLock lock = new ReentrantLock();\n    private final Random random = new Random();\n\n    public boolean tryPickUp() {\n        return lock.tryLock();\n    }\n\n    public void putDown() {\n        lock.unlock();\n    }\n\n    public void backOff() throws InterruptedException {\n        Thread.sleep(random.nextInt(100));  // Wait for a random time\n    }\n}\n\npublic class LivelockWithBackoff {\n    public static void main(String[] args) {\n        Spoon spoon = new Spoon();\n\n        Thread person1 = new Thread(() -> {\n            try {\n                while (!spoon.tryPickUp()) {\n                    System.out.println(\"Person 1: Waiting...\");\n                    spoon.backOff();  // Wait before retrying\n                }\n                System.out.println(\"Person 1: Picked up spoon!\");\n                spoon.putDown();\n            } catch (InterruptedException e) {\n                Thread.currentThread().interrupt();\n            }\n        });\n\n        Thread person2 = new Thread(() -> {\n            try {\n                while (!spoon.tryPickUp()) {\n                    System.out.println(\"Person 2: Waiting...\");\n                    spoon.backOff();  // Wait before retrying\n                }\n                System.out.println(\"Person 2: Picked up spoon!\");\n                spoon.putDown();\n            } catch (InterruptedException e) {\n                Thread.currentThread().interrupt();\n            }\n        });\n\n        person1.start();\n        person2.start();\n    }\n}\n

Why it works ?

The random back-off time prevents threads from retrying in lockstep, avoiding livelock.

"},{"location":"langdives/Java/Locking-Issues-LiveLock/#avoid-excessive-yielding","title":"Avoid Excessive Yielding","text":"

Frequent use of Thread.yield() can lead to livelock. Instead, use timeouts or back-off strategies to prevent threads from constantly giving way to each other.

"},{"location":"langdives/Java/Locking-Issues-LiveLock/#use-condition-variables","title":"Use Condition Variables","text":"

Use Condition variables (available with ReentrantLock) to properly coordinate threads waiting on specific conditions.

Using Condition Variables Example
import java.util.concurrent.locks.Condition;\nimport java.util.concurrent.locks.ReentrantLock;\n\nclass Spoon {\n    private boolean isAvailable = true;\n    private final ReentrantLock lock = new ReentrantLock();\n    private final Condition spoonAvailable = lock.newCondition();\n\n    public void pickUp() throws InterruptedException {\n        lock.lock();\n        try {\n            while (!isAvailable) {\n                spoonAvailable.await();  // Wait until spoon is available\n            }\n            isAvailable = false;\n        } finally {\n            lock.unlock();\n        }\n    }\n\n    public void putDown() {\n        lock.lock();\n        try {\n            isAvailable = true;\n            spoonAvailable.signal();  // Notify waiting thread\n        } finally {\n            lock.unlock();\n        }\n    }\n}\n

Why it works ?

Using condition variables ensures that only one thread proceeds when the spoon becomes available, avoiding busy-waiting and yielding.

"},{"location":"langdives/Java/Locking-Issues-LiveLock/#best-practices","title":"Best Practices","text":""},{"location":"langdives/Java/Locking-Issues-LiveLock/#summary","title":"Summary","text":"

Livelocks can be tricky to detect because threads remain active yet fail to make meaningful progress. Timeouts, back-off strategies, condition variables, and proper locking mechanisms prevent threads from endlessly reacting to one another.

"},{"location":"langdives/Java/Locking-Issues-Others/","title":"Issues with Locking - Other Issues","text":"

Locking mechanisms in Java, while essential for ensuring thread safety in multithreaded applications, can introduce various issues if not used properly.

In this article we cover the remaining key locking issues in Java: race conditions, thread contention, missed signals, nested locks, overuse of locks, and the performance overhead of locking. Each section contains causes, examples, solutions, and best practices to avoid or mitigate these issues.

"},{"location":"langdives/Java/Locking-Issues-Others/#race-conditions-despite-locking","title":"Race Conditions Despite Locking","text":"Cause

A race condition occurs when multiple threads access a shared resource without proper synchronization, leading to inconsistent results based on the timing of thread execution. Even with partial locks, a shared variable may still be accessed inconsistently if not protected properly.

Race Condition Example

class Counter {\n    private int count = 0;\n\n    public void increment() {\n        synchronized (this) {\n            count++;\n        }\n    }\n\n    public int getCount() {\n        // Not synchronized, potential race condition.\n        return count;\n    }\n}\n

Problem

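One way to close this gap, sketched below: make every access to the shared state go through the same synchronization, or drop the explicit lock entirely in favor of an atomic class (AtomicCounter is an illustrative name):
import java.util.concurrent.atomic.AtomicInteger;\n\nclass AtomicCounter {\n    private final AtomicInteger count = new AtomicInteger();\n\n    public void increment() {\n        count.incrementAndGet();  // Atomic read-modify-write, no explicit lock\n    }\n\n    public int getCount() {\n        return count.get();  // Always reads the latest value\n    }\n}\n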
Solution and Best Practices "},{"location":"langdives/Java/Locking-Issues-Others/#contention-performance-issues","title":"Contention & Performance Issues","text":"Cause

When multiple threads compete for the same lock, they spend time waiting for the lock to become available, reducing throughput and performance.

Contention Example

class BankAccount {\n    private int balance = 100;\n\n    public synchronized void withdraw(int amount) {\n        balance -= amount;\n    }\n\n    public synchronized int getBalance() {\n        return balance;\n    }\n}\n

Problem

If multiple threads frequently access the withdraw() method, contention for the lock will occur, degrading performance.

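When the contended state is a simple counter, one mitigation (a sketch; HitCounter is an illustrative name) is a striped accumulator like LongAdder, which spreads updates across internal cells so threads rarely collide:
import java.util.concurrent.atomic.LongAdder;\n\nclass HitCounter {\n    private final LongAdder hits = new LongAdder();  // Internally striped to cut contention\n\n    public void record() {\n        hits.increment();  // Concurrent updates land on different cells\n    }\n\n    public long total() {\n        return hits.sum();  // Sums all cells at read time\n    }\n}\n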
Solution and Best Practices "},{"location":"langdives/Java/Locking-Issues-Others/#missed-signals-lost-wake-ups","title":"Missed Signals & Lost Wake-ups","text":"Cause

When a thread misses a notify() signal because it was not yet waiting on the lock, a lost wake-up occurs. This results in threads waiting indefinitely for a signal that has already been sent.

Lost Wake-Up Example

public synchronized void produce() throws InterruptedException {\n    while (available) {\n        wait();  // Missed if notify() was called before reaching here.\n    }\n    available = true;\n    notify();\n}\n
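The usual remedy, sketched here with an assumed shared lock object and flag: always call wait() inside a loop that re-checks the condition, and prefer notifyAll() so no waiter misses the signal.
private final Object lock = new Object();\nprivate boolean available = false;\n\npublic void produce() throws InterruptedException {\n    synchronized (lock) {\n        while (available) {\n            lock.wait();       // Re-checks the condition on every wake-up\n        }\n        available = true;\n        lock.notifyAll();      // Wakes every waiter, so no signal is lost\n    }\n}\n\npublic void consume() throws InterruptedException {\n    synchronized (lock) {\n        while (!available) {\n            lock.wait();\n        }\n        available = false;\n        lock.notifyAll();\n    }\n}\n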
Solution and Best Practices "},{"location":"langdives/Java/Locking-Issues-Others/#nested-locks-ordering-issues","title":"Nested Locks & Ordering Issues","text":"Cause

Using multiple locks can cause deadlocks if threads acquire locks in different orders.

Deadlock Example

synchronized (lock1) {\n    synchronized (lock2) {\n        // Critical section\n    }\n}\n

Problem

If Thread 1 acquires lock1 and Thread 2 acquires lock2, both threads will wait indefinitely for each other\u2019s lock, resulting in a deadlock.

Solution and Best Practices "},{"location":"langdives/Java/Locking-Issues-Others/#overuse-of-locks","title":"Overuse of Locks","text":"Cause

Using too many locks or locking too frequently can reduce parallelism and hurt scalability. If every method in a class is synchronized, threads will frequently block each other, reducing concurrency and efficiency.

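Concurrent collections often remove the need for coarse locks altogether; a sketch using ConcurrentHashMap (WordCounts is an illustrative name):
import java.util.Map;\nimport java.util.concurrent.ConcurrentHashMap;\n\nclass WordCounts {\n    // No synchronized methods: the map handles per-key atomicity internally\n    private final Map<String, Integer> counts = new ConcurrentHashMap<>();\n\n    public void add(String word) {\n        counts.merge(word, 1, Integer::sum);  // Atomic per-key update\n    }\n\n    public int get(String word) {\n        return counts.getOrDefault(word, 0);\n    }\n}\n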
Solution and Best Practices "},{"location":"langdives/Java/Locking-Issues-Others/#overhead-of-locking","title":"Overhead of Locking","text":"Cause

Locking adds overhead in the form of context switches between threads, CPU cache invalidation, and the JVM's monitor management for intrinsic locks.

Performance Issues with Synchronized Code

Excessive locking causes contention and frequent context switches, impacting throughput and latency.

Solution and Best Practices "},{"location":"langdives/Java/Locking-Issues-Others/#best-practices","title":"Best Practices","text":""},{"location":"langdives/Java/Locking-Issues-Others/#summary","title":"Summary","text":"

Locking is essential to ensure thread safety, but improper use can lead to issues such as race conditions, deadlocks, livelocks, contention, and performance degradation. Understanding these issues and following best practices will help you write efficient, scalable, and thread-safe code: use fine-grained locks and concurrent utilities wisely to maximize concurrency while minimizing risk.

"},{"location":"langdives/Java/Locking-Issues-Starvation/","title":"Issues with Locking - Starvation","text":"

Locking mechanisms in Java, while essential for ensuring thread safety in multithreaded applications, can introduce various issues if not used properly.

In this article, we\u2019ll explore how starvation occurs, how to prevent it, and practical techniques to detect and resolve it.

"},{"location":"langdives/Java/Locking-Issues-Starvation/#what-is-starvation","title":"What is Starvation ?","text":"

Starvation is a condition where low-priority threads are unable to gain access to resources because higher-priority threads or unfair scheduling policies monopolize CPU time or locks. As a result, the low-priority thread starves and never gets the chance to run, even though it is ready to execute.

This issue can manifest in multithreaded programs when locks or resources are continuously granted to specific threads, leaving others waiting indefinitely. It can occur not only due to CPU scheduling but also due to improper locking strategies, unfair algorithms, or resource starvation.

"},{"location":"langdives/Java/Locking-Issues-Starvation/#causes-of-starvation","title":"Causes of Starvation","text":""},{"location":"langdives/Java/Locking-Issues-Starvation/#example-of-starvation","title":"Example of Starvation","text":"Starvation with Unfair Lock Example
import java.util.concurrent.locks.ReentrantLock;\n\npublic class StarvationDemo {\n    private static final ReentrantLock lock = new ReentrantLock();  // Unfair lock\n\n    public static void main(String[] args) {\n        Runnable task = () -> {\n            while (true) {\n                if (lock.tryLock()) {\n                    try {\n                        System.out.println(Thread.currentThread().getName() + \" got the lock!\");\n                        break;\n                    } finally {\n                        lock.unlock();\n                    }\n                } else {\n                    System.out.println(Thread.currentThread().getName() + \" waiting...\");\n                }\n            }\n        };\n\n        Thread highPriority = new Thread(task, \"High-Priority\");\n        highPriority.setPriority(Thread.MAX_PRIORITY);\n\n        Thread lowPriority = new Thread(task, \"Low-Priority\");\n        lowPriority.setPriority(Thread.MIN_PRIORITY);\n\n        highPriority.start();\n        lowPriority.start();\n    }\n}\n

Explanation

"},{"location":"langdives/Java/Locking-Issues-Starvation/#how-to-avoid","title":"How to Avoid ?","text":""},{"location":"langdives/Java/Locking-Issues-Starvation/#use-fair-locks","title":"Use Fair Locks","text":"

Using fair locks ensures that the longest-waiting thread gets the lock first. This prevents threads from skipping the queue and ensures all threads get a chance to execute.

Fair Lock Example
import java.util.concurrent.locks.ReentrantLock;\n\npublic class FairLockDemo {\n    private static final ReentrantLock lock = new ReentrantLock(true);  // Fair lock\n\n    public static void main(String[] args) {\n        Runnable task = () -> {\n            while (true) {\n                if (lock.tryLock()) {\n                    try {\n                        System.out.println(Thread.currentThread().getName() + \" got the lock!\");\n                        break;\n                    } finally {\n                        lock.unlock();\n                    }\n                } else {\n                    System.out.println(Thread.currentThread().getName() + \" waiting...\");\n                }\n            }\n        };\n\n        Thread highPriority = new Thread(task, \"High-Priority\");\n        Thread lowPriority = new Thread(task, \"Low-Priority\");\n\n        highPriority.setPriority(Thread.MAX_PRIORITY);\n        lowPriority.setPriority(Thread.MIN_PRIORITY);\n\n        highPriority.start();\n        lowPriority.start();\n    }\n}\n

Note

"},{"location":"langdives/Java/Locking-Issues-Starvation/#avoid-priority-based-scheduling","title":"Avoid Priority-Based Scheduling","text":"

Although Java allows you to assign priorities to threads, the JVM\u2019s thread scheduler may not always honor them consistently. It\u2019s generally recommended to avoid relying on thread priorities for critical tasks. If you need to control thread scheduling, use fair locks or condition variables instead of thread priorities.

"},{"location":"langdives/Java/Locking-Issues-Starvation/#backoff-strategies","title":"Backoff Strategies","text":"

Using backoff strategies (delays) between retries can help reduce contention for resources. This ensures that no thread monopolizes the CPU by continuously attempting to acquire a resource.

Backoff Strategy Example
import java.util.concurrent.locks.ReentrantLock;\n\npublic class BackoffDemo {\n    private static final ReentrantLock lock = new ReentrantLock();\n\n    public static void main(String[] args) {\n        Runnable task = () -> {\n            while (true) {\n                if (lock.tryLock()) {\n                    try {\n                        System.out.println(Thread.currentThread().getName() + \" got the lock!\");\n                        break;\n                    } finally {\n                        lock.unlock();\n                    }\n                } else {\n                    System.out.println(Thread.currentThread().getName() + \" waiting...\");\n                    try {\n                        Thread.sleep((long) (Math.random() * 100));  // Random delay\n                    } catch (InterruptedException e) {\n                        Thread.currentThread().interrupt();\n                    }\n                }\n            }\n        };\n\n        Thread t1 = new Thread(task, \"Thread-1\");\n        Thread t2 = new Thread(task, \"Thread-2\");\n\n        t1.start();\n        t2.start();\n    }\n}\n

Note

Random delays ensure that threads do not engage in busy-waiting loops, reducing contention and improving fairness.

"},{"location":"langdives/Java/Locking-Issues-Starvation/#use-thread-pools","title":"Use Thread Pools","text":"

When dealing with many concurrent tasks, using a thread pool ensures that threads are fairly scheduled and resources are shared efficiently.

Using ThreadPoolExecutor Example
import java.util.concurrent.ExecutorService;\nimport java.util.concurrent.Executors;\n\npublic class ThreadPoolDemo {\n    public static void main(String[] args) {\n        ExecutorService executor = Executors.newFixedThreadPool(2);\n\n        Runnable task = () -> {\n            System.out.println(Thread.currentThread().getName() + \" is running\");\n        };\n\n        for (int i = 0; i < 5; i++) {\n            executor.submit(task);\n        }\n\n        executor.shutdown();\n    }\n}\n

Note

Using thread pools avoids creating too many threads and ensures fair resource sharing.

"},{"location":"langdives/Java/Locking-Issues-Starvation/#avoid-long-critical-sections","title":"Avoid Long Critical Sections","text":""},{"location":"langdives/Java/Locking-Issues-Starvation/#use-condition-variables","title":"Use Condition Variables","text":"

Instead of relying on priorities or busy-waiting, use Condition objects with ReentrantLock to manage thread coordination efficiently.

Condition Variables Example
import java.util.concurrent.locks.Condition;\nimport java.util.concurrent.locks.Lock;\nimport java.util.concurrent.locks.ReentrantLock;\n\npublic class ConditionDemo {\n    private static final Lock lock = new ReentrantLock();\n    private static final Condition condition = lock.newCondition();\n\n    public static void main(String[] args) {\n        new Thread(() -> {\n            lock.lock();\n            try {\n                System.out.println(\"Waiting...\");\n                condition.await();  // Wait for a signal\n                System.out.println(\"Resumed\");\n            } catch (InterruptedException e) {\n                Thread.currentThread().interrupt();\n            } finally {\n                lock.unlock();\n            }\n        }).start();\n\n        new Thread(() -> {\n            lock.lock();\n            try {\n                Thread.sleep(1000);\n                condition.signal();  // Signal the waiting thread\n                System.out.println(\"Signaled\");\n            } catch (InterruptedException e) {\n                Thread.currentThread().interrupt();\n            } finally {\n                lock.unlock();\n            }\n        }).start();\n    }\n}\n

Note

Using conditions helps avoid busy-waiting and ensures efficient thread signaling.

"},{"location":"langdives/Java/Locking-Issues-Starvation/#best-practices","title":"Best Practices","text":""},{"location":"langdives/Java/Locking-Issues-Starvation/#summary","title":"Summary","text":"

Starvation is a subtle but serious issue in multithreaded programs, particularly when some threads are prioritized over others or when resources are monopolized by specific threads. Fair locks, thread pools, backoff strategies, and short critical sections together ensure that every thread eventually gets a chance to run.

"},{"location":"langdives/Java/Locking-Reentrant/","title":"Locking.","text":"

Locking is an essential concept in multithreaded programming to prevent race conditions and ensure thread safety. When multiple threads access shared resources, locks ensure that only one thread accesses the critical section at a time.

This article covers reentrant locks.

"},{"location":"langdives/Java/Locking-Reentrant/#what-is-locking","title":"What is Locking ?","text":"

Locking is a way to ensure that only one thread at a time executes a critical section or modifies a shared resource. Without proper locks, multiple threads may interfere with each other, leading to data inconsistency or unexpected behavior (race conditions).

Java offers various locking mechanisms, from synchronized blocks to explicit locks like ReentrantLock.

"},{"location":"langdives/Java/Locking-Reentrant/#what-is-reentrantlock","title":"What is ReentrantLock ?","text":"

The ReentrantLock class, introduced in Java 5, offers more control over thread synchronization than the synchronized keyword. It allows for advanced locking techniques such as fairness policies, tryLock, and interruptible locks. Let\u2019s explore everything about ReentrantLock, including its use cases, internal mechanisms, and best practices.

ReentrantLock is a concrete class in the java.util.concurrent.locks package that implements the Lock interface.

Note

Example
import java.util.concurrent.locks.ReentrantLock;\n\nclass Counter {\n    private int count = 0;\n    private final ReentrantLock lock = new ReentrantLock();\n\n    public void increment() {\n        lock.lock();  // Acquire the lock\n        try {\n            count++;\n        } finally {\n            lock.unlock();  // Release the lock\n        }\n    }\n\n    public int getCount() {\n        lock.lock();\n        try {\n            return count;\n        } finally {\n            lock.unlock();\n        }\n    }\n}\n\npublic class Main {\n    public static void main(String[] args) throws InterruptedException {\n        Counter counter = new Counter();\n\n        Thread t1 = new Thread(() -> {\n            for (int i = 0; i < 1000; i++) counter.increment();\n        });\n\n        Thread t2 = new Thread(() -> {\n            for (int i = 0; i < 1000; i++) counter.increment();\n        });\n\n        t1.start();\n        t2.start();\n        t1.join();\n        t2.join();\n\n        System.out.println(\"Final Count: \" + counter.getCount());  // Output: 2000\n    }\n}\n
"},{"location":"langdives/Java/Locking-Reentrant/#how-it-works-internally","title":"How it Works Internally ?","text":"

Lock Acquisition: When a thread calls lock(), it tries to acquire the lock. If the lock is available, the thread proceeds otherwise, it blocks until the lock becomes available.

Reentrancy: A thread that holds the lock can acquire the lock again without blocking. This is useful when a thread enters a method that also calls another synchronized method or block that requires the same lock.

Fair vs Unfair Locking:

"},{"location":"langdives/Java/Locking-Reentrant/#advanced-locking-techniques","title":"Advanced Locking Techniques","text":""},{"location":"langdives/Java/Locking-Reentrant/#trylock","title":"tryLock()","text":"

The tryLock() method attempts to acquire the lock without blocking. It returns true if the lock is acquired, otherwise false.

Example
if (lock.tryLock()) {\n    try {\n        // Perform task\n    } finally {\n        lock.unlock();\n    }\n} else {\n    System.out.println(\"Could not acquire lock, doing something else...\");\n}\n
When to use ?

When you want to avoid blocking indefinitely if the lock is not available.

"},{"location":"langdives/Java/Locking-Reentrant/#trylock-with-timeout","title":"tryLock with Timeout","text":"

The tryLock(long timeout, TimeUnit unit) method waits for a specific amount of time to acquire the lock.

Example
import java.util.concurrent.TimeUnit;\n\nif (lock.tryLock(1, TimeUnit.SECONDS)) {\n    try {\n        // Perform task\n    } finally {\n        lock.unlock();\n    }\n} else {\n    System.out.println(\"Could not acquire lock within timeout.\");\n}\n
When to use ?

When waiting indefinitely is not practical, such as network operations or I/O tasks.

"},{"location":"langdives/Java/Locking-Reentrant/#interruptible-lock-acquisition","title":"Interruptible Lock Acquisition","text":"

The lockInterruptibly() method allows a thread to acquire the lock but respond to interrupts while waiting.

Example
try {\n    lock.lockInterruptibly();  // Wait for lock, but respond to interrupts\n    try {\n        // Perform task\n    } finally {\n        lock.unlock();\n    }\n} catch (InterruptedException e) {\n    System.out.println(\"Thread was interrupted.\");\n}\n
When to use ?

Use when a thread needs to respond to interruption while waiting for a lock.

"},{"location":"langdives/Java/Locking-Reentrant/#behavior","title":"Behavior","text":"

A reentrant lock means that the same thread can acquire the lock multiple times without blocking itself. However, the thread must release the lock the same number of times to fully unlock it.

Behavior Example
class ReentrantExample {\n    private final ReentrantLock lock = new ReentrantLock();\n\n    public void outerMethod() {\n        lock.lock();\n        try {\n            System.out.println(\"In outer method\");\n            innerMethod();\n        } finally {\n            lock.unlock();\n        }\n    }\n\n    public void innerMethod() {\n        lock.lock();\n        try {\n            System.out.println(\"In inner method\");\n        } finally {\n            lock.unlock();\n        }\n    }\n}\n
Explanation

In this example, outerMethod calls innerMethod, and both methods acquire the same lock. This works without issues because ReentrantLock allows reentrant locking.

"},{"location":"langdives/Java/Locking-Reentrant/#condition-variables","title":"Condition Variables","text":"

The Condition interface (associated with a ReentrantLock) allows a thread to wait for a condition to be met before proceeding. It provides better control than the traditional wait()/notify().

Condition Variables Example
import java.util.concurrent.locks.Condition;\nimport java.util.concurrent.locks.ReentrantLock;\n\nclass ConditionExample {\n    private final ReentrantLock lock = new ReentrantLock();\n    private final Condition condition = lock.newCondition();\n    private boolean ready = false;\n\n    public void awaitCondition() throws InterruptedException {\n        lock.lock();\n        try {\n            while (!ready) {\n                condition.await();  // Wait for signal\n            }\n            System.out.println(\"Condition met, proceeding...\");\n        } finally {\n            lock.unlock();\n        }\n    }\n\n    public void signalCondition() {\n        lock.lock();\n        try {\n            ready = true;\n            condition.signal();  // Signal waiting thread\n        } finally {\n            lock.unlock();\n        }\n    }\n}\n
"},{"location":"langdives/Java/Locking-Reentrant/#performance","title":"Performance","text":"

ReentrantLock has more overhead than synchronized due to fairness policies and explicit lock management. Use synchronized for simple scenarios and ReentrantLock for more complex locking requirements (e.g., tryLock, fairness).

"},{"location":"langdives/Java/Locking-ReentrantReadWrite/","title":"Locking","text":"

Locking is an essential concept in multithreaded programming to prevent race conditions and ensure thread safety. When multiple threads access shared resources, locks ensure that only one thread accesses the critical section at a time.

This article covers read-write locks.

"},{"location":"langdives/Java/Locking-ReentrantReadWrite/#what-is-locking","title":"What is Locking ?","text":"

Locking is a way to ensure that only one thread at a time executes a critical section or modifies a shared resource. Without proper locks, multiple threads may interfere with each other, leading to data inconsistency or unexpected behavior (race conditions).

Java offers various locking mechanisms, from synchronized blocks to explicit locks like ReentrantLock and ReentrantReadWriteLock.

"},{"location":"langdives/Java/Locking-ReentrantReadWrite/#what-is-reentrantreadwritelock","title":"What is ReentrantReadWriteLock?","text":"

The ReentrantReadWriteLock is a specialized lock from Java\u2019s java.util.concurrent.locks package, designed to improve performance in concurrent systems by separating read and write operations. It provides more efficient locking behavior when the majority of operations are read-only, allowing multiple readers to access the shared resource simultaneously but blocking writers until all reading operations are complete.

A ReentrantReadWriteLock maintains two types of locks: a read lock that multiple threads can hold at the same time (as long as no writer holds the write lock), and a write lock that is exclusive to a single thread.

This separation helps optimize performance for read-heavy workloads.

Example
import java.util.concurrent.locks.ReentrantReadWriteLock;\n\nclass SharedResource {\n    private int data = 0;\n    private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();\n\n    public void write(int value) {\n        lock.writeLock().lock();  // Acquire write lock\n        try {\n            data = value;\n            System.out.println(\"Data written: \" + value);\n        } finally {\n            lock.writeLock().unlock();  // Release write lock\n        }\n    }\n\n    public int read() {\n        lock.readLock().lock();  // Acquire read lock\n        try {\n            System.out.println(\"Data read: \" + data);\n            return data;\n        } finally {\n            lock.readLock().unlock();  // Release read lock\n        }\n    }\n}\n\npublic class Main {\n    public static void main(String[] args) throws InterruptedException {\n        SharedResource resource = new SharedResource();\n\n        // Writer thread\n        Thread writer = new Thread(() -> resource.write(42));\n\n        // Reader threads\n        Thread reader1 = new Thread(() -> resource.read());\n        Thread reader2 = new Thread(() -> resource.read());\n\n        writer.start();\n        reader1.start();\n        reader2.start();\n\n        writer.join();\n        reader1.join();\n        reader2.join();\n    }\n}\n
"},{"location":"langdives/Java/Locking-ReentrantReadWrite/#how-it-works","title":"How it Works ?","text":""},{"location":"langdives/Java/Locking-ReentrantReadWrite/#key-features","title":"Key Features","text":""},{"location":"langdives/Java/Locking-ReentrantReadWrite/#common-problems","title":"Common Problems","text":"Write Starvation

In scenarios with frequent readers, a writer may starve because readers keep acquiring the read lock, delaying the writer indefinitely.

Solution

Use a fair lock

ReentrantReadWriteLock lock = new ReentrantReadWriteLock(true);  // Enable fairness\n

Fair locks ensure that waiting writers get a chance to execute after the current readers finish.

DeadLock

If a thread tries to upgrade a read lock to a write lock, it can deadlock: the write lock cannot be granted until all read locks are released, including the requester's own.

Deadlock example

Thread 1: Holds read lock -> requests write lock (blocks until all read locks are released)\nThread 2: Holds read lock -> requests write lock (also blocks)\nNeither thread releases its read lock, so both wait forever.\n

Solution

Performance Degradation with Too Many Write Operations

If there are frequent write operations, the system behaves similarly to using a normal ReentrantLock, as readers must wait for writers to release the lock.

Solution

Use lock-free data structures (like AtomicReference) or ReadWriteLock only when reads significantly outnumber writes.

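A sketch of the lock-free alternative (LatestConfig is an illustrative name): writers publish a new immutable value through an AtomicReference, so readers never block:
import java.util.concurrent.atomic.AtomicReference;\n\nclass LatestConfig {\n    // Readers never take a lock; writers swap in a new snapshot atomically\n    private final AtomicReference<String> config = new AtomicReference<>(\"initial\");\n\n    public String read() {\n        return config.get();\n    }\n\n    public void update(String newValue) {\n        config.set(newValue);  // Or compareAndSet(...) for conditional updates\n    }\n}\n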
Incorrect Use of Lock Downgrading

If a thread holding the write lock tries to release it before acquiring the read lock, data inconsistencies can occur.

Correct Lock Downgrading Example

lock.writeLock().lock();\ntry {\n    // Write critical section\n    lock.readLock().lock();  // Downgrade to read lock\n} finally {\n    lock.writeLock().unlock();  // Release write lock\n}\n// Perform read operations under the read lock.\n
"},{"location":"langdives/Java/Locking-ReentrantReadWrite/#when-to-use","title":"When to Use ?","text":""},{"location":"langdives/Java/Locking-ReentrantReadWrite/#when-not-to-use","title":"When Not to Use ?","text":""},{"location":"langdives/Java/Locking-ReentrantReadWrite/#best-practices","title":"Best Practices","text":""},{"location":"langdives/Java/Locking-ReentrantReadWrite/#summary","title":"Summary","text":"

ReentrantReadWriteLock is a powerful tool that allows multiple threads to read concurrently while ensuring exclusive access for writes; it is most effective in read-heavy scenarios. Understanding potential issues like write starvation, deadlocks, and performance degradation is essential for using this lock effectively. By following best practices like consistent lock ordering, minimizing lock duration, and monitoring lock usage, you can avoid common pitfalls and maximize its performance benefits.

"},{"location":"langdives/Java/LockingIntrinsicReentrant/","title":"synchronized vs ReentrantLock","text":""},{"location":"langdives/Java/LockingIntrinsicReentrant/#differences","title":"Differences","text":"Feature synchronized ReentrantLock Basic Concept Uses intrinsic lock (monitor) on objects. Uses an explicit lock from java.util.concurrent.locks. Lock Acquisition Acquired implicitly when entering a synchronized block or method. Acquired explicitly via lock() method. Release of Lock Automatically released when the thread exits the synchronized block or method. Must be explicitly released via unlock(). Reentrancy Supports reentrancy (same thread can acquire the same lock multiple times). Supports reentrancy just like synchronized. Fairness Unfair by default (no control over thread access order). Can be fair or unfair (configurable with ReentrantLock(true)). Interruptibility Cannot respond to interrupts while waiting for the lock. Supports interruptible locking via lockInterruptibly(). Try Locking Not supported. A thread will block indefinitely if the lock is not available. Supports tryLock() to attempt locking without blocking or with timeout. Condition Variables Uses wait() / notify() / notifyAll() methods on the intrinsic lock. Supports multiple Condition objects for finer-grained wait/notify control. Timeout Support Not supported. If the lock is held by another thread, it will wait indefinitely. Supports timeout locking with tryLock(long timeout, TimeUnit unit). Performance Overhead Low for simple scenarios with little contention. Higher overhead but provides greater control over locking behavior. Fair Locking Option Not supported (always unfair). Fair locking can be enabled with ReentrantLock(true). Use in Non-blocking Operations Not possible. Possible with tryLock() (non-blocking). Flexibility and Control Limited to synchronized methods or blocks. Greater flexibility: lock multiple sections, lock only part of a method, or use multiple conditions. Suitability for Deadlock Avoidance Requires external logic to prevent deadlocks (acquire locks in the same order). Easier to prevent deadlocks using tryLock() and timeouts. Memory Usage No additional memory overhead. Uses the object\u2019s monitor. Requires additional memory for lock objects and lock metadata. Readability and Simplicity Easier to read and maintain (especially for small, simple use cases). More complex code with explicit lock management. Error Handling No need to manage lock release in a finally block. The lock is automatically released. Requires explicit unlock() in finally blocks to avoid deadlocks or memory leaks. Thread Starvation Prone to thread starvation in high contention scenarios. Can prevent starvation using fair lock mode. Recommended Use Case Best for simple synchronization needs where you don\u2019t need advanced control. Recommended for complex concurrency scenarios needing fine-grained locking, fairness, tryLock, or interruptibility."},{"location":"langdives/Java/LockingIntrinsicReentrant/#when-to-use","title":"When to Use ?","text":"Use synchronized Use ReentrantLock When you need simple, block-level or method-level synchronization. When you need advanced control over locking behavior (e.g., tryLock, fairness, or interruptibility). When you want automatic lock release (less error-prone). When you need multiple locks or condition variables. When performance matters in low-contention scenarios (lower overhead). When dealing with high contention and you need fair scheduling to prevent starvation. 
When you don't need non-blocking operations or timeouts. When you want non-blocking operations using tryLock() or timeout-based locking. When the code needs to be simple and easy to read. When code complexity is acceptable for greater flexibility."},{"location":"langdives/Java/LockingIntrinsicReentrant/#summary","title":"Summary","text":"

Both synchronized and ReentrantLock have their own strengths and use cases. Use synchronized for simpler, lower-level concurrency needs, and ReentrantLock when you need more control, fairness, or advanced features like non-blocking locking and condition variables.

In general: - synchronized is easier to use and less error-prone. - ReentrantLock is more powerful and flexible, but with more overhead and complexity.
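
As a quick illustration of the extra control ReentrantLock gives you, here is a minimal sketch (assuming java.util.concurrent.locks.ReentrantLock is imported) using tryLock() for a non-blocking acquire, with unlock() in a finally block:

ReentrantLock lock = new ReentrantLock();\nif (lock.tryLock()) {            // non-blocking attempt; returns false if the lock is busy\n    try {\n        // critical section\n    } finally {\n        lock.unlock();           // always release in finally\n    }\n} else {\n    // lock not available; do useful work instead of blocking\n}\n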

"},{"location":"langdives/Java/Maven/","title":"Maven","text":""},{"location":"langdives/Java/Maven/#what-is-maven","title":"What is Maven ?","text":"

Apache Maven is a build automation and project management tool primarily for Java projects. It uses XML (pom.xml) to describe the project's structure, dependencies, and build lifecycle. Maven focuses on the \u201cconvention over configuration\u201d principle, meaning it provides a standard way to structure and build projects with minimal configuration.

"},{"location":"langdives/Java/Maven/#how-maven-works","title":"How Maven Works ?","text":"

Maven operates using a build lifecycle consisting of pre-defined phases. When you execute a specific phase, all preceding phases are executed as well.

Maven Lifecycle Phases Phase Description validate Validates the project structure. compile Compiles the source code. test Runs the unit tests. package Packages the compiled code into a JAR or WAR. verify Verifies the package meets specifications. install Installs the JAR into the local Maven repository. deploy Deploys the artifact to a remote repository.
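
For example, running the package phase automatically executes validate, compile, and test first:

mvn package\n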

Maven revolves around the POM (Project Object Model), which defines the project's coordinates, dependencies, plugins, and build configuration.

We will look at pom.xml in more detail in the next section.

"},{"location":"langdives/Java/Maven/#understanding-pomxml","title":"Understanding pom.xml","text":"

The POM (Project Object Model) file is the heart of a Maven project. It defines dependencies, build plugins, and project metadata.

Basic Example of pom.xml

<project xmlns=\"http://maven.apache.org/POM/4.0.0\"\n        xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"\n        xsi:schemaLocation=\"http://maven.apache.org/POM/4.0.0 \n                            http://maven.apache.org/xsd/maven-4.0.0.xsd\">\n    <modelVersion>4.0.0</modelVersion>\n\n    <groupId>com.example</groupId>\n    <artifactId>my-app</artifactId>\n    <version>1.0-SNAPSHOT</version>\n    <packaging>jar</packaging>\n\n    <dependencies>\n        <dependency>\n            <groupId>org.apache.commons</groupId>\n            <artifactId>commons-lang3</artifactId>\n            <version>3.12.0</version>\n        </dependency>\n    </dependencies>\n\n    <build>\n        <plugins>\n            <plugin>\n                <groupId>org.apache.maven.plugins</groupId>\n                <artifactId>maven-compiler-plugin</artifactId>\n                <version>3.8.1</version>\n                <configuration>\n                    <source>1.8</source>\n                    <target>1.8</target>\n                </configuration>\n            </plugin>\n        </plugins>\n    </build>\n</project>\n

Key Components of pom.xml: the project coordinates (groupId, artifactId, version), the packaging type, dependencies, and build plugins.

"},{"location":"langdives/Java/Maven/#dependency-management","title":"Dependency Management","text":"

Maven simplifies dependency management by automatically downloading required libraries from repositories.

Scopes of Dependencies: compile (default), provided, runtime, test, system, and import.

Example Dependency Declaration

<dependency>\n    <groupId>org.apache.commons</groupId>\n    <artifactId>commons-lang3</artifactId>\n    <version>3.12.0</version>\n</dependency>\n
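To limit a dependency to a single classpath, add a scope; a test-scoped dependency (JUnit shown purely as an illustration) looks like this:
<dependency>\n    <groupId>junit</groupId>\n    <artifactId>junit</artifactId>\n    <version>4.13.2</version>\n    <scope>test</scope>\n</dependency>\n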
"},{"location":"langdives/Java/Maven/#maven-repositories","title":"Maven Repositories","text":"

Maven resolves dependencies from repositories: first the local repository (~/.m2/repository), then Maven Central and any custom remote repositories you declare.

Adding a Custom Repository

<repositories>\n    <repository>\n        <id>my-repo</id>\n        <url>https://my-repo-url</url>\n    </repository>\n</repositories>\n
"},{"location":"langdives/Java/Maven/#maven-plugins","title":"Maven Plugins","text":"

Maven plugins extend its functionality. Plugins can handle tasks such as compiling, testing, or packaging.

Maven Compiler Plugin

<plugin>\n    <groupId>org.apache.maven.plugins</groupId>\n    <artifactId>maven-compiler-plugin</artifactId>\n    <version>3.8.1</version>\n    <configuration>\n        <source>1.8</source>\n        <target>1.8</target>\n    </configuration>\n</plugin>\n
This plugin ensures the source code is compiled with Java 8.

"},{"location":"langdives/Java/Maven/#maven-project-structure","title":"Maven Project Structure","text":"Maven recommended standard directory structure
/my-project\n\u2502\n\u251c\u2500\u2500 pom.xml               # Project Object Model configuration\n\u251c\u2500\u2500 src\n\u2502   \u2514\u2500\u2500 main\n\u2502       \u2514\u2500\u2500 java          # Source code\n\u2502   \u2514\u2500\u2500 test\n\u2502       \u2514\u2500\u2500 java          # Unit tests\n\u2514\u2500\u2500 target                # Output directory (JAR, WAR)\n

Maven supports multi-module projects, allowing multiple related projects to be managed together.

Directory Structure
/parent-project\n\u2502\n\u251c\u2500\u2500 pom.xml (parent)\n\u251c\u2500\u2500 module-1/\n\u2502   \u2514\u2500\u2500 pom.xml\n\u2514\u2500\u2500 module-2/\n    \u2514\u2500\u2500 pom.xml\n
The parent pom.xml defines the modules:
<modules>\n    <module>module-1</module>\n    <module>module-2</module>\n</modules>\n
Building all modules
mvn install\n
"},{"location":"langdives/Java/Maven/#maven-wrapper-mvnw","title":"Maven Wrapper (mvnw)","text":"

Similar to Gradle, Maven has a wrapper (mvnw) that ensures the project uses a specific Maven version.

Add Maven Wrapper
mvn -N io.takari:maven:wrapper\n
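Once generated, build through the wrapper scripts so every machine uses the same Maven version:
./mvnw clean install      # Unix/macOS\nmvnw.cmd clean install    # Windows\n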
"},{"location":"langdives/Java/Maven/#maven-commands","title":"Maven Commands","text":"

Here are some common Maven commands

Command Description mvn compile Compiles the source code. mvn test Runs unit tests. mvn package Packages the code into a JAR/WAR. mvn install Installs the artifact to the local repository. mvn deploy Deploys the artifact to a remote repository. mvn clean Cleans the target/ directory. mvn dependency:tree Displays the project's dependency tree."},{"location":"langdives/Java/Maven/#best-practices","title":"Best Practices","text":""},{"location":"langdives/Java/Maven/#summary","title":"Summary","text":"

Maven is a mature, stable tool that simplifies building and managing Java applications. Its focus on conventions reduces the need for complex configurations, making it ideal for enterprise projects. While Maven may lack some of the flexibility and speed of Gradle, it is widely used in large organizations for its reliability and standardization. For projects requiring strict conventions and extensive dependency management, Maven remains a popular choice.

"},{"location":"langdives/Java/MavenVsGradle/","title":"Maven vs Gradle","text":""},{"location":"langdives/Java/MavenVsGradle/#comparision","title":"Comparision","text":"Aspect Maven Gradle Configuration Style Uses XML (pom.xml). Uses Groovy/Kotlin DSL (build.gradle). Performance Slower, especially for large projects (no build caching). Faster with incremental builds and caching. Flexibility Follows convention over configuration, less customizable. Highly customizable, supports custom build logic. Dependency Management Maven Central and custom repositories. Supports Maven Central, JCenter, Ivy, and custom repositories. Plugin System Pre-built Maven plugins (strict lifecycle integration). More flexible plugins with multiple custom task types. Build Output Produces JARs, WARs, and other artifacts. Produces JARs, WARs, and custom artifacts more easily. Multi-Project Support Good for enterprise projects with structured multi-module builds. Excellent for multi-module projects, especially in complex setups. Integration with CI/CD Easily integrates with Jenkins, GitHub Actions, Bamboo. Same level of integration with Jenkins, CircleCI, GitHub Actions. Use in Android Development Not suitable. Preferred build tool for Android development. Incremental Builds Not supported. Supported, resulting in faster builds. Offline Mode Uses the local Maven repository (~/.m2/repository). Uses a local cache (~/.gradle/caches/) and has offline mode. Version Control of Build Tool Maven Wrapper (mvnw) ensures consistent versions. Gradle Wrapper (gradlew) ensures consistent versions. Preferred Projects Enterprise Java applications with well-defined standards. Android apps, complex and large projects with custom build requirements."},{"location":"langdives/Java/MavenVsGradle/#when-to-use-gradle","title":"When to Use Gradle ?","text":""},{"location":"langdives/Java/MavenVsGradle/#when-to-use-maven","title":"When to Use Maven ?","text":""},{"location":"langdives/Java/MavenVsGradle/#advantages-of-gradle","title":"Advantages of Gradle","text":""},{"location":"langdives/Java/MavenVsGradle/#advantages-of-maven","title":"Advantages of Maven","text":""},{"location":"langdives/Java/MavenVsGradle/#which-tool-is-preferred","title":"Which Tool is Preferred?","text":""},{"location":"langdives/Java/MavenVsGradle/#where-gradle-maven-fit","title":"Where Gradle Maven Fit ?","text":"Component Role Gradle / Maven Interaction JDK (Java Development Kit) Provides tools to compile Java code into bytecode. Gradle and Maven use the JDK compiler (javac) to build code. JVM (Java Virtual Machine) Runs the compiled bytecode (.class files). Gradle/Maven can execute unit tests and applications on the JVM. JRE (Java Runtime Environment) Provides the libraries required to run Java applications. The output artifacts (e.g., JAR/WAR) produced by Gradle/Maven require the JRE to run. "},{"location":"langdives/Java/MavenVsGradle/#summary","title":"Summary","text":"

In conclusion, both Maven and Gradle are excellent tools, and the choice depends on the project requirements. For enterprise applications, Maven remains a solid choice. For Android apps, large multi-module projects, or performance-critical builds, Gradle stands out as the preferred option.

"},{"location":"langdives/Java/MemoryModel/","title":"Java Memory Model","text":"

Java uses a memory model that divides memory into different areas, primarily the heap and stack.

"},{"location":"langdives/Java/MemoryModel/#heap-memory","title":"Heap Memory","text":"

The heap is mainly used for dynamic memory allocation. Objects created using the new keyword are stored in the heap. As for lifetime, objects in the heap remain in memory until they are no longer referenced and are garbage collected, which means an object's lifetime is not tied to the scope of a method. Accessing memory in the heap is slower than in the stack due to its dynamic nature and the potential for fragmentation.

Note

"},{"location":"langdives/Java/MemoryModel/#stack-memory","title":"Stack Memory","text":"

The stack is mainly used for static memory allocation. It stores method call frames, which contain local variables, method parameters, and return addresses. The lifetime of a variable in the stack is limited to the duration of the method call: once the method returns, the stack frame is popped off and the memory is reclaimed. Accessing stack memory is faster than heap memory because it follows a Last In, First Out (LIFO) order, allowing quick allocation and deallocation.

Note

"},{"location":"langdives/Java/MemoryModel/#example","title":"Example","text":"

Example

public class MemoryExample {\n    public static void main(String[] args) {\n        int localVar = 10; // Stack memory\n\n        MemoryExample obj = new MemoryExample(); // Heap memory\n        obj.display(localVar); // Passing parameter, stack memory\n    }\n\n    public void display(int param) { // Stack memory\n        System.out.println(param);\n        String message = \"Hello\"; // Heap memory (String object)\n    }\n}\n
"},{"location":"langdives/Java/MemoryModel/#differences","title":"Differences","text":"Feature Heap Stack Allocation Dynamic Static Lifetime Until garbage collected Duration of method call Memory Size Larger (configurable) Smaller (configurable) Access Speed Slower Faster Data Type Objects, arrays Primitive types, references Management Garbage collection Automatically managed by JVM"},{"location":"langdives/Java/PrimitiveReferenceTypes/","title":"Primitive and Reference Types","text":""},{"location":"langdives/Java/PrimitiveReferenceTypes/#primitive-types","title":"Primitive Types","text":"

Java has 8 primitive data types that store simple values directly in memory.

Type Size Default Value Range Example byte 1 byte (8 bits) 0 -128 to 127 byte b = 100; short 2 bytes (16 bits) 0 -32,768 to 32,767 short s = 30000; int 4 bytes (32 bits) 0 -2^31 to (2^31)-1 int i = 100000; long 8 bytes (64 bits) 0L -2^63 to (2^63)-1 long l = 100000L; float 4 bytes (32 bits) 0.0f ~\u00b13.4E38 (7 decimal digits precision) float f = 3.14f; double 8 bytes (64 bits) 0.0 ~\u00b11.8E308 (15 decimal digits precision) double d = 3.14159; char 2 bytes (16 bits) '\\u0000' Unicode characters (0 to 65,535) char c = 'A'; boolean 1 bit (virtual) false true or false boolean b = true;"},{"location":"langdives/Java/PrimitiveReferenceTypes/#reference-types","title":"Reference Types","text":"

Reference types store references (addresses) to objects in memory, unlike primitive types that store values directly.

Primitive Wrapper Class byte Byte short Short int Integer long Long float Float double Double char Character boolean Boolean"},{"location":"langdives/Java/PrimitiveReferenceTypes/#differences","title":"Differences","text":"Aspect Primitive Types Reference Types Storage Store actual values. Store references to objects in memory. Memory Allocation Stored in stack memory. Stored in heap memory. Default Values Zero/false equivalents. null for uninitialized references. Examples int, char, boolean. String, Arrays, Classes, Interfaces, etc."},{"location":"langdives/Java/ReferenceTypesInDepth/","title":"Reference Types In Depth.","text":"

Let's take a deep dive into how memory management, object references, and behaviors work in Java, with a focus on String handling and other reference types like arrays, classes, and wrapper objects.

"},{"location":"langdives/Java/ReferenceTypesInDepth/#intro","title":"Intro","text":""},{"location":"langdives/Java/ReferenceTypesInDepth/#string","title":"String","text":"

The String class in Java is a special reference type with some unique behaviors. Strings are immutable: once a String object is created, it cannot be changed. Any modification of a String results in the creation of a new object in memory.
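
A quick illustration of immutability: modifying methods return a new String rather than changing the original.

String s = \"Hello\";\ns.concat(\" World\");           // returns a NEW String; s itself is unchanged\nSystem.out.println(s);        // Output: Hello\n\ns = s.concat(\" World\");       // reassign to capture the new object\nSystem.out.println(s);        // Output: Hello World\n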

String Pool (Interned Strings): A special memory area inside the heap used to store string literals. If a string literal like \"Hello\" is created, Java first checks the string pool to see if it already exists. If it does, it returns the reference from the pool. If not, the string is added to the pool.

Example
String s1 = \"Hello\";  // Stored in the String Pool\nString s2 = \"Hello\";  // s2 points to the same object as s1\n\nSystem.out.println(s1 == s2);  // true (same reference)\n

Heap Memory: When you use the new keyword, a new String object is always created in the heap memory. Even if the same string already exists in the string pool, the new keyword forces the creation of a separate instance in the heap.

Example

String s1 = new String(\"Hello\"); // creates a new object outside the pool in the heap.\n\nString s2 = \"Hello\"; // Stored in the String Pool\n\nSystem.out.println(s1 == s2);  // false (different references)\n
When you use new String(), Java forces the creation of a new object in heap even if the same string exists in the pool.

"},{"location":"langdives/Java/ReferenceTypesInDepth/#arrays","title":"Arrays","text":"

Arrays are reference types, meaning the array variable stores a reference to the memory location where the array data is stored.

Example
int[] arr1 = {1, 2, 3};\nint[] arr2 = arr1;  // arr2 now references the same array as arr1\n\narr2[0] = 10;  // Modifies the original array\n\nSystem.out.println(arr1[0]);  // Output: 10 (both arr1 and arr2 reference the same array)\n

How Array References Work: assigning arr2 = arr1 copies the reference, not the array, so both variables point to the same underlying array in the heap.

"},{"location":"langdives/Java/ReferenceTypesInDepth/#classes-and-objects","title":"Classes and Objects","text":"

When you create an object using new, the reference variable points to the object in heap memory.

Example
class Person {\n    String name;\n}\n\nPerson p1 = new Person();\np1.name = \"Alice\";\n\nPerson p2 = p1;  // p2 points to the same object as p1\np2.name = \"Bob\";\n\nSystem.out.println(p1.name);  // Output: Bob (both references point to the same object)\n

How References Work with Objects: the assignment p2 = p1 copies only the reference, so both variables point to the same heap object, and changes made through one are visible through the other.

"},{"location":"langdives/Java/ReferenceTypesInDepth/#wrapper-classes","title":"Wrapper Classes","text":"

Wrapper classes (Integer, Double, Boolean, etc.) wrap primitive types into objects. These are reference types, and Java performs autoboxing/unboxing to convert between primitive types and wrapper objects.

Example
Integer num1 = 100;\nInteger num2 = 100;\n\nSystem.out.println(num1 == num2);  // true (for values within -128 to 127)\n\nInteger num3 = 200;\nInteger num4 = 200;\n\nSystem.out.println(num3 == num4);  // false (new objects for values beyond 127)\n

Wrapper Caching: Java caches wrapper objects for small values (for Integer, -128 to 127 by default), so == can return true for cached values but false outside that range.
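
Because of this caching, == is unreliable for boxed values; compare wrapper values with equals() instead:

Integer a = 200;\nInteger b = 200;\n\nSystem.out.println(a == b);       // false (two distinct objects)\nSystem.out.println(a.equals(b));  // true (compares the wrapped values)\n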

"},{"location":"langdives/Java/ReferenceTypesInDepth/#reference-and-deep-copy","title":"Reference and Deep Copy","text":"

Shallow Copy: Copies only the reference, so both variables refer to the same object.

Example
int[] original = {1, 2, 3};\nint[] shallowCopy = original;  // Points to the same array\n\nshallowCopy[0] = 100;\nSystem.out.println(original[0]);  // Output: 100\n

Deep Copy: Creates a new object with the same data.

Example
int[] original = {1, 2, 3};\nint[] deepCopy = original.clone();  // Creates a new array\n\ndeepCopy[0] = 100;\nSystem.out.println(original[0]);  // Output: 1\n
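For objects, a deep copy usually means building a new instance field by field, for example via a copy constructor; the Person class here is a hypothetical illustration:
class Person {\n    String name;\n\n    Person(String name) { this.name = name; }\n    Person(Person other) { this.name = other.name; }  // copy constructor\n}\n\nPerson p1 = new Person(\"Alice\");\nPerson p2 = new Person(p1);  // independent copy\n\np2.name = \"Bob\";\nSystem.out.println(p1.name);  // Output: Alice\n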
"},{"location":"langdives/Java/ReferenceTypesInDepth/#nullnullpointerexception","title":"Null/NullPointerException","text":"

When a reference is not initialized, it holds the value null. Accessing a field or method on a null reference throws a NullPointerException.

Example
Person p = null;\nSystem.out.println(p.name);  // Throws NullPointerException\n
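A minimal guard is an explicit null check before dereferencing:
Person p = null;\nif (p != null) {\n    System.out.println(p.name);\n} else {\n    System.out.println(\"p is null, skipping access\");\n}\n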
"},{"location":"langdives/Java/ReferenceTypesInDepth/#garbage-collection","title":"Garbage Collection","text":"

Java uses Garbage Collection to manage memory. When no references point to an object, it becomes eligible for garbage collection.

Example
Person p1 = new Person();  // Object created\np1 = null;  // Now eligible for garbage collection\n
"},{"location":"langdives/Java/ReferenceTypesInDepth/#summary","title":"Summary","text":""},{"location":"langdives/Java/ReferenceTypesInDepth/#string-pool-in-depth","title":"String Pool In Depth","text":"

The String Pool (also called the intern pool) in Java is implemented using a Hash Table-like data structure internally. Let\u2019s explore the design and behavior behind this structure:

"},{"location":"langdives/Java/ReferenceTypesInDepth/#internals","title":"Internals","text":" Simplified conceptual pseudocode Example

How the pool works internally

class StringPool {\n    private static Map<String, String> pool = new HashMap<>();\n\n    public static String intern(String str) {\n        if (pool.containsKey(str)) {\n            return pool.get(str);  // Return existing reference\n        } else {\n            pool.put(str, str);    // Add to the pool\n            return str;\n        }\n    }\n}\n
- When calling String.intern(), Java interns the string, meaning it adds the string to the pool if it's not already present.

String Pool Usage Example
public class Main {\n    public static void main(String[] args) {\n        String s1 = new String(\"Hello\");\n        String s2 = s1.intern();  // Adds \"Hello\" to the pool, if not already present\n\n        String s3 = \"Hello\";  // Uses the interned string from the pool\n\n        System.out.println(s2 == s3);  // true (same reference from the pool)\n    }\n}\n
"},{"location":"langdives/Java/ReferenceTypesInDepth/#why-use-hash-table","title":"Why Use Hash Table ?","text":"

Key Takeaways: a hash table gives O(1) average-time lookups, which makes checking whether a literal already exists in the pool cheap and guarantees each distinct literal is stored only once.

"},{"location":"langdives/Java/ReferenceTypesInDepth/#string-pool-summary","title":"String pool Summary","text":"

The String Pool is implemented using a Hash Table-like data structure. This allows for efficient string reuse through fast lookups and ensures no duplicate literals are created. Strings added via literals or intern() are stored in the pool, and existing references are returned on subsequent requests.

"},{"location":"langdives/Java/StreamsLambdas/","title":"Streams and Lambdas","text":""},{"location":"langdives/Java/StreamsLambdas/#lambda-expressions","title":"Lambda Expressions","text":"

Enables functional programming by treating functions as first-class citizens.

Example
List<String> names = Arrays.asList(\"Alice\", \"Bob\", \"Charlie\");\nnames.forEach(name -> System.out.println(name));\n
"},{"location":"langdives/Java/StreamsLambdas/#functional-interfaces","title":"Functional Interfaces","text":"

A functional interface is an interface with only one abstract method. This is important because lambda expressions can be used to provide the implementation for these interfaces.

Example Example functional interface
@FunctionalInterface  // Optional but ensures the interface has only one abstract method.\ninterface MyFunction {\n    int apply(int a, int b);  // Single abstract method\n}\n

Now, when you want to use this interface, you don\u2019t need to create a class and provide an implementation like before. Instead, you can use a lambda expression to quickly provide the logic.

Using Lambda with MyFunction
MyFunction addition = (a, b) -> a + b;  // Lambda expression for addition\nSystem.out.println(addition.apply(5, 3));  // Output: 8\n
Explanation "},{"location":"langdives/Java/StreamsLambdas/#method-references","title":"Method References","text":"

A method reference is a shorthand way of writing a lambda when a method already exists that matches the lambda\u2019s purpose. This makes the code more concise and readable.

Example with forEach and Method Reference

Consider the following list of names:

List<String> names = Arrays.asList(\"Alice\", \"Bob\", \"Charlie\");\n

You want to print all names using forEach(). You could do it with a lambda like this:

names.forEach(name -> System.out.println(name));  // Lambda expression\n

Now, Java provides a shorthand: Method Reference. Since System.out.println() already matches the structure (String) -> void, you can write:

names.forEach(System.out::println);  // Method reference\n

Explanation

Use method references when an existing method already matches the lambda's parameters and return type; they make the code shorter and clearer.

More Examples
// 1. Static method reference\nFunction<String, Integer> parse = Integer::parseInt;\nSystem.out.println(parse.apply(\"123\"));  // Output: 123\n\n// 2. Instance method reference on an arbitrary object\nList<String> words = Arrays.asList(\"one\", \"two\", \"three\");\nwords.sort(String::compareToIgnoreCase);  // Sorts case-insensitively\n
"},{"location":"langdives/Java/StreamsLambdas/#streams-api","title":"Streams API","text":"

Introduced in Java 8 to process collections in a declarative way.

Core Stream Operations fall into three groups, covered below: stream creation, intermediate operations, and terminal operations.

"},{"location":"langdives/Java/StreamsLambdas/#creation","title":"Creation","text":"
Stream<Integer> stream = Stream.of(1, 2, 3, 4);\nList<String> list = Arrays.asList(\"A\", \"B\", \"C\");\nStream<String> streamFromList = list.stream();\n
"},{"location":"langdives/Java/StreamsLambdas/#intermediate-operations-return-new-streams-lazy-evaluation","title":"Intermediate Operations (return new streams, lazy evaluation)","text":"filter()
// Filters elements based on a predicate.\nList<Integer> evenNumbers = stream.filter(n -> n % 2 == 0).toList();\n
map()
// Transforms elements.\nList<Integer> lengths = list.stream().map(String::length).toList();\n
sorted()
// Sorts elements.\nList<Integer> sortedList = stream.sorted().toList();\n
"},{"location":"langdives/Java/StreamsLambdas/#terminal-operations-trigger-computation","title":"Terminal Operations (trigger computation)","text":"forEach()
// Iterates through elements.\nlist.stream().forEach(System.out::println);\n
collect()
// Collects elements into a collection.\nList<String> newList = list.stream().filter(s -> s.startsWith(\"A\")).collect(Collectors.toList());\n
reduce()
// Reduces the elements to a single result.\nint sum = Stream.of(1, 2, 3, 4).reduce(0, Integer::sum);\n
"},{"location":"langdives/Java/StreamsLambdas/#parallel-streams","title":"Parallel Streams","text":"
// Used to process elements in parallel for better performance.\nlist.parallelStream().forEach(System.out::println);\n
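Note that forEach on a parallel stream gives no ordering guarantee; when encounter order matters, forEachOrdered preserves it:
// Prints elements in their original encounter order, even on a parallel stream.\nlist.parallelStream().forEachOrdered(System.out::println);\n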
"},{"location":"langdives/Java/StreamsLambdas/#examples-streamslambdas","title":"Examples Streams/Lambdas","text":"Find the sum of even numbers
int sumOfEvens = Stream.of(1, 2, 3, 4, 5, 6)\n                      .filter(n -> n % 2 == 0)\n                      .reduce(0, Integer::sum);\nSystem.out.println(sumOfEvens);  // Output: 12\n
Convert List of Strings to Uppercase
List<String> upperCaseNames = list.stream()\n                                  .map(String::toUpperCase)\n                                  .collect(Collectors.toList());\n
Group elements by length
Map<Integer, List<String>> groupedByLength = list.stream()\n                                                .collect(Collectors.groupingBy(String::length));\n
"},{"location":"langdives/Java/StreamsLambdas/#best-practices","title":"Best Practices","text":""},{"location":"langdives/Java/ThreadPoolTuning/","title":"Thread Pool Configuration Tuning","text":"

Thread pool configuration is critical for optimizing the performance of your applications. Poorly configured thread pools can lead to problems such as CPU starvation, thread contention, memory exhaustion, or poor resource utilization. In this article, we'll dive deep into CPU-bound vs I/O-bound tasks, explore how to determine optimal thread pool sizes, and discuss key considerations such as queue types and rejection policies.

"},{"location":"langdives/Java/ThreadPoolTuning/#cpu-vs-io-bound-tasks","title":"CPU vs I/O Bound Tasks","text":"

When configuring thread pools, it is essential to classify your tasks as CPU-bound or I/O-bound, as this distinction guides the number of threads your pool should maintain.

"},{"location":"langdives/Java/ThreadPoolTuning/#cpu-bound-tasks","title":"CPU-Bound Tasks","text":"

Tasks that perform intensive computations (e.g., mathematical calculations, data processing, encoding). Here the limiting factor is CPU core availability, so it's better to avoid context-switching overhead by keeping the number of threads close to the number of available CPU cores.

Optimal Thread Pool Size for CPU-Bound Tasks
int coreCount = Runtime.getRuntime().availableProcessors();\nExecutorService cpuBoundPool = Executors.newFixedThreadPool(coreCount);\n

Note

If more threads than CPU cores are running, threads will compete for CPU cycles, causing context switching, which adds overhead.

Optimal Threads = Number of Cores\n

When to use ? "},{"location":"langdives/Java/ThreadPoolTuning/#io-bound-tasks","title":"I/O-Bound Tasks","text":"

Tasks that spend most of their time waiting for I/O operations (e.g., network, database, file I/O). Here the limiting factor is the time spent waiting on I/O, so it's better to use more threads than the number of cores to ensure idle CPU cycles are used efficiently while threads wait.

Optimal Thread Pool Size for I/O-Bound Tasks
int coreCount = Runtime.getRuntime().availableProcessors();\nint optimalThreads = coreCount * 2 + 1;\nExecutorService ioBoundPool = Executors.newFixedThreadPool(optimalThreads);\n

Note

Since the tasks spend significant time waiting for I/O, more threads can be created to make sure the CPU is not idle while other threads wait for input/output operations.

Optimal Threads = Number of Cores * (1 + Wait Time / Compute Time)\n

When to use ? "},{"location":"langdives/Java/ThreadPoolTuning/#queues-for-threadpoolexecutor","title":"Queues for ThreadPoolExecutor","text":"

Choosing the right work queue is crucial for memory management and task scheduling. The queue holds tasks waiting to be executed when all threads are busy.

"},{"location":"langdives/Java/ThreadPoolTuning/#unbounded-queue","title":"Unbounded Queue","text":"

A queue with no size limit, but if too many tasks are submitted, it can lead to memory exhaustion (out-of-memory errors).

LinkedBlockingQueue
BlockingQueue<Runnable> queue = new LinkedBlockingQueue<>();\n
When to use ?

Suitable only if you expect tasks to complete quickly and the queue will not grow indefinitely.

"},{"location":"langdives/Java/ThreadPoolTuning/#bounded-queue","title":"Bounded Queue","text":"

A queue with a fixed size limit; it prevents unbounded memory usage, and if the queue is full, tasks are rejected or handled according to the configured rejection policy.

ArrayBlockingQueue
BlockingQueue<Runnable> queue = new ArrayBlockingQueue<>(10);\n
When to use ?

Ideal for controlled environments where you want to cap the number of waiting tasks.

"},{"location":"langdives/Java/ThreadPoolTuning/#thread-pool-size-tuning","title":"Thread Pool Size Tuning","text":"For CPU-Bound Tasks
Optimal Threads = Number of Cores\n
For I/O-Bound Tasks
Optimal Threads = Number of Cores * (1 + Wait Time / Compute Time)\n
Example

If a machine has 4 cores and a thread spends 70% of its time waiting on I/O and only 30% performing work:

Optimal Threads = 4 * (1 + 0.7 / 0.3) = 4 * 3.33 \u2248 13\n
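
A small sketch of turning this formula into code (assuming the usual java.util.concurrent imports); the 0.7/0.3 wait/compute profile is an assumed measurement you would obtain by profiling:

int cores = Runtime.getRuntime().availableProcessors();\ndouble waitTime = 0.7, computeTime = 0.3;  // assumed task profile\nint optimalThreads = (int) Math.ceil(cores * (1 + waitTime / computeTime));\nExecutorService ioBoundPool = Executors.newFixedThreadPool(optimalThreads);\n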

"},{"location":"langdives/Java/ThreadPoolTuning/#rejection-policies","title":"Rejection Policies","text":"

When the task queue is full and the pool is at its maximum size, the ThreadPoolExecutor must decide what to do with new tasks. You can configure rejection policies to handle these situations.
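
For example, a pool can be constructed with CallerRunsPolicy so that, under overload, the submitting thread runs the task itself instead of the task being dropped:

ThreadPoolExecutor executor = new ThreadPoolExecutor(\n        2, 4, 30, TimeUnit.SECONDS,\n        new ArrayBlockingQueue<>(2),\n        new ThreadPoolExecutor.CallerRunsPolicy());  // applied once the queue and pool are full\n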

"},{"location":"langdives/Java/ThreadPoolTuning/#abortpolicy-default","title":"AbortPolicy (Default)","text":""},{"location":"langdives/Java/ThreadPoolTuning/#callerrunspolicy","title":"CallerRunsPolicy","text":""},{"location":"langdives/Java/ThreadPoolTuning/#discardpolicy","title":"DiscardPolicy","text":""},{"location":"langdives/Java/ThreadPoolTuning/#discardoldestpolicy","title":"DiscardOldestPolicy","text":""},{"location":"langdives/Java/ThreadPoolTuning/#monitoring-thread-pools","title":"Monitoring Thread Pools","text":"

Monitoring thread pools ensures that your configuration is correct and performing well. You can monitor the following metrics:

Key Metrics to Monitor: active thread count, total task count, completed task count, and current queue size.

Example: Monitoring Active Threads
ThreadPoolExecutor executor = new ThreadPoolExecutor(2, 4, 30, TimeUnit.SECONDS,\n      new ArrayBlockingQueue<>(2));\n\nSystem.out.println(\"Active Threads: \" + executor.getActiveCount());\nSystem.out.println(\"Task Count: \" + executor.getTaskCount());\nSystem.out.println(\"Completed Tasks: \" + executor.getCompletedTaskCount());\n
"},{"location":"langdives/Java/ThreadPoolTuning/#dynamic-thread-pool-adjustment","title":"Dynamic Thread Pool Adjustment","text":"

Sometimes, you may need to adjust the pool size at runtime to respond to changing workloads.

Example: Adjusting Thread Pool Size Dynamically
ThreadPoolExecutor executor = new ThreadPoolExecutor(2, 4, 30, TimeUnit.SECONDS,\n      new ArrayBlockingQueue<>(10));\n\n// Adjust core and max pool size dynamically\nexecutor.setCorePoolSize(3);\nexecutor.setMaximumPoolSize(6);\n
"},{"location":"langdives/Java/ThreadPoolTuning/#best-practices","title":"Best Practices","text":""},{"location":"langdives/Java/ThreadPools/","title":"Thread Pools.","text":""},{"location":"langdives/Java/ThreadPools/#what-is-a-thread-pool","title":"What is a Thread Pool ?","text":"

A thread pool is a collection of worker threads that are created at the start and reused to perform multiple tasks. When tasks are submitted to the pool, a free thread picks up the task and executes it. If no threads are free, the tasks wait in a queue until one becomes available.

"},{"location":"langdives/Java/ThreadPools/#advantages-of-thread-pooling","title":"Advantages of Thread Pooling","text":""},{"location":"langdives/Java/ThreadPools/#creating-thread-pools","title":"Creating Thread Pools","text":""},{"location":"langdives/Java/ThreadPools/#ways-to-create","title":"Ways to Create","text":""},{"location":"langdives/Java/ThreadPools/#fixed-thread-pool","title":"Fixed Thread Pool","text":"

Creates a pool with a fixed number of threads. When all threads are busy, tasks are placed in a queue and executed as soon as a thread becomes available.

newFixedThreadPool
import java.util.concurrent.ExecutorService;\nimport java.util.concurrent.Executors;\n\npublic class FixedThreadPoolExample {\n    public static void main(String[] args) {\n        ExecutorService executor = Executors.newFixedThreadPool(3);\n\n        for (int i = 1; i <= 6; i++) {\n            int taskId = i;\n            executor.execute(() -> {\n                System.out.println(\"Task \" + taskId + \" executed by \" + Thread.currentThread().getName());\n            });\n        }\n\n        executor.shutdown();\n    }\n}\n
Advantages When to Use ? "},{"location":"langdives/Java/ThreadPools/#cached-thread-pool","title":"Cached Thread Pool","text":"

A dynamic thread pool where threads are created as needed. If threads are idle for 60 seconds, they are terminated. If a thread is available, it will be reused for a new task.

newCachedThreadPool
import java.util.concurrent.ExecutorService;\nimport java.util.concurrent.Executors;\n\npublic class CachedThreadPoolExample {\n    public static void main(String[] args) {\n        ExecutorService executor = Executors.newCachedThreadPool();\n\n        for (int i = 1; i <= 5; i++) {\n            int taskId = i;\n            executor.execute(() -> {\n                System.out.println(\"Task \" + taskId + \" executed by \" + Thread.currentThread().getName());\n            });\n        }\n\n        executor.shutdown();\n    }\n}\n
Advantages When to Use ? Drawbacks "},{"location":"langdives/Java/ThreadPools/#single-thread-executor","title":"Single Thread Executor","text":"

A single-threaded executor that ensures tasks are executed sequentially in the order they are submitted. If the thread dies due to an exception, a new thread is created to replace it.

newSingleThreadExecutor
import java.util.concurrent.ExecutorService;\nimport java.util.concurrent.Executors;\n\npublic class SingleThreadExecutorExample {\n    public static void main(String[] args) {\n        ExecutorService executor = Executors.newSingleThreadExecutor();\n\n        for (int i = 1; i <= 3; i++) {\n            int taskId = i;\n            executor.execute(() -> {\n                System.out.println(\"Task \" + taskId + \" executed by \" + Thread.currentThread().getName());\n            });\n        }\n\n        executor.shutdown();\n    }\n}\n
Advantages When to Use ? "},{"location":"langdives/Java/ThreadPools/#scheduled-thread-pool","title":"Scheduled Thread Pool","text":"

A scheduled thread pool allows you to schedule tasks to run after a delay or periodically at a fixed rate.

newScheduledThreadPool
import java.util.concurrent.Executors;\nimport java.util.concurrent.ScheduledExecutorService;\nimport java.util.concurrent.TimeUnit;\n\npublic class ScheduledThreadPoolExample {\n    public static void main(String[] args) {\n        ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(2);\n\n        Runnable task = () -> System.out.println(\"Task executed by \" + Thread.currentThread().getName());\n\n        // Schedule task to run after 3 seconds\n        scheduler.schedule(task, 3, TimeUnit.SECONDS);\n\n        // Schedule task to run repeatedly every 2 seconds\n        scheduler.scheduleAtFixedRate(task, 1, 2, TimeUnit.SECONDS);\n\n        // Allow the tasks to complete after 10 seconds\n        scheduler.schedule(() -> scheduler.shutdown(), 10, TimeUnit.SECONDS);\n    }\n}\n
Advantages When to Use ? "},{"location":"langdives/Java/ThreadPools/#threadpoolexecutor","title":"ThreadPoolExecutor","text":"

ThreadPoolExecutor is the core implementation of thread pools in Java. Using it allows you to fine-tune the thread pool\u2019s behavior with more control over the number of threads, queue type, and rejection policy.

Parameters of ThreadPoolExecutor
ThreadPoolExecutor executor = new ThreadPoolExecutor(\n        corePoolSize,      // Minimum number of threads\n        maximumPoolSize,   // Maximum number of threads\n        keepAliveTime,     // Idle time before a thread is terminated\n        timeUnit,          // Time unit for keepAliveTime\n        workQueue,         // Queue to hold waiting tasks\n        threadFactory,     // Factory to create new threads\n        handler            // Rejection policy when the queue is full\n);\n
Custom Thread Pool
import java.util.concurrent.*;\n\npublic class CustomThreadPoolExecutorExample {\n    public static void main(String[] args) {\n        ThreadPoolExecutor executor = new ThreadPoolExecutor(\n                2, 4, 30, TimeUnit.SECONDS,\n                new LinkedBlockingQueue<>(2),   // Task queue with capacity 2\n                Executors.defaultThreadFactory(),\n                new ThreadPoolExecutor.CallerRunsPolicy() // Rejection policy\n        );\n\n        // Submit 6 tasks to the pool\n        for (int i = 1; i <= 6; i++) {\n            int taskId = i;\n            executor.execute(() -> {\n                System.out.println(\"Task \" + taskId + \" executed by \" + Thread.currentThread().getName());\n            });\n        }\n\n        executor.shutdown();\n    }\n}\n
Advantages When to Use ?

Common Rejection Policies in ThreadPoolExecutor: AbortPolicy (throws RejectedExecutionException), CallerRunsPolicy (runs the task on the submitting thread), DiscardPolicy (silently drops the task), and DiscardOldestPolicy (evicts the oldest queued task).

"},{"location":"langdives/Java/ThreadPools/#comparison","title":"Comparison","text":"Thread Pool Type Concurrency Parallelism Task Type When to Use Fixed Thread Pool Yes Yes Long-running tasks Limited number of known tasks. Cached Thread Pool Yes Yes Short-lived tasks Dynamic workloads with many I/O tasks. Single Thread Executor No No Sequential tasks Strictly ordered execution. Scheduled Thread Pool Yes Yes Timed or periodic tasks Periodic background tasks. Custom ThreadPoolExecutor Yes Yes Mixed Advanced control and tuning."},{"location":"langdives/Java/ThreadPools/#interface-concepts","title":"Interface Concepts","text":""},{"location":"langdives/Java/ThreadPools/#runnable-interface","title":"Runnable Interface","text":"

The Runnable interface represents a task that can run asynchronously in a thread but does not return any result or throw a checked exception.

Structure
@FunctionalInterface\npublic interface Runnable {\n    void run();\n}\n
Example
public class RunnableExample {\n    public static void main(String[] args) {\n        Runnable task = () -> {\n            System.out.println(\"Executing task in: \" + Thread.currentThread().getName());\n        };\n\n        Thread thread = new Thread(task);\n        thread.start();\n    }\n}\n
When to Use ? "},{"location":"langdives/Java/ThreadPools/#callable-interface","title":"Callable Interface","text":"

The Callable interface is similar to Runnable, but it can return a result and throw a checked exception.

Structure
@FunctionalInterface\npublic interface Callable<V> {\n    V call() throws Exception;\n}\n
Example
import java.util.concurrent.Callable;\n\npublic class CallableExample {\n    public static void main(String[] args) throws Exception {\n        Callable<Integer> task = () -> {\n            System.out.println(\"Executing task in: \" + Thread.currentThread().getName());\n            return 42;\n        };\n\n        // Direct call (for demonstration)\n        Integer result = task.call();\n        System.out.println(\"Task result: \" + result);\n    }\n}\n
When to Use ? "},{"location":"langdives/Java/ThreadPools/#future-interface","title":"Future Interface","text":"

A Future represents the result of an asynchronous computation. It provides methods to check if the computation is complete, wait for the result, and cancel the task if necessary.

Structure
public interface Future<V> {\n    boolean cancel(boolean mayInterruptIfRunning);\n    boolean isCancelled();\n    boolean isDone();\n    V get() throws InterruptedException, ExecutionException;\n    V get(long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException;\n}\n
Example
import java.util.concurrent.*;\n\npublic class FutureExample {\n    public static void main(String[] args) throws ExecutionException, InterruptedException {\n        ExecutorService executor = Executors.newSingleThreadExecutor();\n\n        Callable<Integer> task = () -> {\n            Thread.sleep(2000); // Simulate some work\n            return 42;\n        };\n\n        Future<Integer> future = executor.submit(task);\n\n        // Do something else while the task executes asynchronously\n        System.out.println(\"Task is running...\");\n\n        // Wait for the result\n        Integer result = future.get();\n        System.out.println(\"Task result: \" + result);\n\n        executor.shutdown();\n    }\n}\n
When to Use ? Key Methods: get() blocks until the result is ready, get(timeout, unit) waits with a timeout, cancel() attempts to stop the task, and isDone()/isCancelled() report status. "},{"location":"langdives/Java/ThreadPools/#blockingqueue-interface","title":"BlockingQueue Interface","text":"

BlockingQueue is a thread-safe queue that blocks the calling thread when the queue is full (on put) or empty (on take):

Structure
public interface BlockingQueue<E> extends Queue<E> {\n    void put(E e) throws InterruptedException;\n    E take() throws InterruptedException;\n    // Other methods for timed operations, size, etc.\n}\n
Example
import java.util.concurrent.*;\n\npublic class BlockingQueueExample {\n    public static void main(String[] args) {\n        BlockingQueue<Integer> queue = new ArrayBlockingQueue<>(2);\n\n        // Producer thread\n        new Thread(() -> {\n            try {\n                queue.put(1);\n                System.out.println(\"Added 1 to the queue\");\n                queue.put(2);\n                System.out.println(\"Added 2 to the queue\");\n                queue.put(3); // This will block until space is available\n                System.out.println(\"Added 3 to the queue\");\n            } catch (InterruptedException e) {\n                Thread.currentThread().interrupt();\n            }\n        }).start();\n\n        // Consumer thread\n        new Thread(() -> {\n            try {\n                Thread.sleep(1000); // Simulate some delay\n                System.out.println(\"Removed from queue: \" + queue.take());\n                System.out.println(\"Removed from queue: \" + queue.take());\n                System.out.println(\"Removed from queue: \" + queue.take());\n            } catch (InterruptedException e) {\n                Thread.currentThread().interrupt();\n            }\n        }).start();\n    }\n}\n
Usages ? Commonly used for producer-consumer pipelines and as the work queue backing thread pools.

Types of BlockingQueues: ArrayBlockingQueue (bounded), LinkedBlockingQueue (optionally bounded), PriorityBlockingQueue (priority-ordered), SynchronousQueue (direct handoff, no capacity), and DelayQueue (elements become available after a delay).

"},{"location":"langdives/Java/ThreadPools/#runnable-vs-callable","title":"Runnable vs Callable","text":"Aspect Runnable Callable Result No result Returns a result Exception Handling Cannot throw checked exceptions Can throw checked exceptions Functional Interface Yes (run() method) Yes (call() method) Use Case Simple background tasks Tasks that need to return a value or throw an exception"},{"location":"langdives/Java/ThreadPools/#how-these-work-together","title":"How These Work Together","text":"Using Runnable in a Thread Pool
ExecutorService executor = Executors.newFixedThreadPool(2);\nRunnable task = () -> System.out.println(\"Task executed by \" + Thread.currentThread().getName());\nexecutor.execute(task);\nexecutor.shutdown();\n
Using Callable with Future in a Thread Pool
ExecutorService executor = Executors.newFixedThreadPool(2);\nCallable<Integer> task = () -> 42;\nFuture<Integer> future = executor.submit(task);\nSystem.out.println(\"Result: \" + future.get());\nexecutor.shutdown();\n
Using BlockingQueue with ThreadPoolExecutor
BlockingQueue<Runnable> queue = new LinkedBlockingQueue<>(2);\nThreadPoolExecutor executor = new ThreadPoolExecutor(2, 4, 30, TimeUnit.SECONDS, queue);\nRunnable task = () -> System.out.println(\"Task executed by \" + Thread.currentThread().getName());\nexecutor.execute(task);\nexecutor.shutdown();\n
"},{"location":"langdives/Java/Threads-Atomicity/","title":"Atomicity","text":"

Atomicity is a fundamental concept in multithreading and concurrency that ensures operations are executed entirely or not at all, with no intermediate states visible to other threads. In Java, atomicity plays a crucial role in maintaining data consistency in concurrent environments.

This article covers everything about atomic operations, issues with atomicity, atomic classes in Java, and best practices to ensure atomic behavior in your code.

"},{"location":"langdives/Java/Threads-Atomicity/#what-is-atomicity","title":"What is Atomicity ?","text":"

In a multithreaded program, atomicity guarantees that operations are executed as a single, indivisible unit. When an operation is atomic, it either completes fully or has no effect at all, and no other thread can observe it in a partially completed state.

"},{"location":"langdives/Java/Threads-Atomicity/#why-it-is-important","title":"Why it is Important ?","text":"

Without atomic operations, multiple threads could interfere with each other, leading to race conditions and data inconsistencies. For example, if two threads try to increment a shared counter simultaneously, the result may not reflect both increments due to interleaving of operations.

"},{"location":"langdives/Java/Threads-Atomicity/#problems","title":"Problems ?","text":"Non-Atomic Operations on Primitive Data Types Counter Increment Example
class Counter {\n    private int count = 0;\n\n    public void increment() {\n        count++;  // Not atomic\n    }\n\n    public int getCount() {\n        return count;\n    }\n}\n

Problem

The statement count++ is not atomic. It consists of three operations: reading the current value, incrementing it, and writing the new value back.

If two threads execute count++ simultaneously, one increment might be lost due to race conditions.

"},{"location":"langdives/Java/Threads-Atomicity/#how-to-ensure-atomicity","title":"How to Ensure Atomicity ?","text":"

Java provides several ways to ensure atomicity, including synchronized blocks and methods, explicit locks such as ReentrantLock, and the lock-free atomic classes in java.util.concurrent.atomic.
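
As a minimal sketch, the broken counter above becomes thread-safe simply by marking its methods synchronized; the atomic classes below achieve the same result without a lock:

class SafeCounter {\n    private int count = 0;\n\n    public synchronized void increment() {\n        count++;  // only one thread executes this at a time\n    }\n\n    public synchronized int getCount() {\n        return count;\n    }\n}\n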

"},{"location":"langdives/Java/Threads-Atomicity/#javas-atomic-classes","title":"Java\u2019s Atomic Classes","text":"

The java.util.concurrent.atomic package offers classes that support lock-free, thread-safe operations on single variables. These classes rely on low-level atomic operations (like CAS \u2014 Compare-And-Swap) provided by the underlying hardware.

"},{"location":"langdives/Java/Threads-Atomicity/#common-atomic-classes","title":"Common Atomic Classes","text":""},{"location":"langdives/Java/Threads-Atomicity/#atomicinteger","title":"AtomicInteger","text":"Example: Solving the Increment Problem
import java.util.concurrent.atomic.AtomicInteger;\n\nclass AtomicCounter {\n    private final AtomicInteger count = new AtomicInteger(0);\n\n    public void increment() {\n        count.incrementAndGet();  // Atomic increment\n    }\n\n    public int getCount() {\n        return count.get();\n    }\n}\n\npublic class Main {\n    public static void main(String[] args) throws InterruptedException {\n        AtomicCounter counter = new AtomicCounter();\n\n        Thread t1 = new Thread(() -> {\n            for (int i = 0; i < 1000; i++) {\n                counter.increment();\n            }\n        });\n\n        Thread t2 = new Thread(() -> {\n            for (int i = 0; i < 1000; i++) {\n                counter.increment();\n            }\n        });\n\n        t1.start();\n        t2.start();\n        t1.join();\n        t2.join();\n\n        System.out.println(\"Final Count: \" + counter.getCount());  // Output: 2000\n    }\n}\n
Explanation "},{"location":"langdives/Java/Threads-Atomicity/#atomicboolean","title":"AtomicBoolean","text":"Example: Managing Flags Safely
import java.util.concurrent.atomic.AtomicBoolean;\n\nclass FlagManager {\n    private final AtomicBoolean isActive = new AtomicBoolean(false);\n\n    public void activate() {\n        if (isActive.compareAndSet(false, true)) {\n            System.out.println(\"Flag activated.\");\n        } else {\n            System.out.println(\"Flag already active.\");\n        }\n    }\n\n    public void deactivate() {\n        if (isActive.compareAndSet(true, false)) {\n            System.out.println(\"Flag deactivated.\");\n        } else {\n            System.out.println(\"Flag already inactive.\");\n        }\n    }\n}\n\npublic class Main {\n    public static void main(String[] args) {\n        FlagManager manager = new FlagManager();\n\n        Thread t1 = new Thread(manager::activate);\n        Thread t2 = new Thread(manager::activate);\n\n        t1.start();\n        t2.start();\n    }\n}\n
Explanation

compareAndSet() changes the flag only if it matches the expected value, ensuring thread safety.

"},{"location":"langdives/Java/Threads-Atomicity/#atomicreference","title":"AtomicReference","text":"Example: Atomic Operations on Objects
import java.util.concurrent.atomic.AtomicReference;\n\nclass Person {\n    String name;\n\n    Person(String name) {\n        this.name = name;\n    }\n}\n\npublic class AtomicReferenceExample {\n    public static void main(String[] args) {\n        AtomicReference<Person> personRef = new AtomicReference<>(new Person(\"Alice\"));\n\n        // Atomic update of the reference\n        personRef.set(new Person(\"Bob\"));\n        System.out.println(\"Updated Person: \" + personRef.get().name);\n    }\n}\n

When to Use ?

Use AtomicReference when you need atomic operations on object references.

"},{"location":"langdives/Java/Threads-Atomicity/#atomicstampedreference","title":"AtomicStampedReference","text":"

The ABA problem occurs when a value changes from A to B and then back to A. AtomicStampedReference solves this by associating a version (stamp) with the value.

Example: ABA problem prevention
import java.util.concurrent.atomic.AtomicStampedReference;\n\npublic class AtomicStampedReferenceExample {\n    public static void main(String[] args) {\n        AtomicStampedReference<Integer> ref = new AtomicStampedReference<>(1, 0);\n\n        int[] stamp = new int[1];\n        Integer value = ref.get(stamp);\n        System.out.println(\"Initial Value: \" + value + \", Stamp: \" + stamp[0]);\n\n        boolean success = ref.compareAndSet(1, 2, stamp[0], stamp[0] + 1);\n        System.out.println(\"CAS Success: \" + success + \", New Value: \" + ref.get(stamp) + \", New Stamp: \" + stamp[0]);\n    }\n}\n
Explanation

AtomicStampedReference ensures that the same value change does not go undetected by tracking the version.

"},{"location":"langdives/Java/Threads-Atomicity/#performance","title":"Performance ?","text":""},{"location":"langdives/Java/Threads-Atomicity/#when-to-use","title":"When to Use ?","text":""},{"location":"langdives/Java/Threads-Atomicity/#limitations","title":"Limitations ?","text":""},{"location":"langdives/Java/Threads-Atomicity/#best-practices","title":"Best Practices","text":""},{"location":"langdives/Java/Threads-Atomicity/#summary","title":"Summary","text":"

The atomic classes in Java\u2019s java.util.concurrent.atomic package offer lock-free, thread-safe operations that are ideal for simple state management. By ensuring atomicity, these classes help avoid race conditions and improve the performance and scalability of multithreaded applications. However, they are best suited for single-variable updates; for more complex operations, locks or transactional mechanisms may still be necessary.

"},{"location":"langdives/Java/Threads/","title":"Threads","text":"

Java offers multithreading to perform multiple tasks concurrently, improving performance and responsiveness. This deep dive covers every key concept of Java threading with detailed explanations and code examples.

Before that, let's have a quick recap of the fundamental concepts of concurrency and parallelism.

"},{"location":"langdives/Java/Threads/#concurrency-and-parallelism","title":"Concurrency and Parallelism","text":"

Concurrency: Multiple tasks start, run, and complete in overlapping time periods (not necessarily simultaneously).

Parallelism: Multiple tasks run exactly at the same time (requires multi-core processors).

We have another article that goes through the fundamentals of concurrency and parallelism in depth. We cover some of that material here too, but it's recommended to go through that article: Concurrency and Parallelism

Java achieves both using threads, thread pools, and various libraries such as Executors, the Fork/Join Framework, and the Streams API. We will go through them one by one; this article mostly covers Threads.

"},{"location":"langdives/Java/Threads/#what-is-a-thread","title":"What is a Thread?","text":"

A thread is a lightweight sub-process. A Java program has at least one thread \u2014 the main thread, which starts with the main() method. You can create additional threads to execute code concurrently. Each thread shares the same process memory, but has its own stack, registers, and program counter.

"},{"location":"langdives/Java/Threads/#how-to-create","title":"How to Create ?","text":"

You can create a thread in two ways:

Extending the Thread class
class MyThread extends Thread {\n    public void run() {\n        System.out.println(\"Thread running: \" + Thread.currentThread().getName());\n    }\n}\n\npublic class Main {\n    public static void main(String[] args) {\n        MyThread t1 = new MyThread();\n        t1.start();  // Start the thread\n    }\n}\n
Implementing the Runnable interface
class MyRunnable implements Runnable {\n    public void run() {\n        System.out.println(\"Runnable running: \" + Thread.currentThread().getName());\n    }\n}\n\npublic class Main {\n    public static void main(String[] args) {\n        Thread t1 = new Thread(new MyRunnable());\n        t1.start();  // Start the thread\n    }\n}\n

When to Use ? Prefer implementing Runnable (or a lambda) so your class can still extend another class; extend Thread only when you need to customize the thread behavior itself.

"},{"location":"langdives/Java/Threads/#thread-lifecycle","title":"Thread Lifecycle","text":"

A thread in Java goes through the following states: NEW, RUNNABLE, BLOCKED, WAITING, TIMED_WAITING, and TERMINATED.

Thread Lifecycle

Example: Thread Lifecycle
class MyThread extends Thread {\n    public void run() {\n        System.out.println(\"Running thread: \" + Thread.currentThread().getName());\n    }\n}\n\npublic class Main {\n    public static void main(String[] args) throws InterruptedException {\n        MyThread t1 = new MyThread();  // NEW\n        t1.start();  // RUNNABLE\n\n        // Join to wait for the thread to complete\n        t1.join();  // Terminated once finished\n        System.out.println(\"Thread has terminated.\");\n    }\n}\n

Note

"},{"location":"langdives/Java/Threads/#daemon-threads","title":"Daemon Threads","text":"

A daemon thread is a background thread that provides support services, like the garbage collector. It does not prevent the JVM from shutting down once all user threads are completed.

Example of Daemon Thread
class DaemonThread extends Thread {\n    public void run() {\n        if (Thread.currentThread().isDaemon()) {\n            System.out.println(\"This is a daemon thread.\");\n        } else {\n            System.out.println(\"This is a user thread.\");\n        }\n    }\n}\n\npublic class Main {\n    public static void main(String[] args) {\n        DaemonThread t1 = new DaemonThread();\n        t1.setDaemon(true);  // Set as daemon thread\n        t1.start();\n\n        DaemonThread t2 = new DaemonThread();\n        t2.start();\n    }\n}\n

When to use Daemon Threads ?

For background tasks like logging, garbage collection, or monitoring services.

"},{"location":"langdives/Java/Threads/#thread-priority","title":"Thread Priority","text":"

Java assigns a priority to each thread, ranging from 1 (MIN_PRIORITY) to 10 (MAX_PRIORITY). The default priority is 5 (NORM_PRIORITY). Thread priority affects scheduling, but it\u2019s platform-dependent \u2014 meaning it doesn\u2019t guarantee execution order.

Setting Thread Priority Example
class PriorityThread extends Thread {\n    public void run() {\n        System.out.println(\"Running thread: \" + Thread.currentThread().getName() +\n                        \" with priority: \" + Thread.currentThread().getPriority());\n    }\n}\n\npublic class Main {\n    public static void main(String[] args) {\n        PriorityThread t1 = new PriorityThread();\n        PriorityThread t2 = new PriorityThread();\n\n        t1.setPriority(Thread.MIN_PRIORITY);  // Priority 1\n        t2.setPriority(Thread.MAX_PRIORITY);  // Priority 10\n\n        t1.start();\n        t2.start();\n    }\n}\n

When to use Priority Threads

Only when certain tasks should have preferential scheduling over others. However, Java thread scheduling is not guaranteed, so don't rely solely on priority.

"},{"location":"langdives/Java/Threads/#thread-synchronization","title":"Thread Synchronization","text":"

When multiple threads access shared resources (like variables), synchronization ensures that only one thread modifies the resource at a time. Use the synchronized keyword to prevent race conditions.

Synchronization Example
class Counter {\n    private int count = 0;\n\n    public synchronized void increment() {\n        count++;  // Only one thread can increment at a time\n    }\n\n    public int getCount() {\n        return count;\n    }\n}\n\npublic class Main {\n    public static void main(String[] args) throws InterruptedException {\n        Counter counter = new Counter();\n\n        Thread t1 = new Thread(() -> {\n            for (int i = 0; i < 1000; i++) counter.increment();\n        });\n\n        Thread t2 = new Thread(() -> {\n            for (int i = 0; i < 1000; i++) counter.increment();\n        });\n\n        t1.start();\n        t2.start();\n        t1.join();\n        t2.join();\n\n        System.out.println(\"Final count: \" + counter.getCount());\n    }\n}\n

When to use Synchronization ?

When multiple threads access critical sections of code to avoid inconsistent data.

"},{"location":"langdives/Java/Threads/#inter-thread-communication","title":"Inter-thread Communication","text":"

Java allows threads to communicate using wait-notify methods, avoiding busy waiting.

Inter-thread Communication Example
class SharedResource {\n    private int value;\n    private boolean available = false;\n\n    public synchronized void produce(int val) throws InterruptedException {\n        while (available) {\n            wait();  // Wait if value is already available\n        }\n        value = val;\n        available = true;\n        System.out.println(\"Produced: \" + value);\n        notify();  // Notify the consumer thread\n    }\n\n    public synchronized void consume() throws InterruptedException {\n        while (!available) {\n            wait();  // Wait if value is not available\n        }\n        System.out.println(\"Consumed: \" + value);\n        available = false;\n        notify();  // Notify the producer thread\n    }\n}\n\npublic class Main {\n    public static void main(String[] args) {\n        SharedResource resource = new SharedResource();\n\n        Thread producer = new Thread(() -> {\n            try {\n                for (int i = 1; i <= 5; i++) resource.produce(i);\n            } catch (InterruptedException e) {\n                Thread.currentThread().interrupt();\n            }\n        });\n\n        Thread consumer = new Thread(() -> {\n            try {\n                for (int i = 1; i <= 5; i++) resource.consume();\n            } catch (InterruptedException e) {\n                Thread.currentThread().interrupt();\n            }\n        });\n\n        producer.start();\n        consumer.start();\n    }\n}\n
"},{"location":"langdives/Java/Threads/#thread-local-variables","title":"Thread-Local Variables","text":"

ThreadLocal provides a way to create thread-isolated variables. Each thread gets its own copy of the variable, and changes made by one thread do not affect others. This is useful when you don\u2019t want threads to share a common state.

ThreadLocal Usage Example
public class ThreadLocalExample {\n    private static ThreadLocal<Integer> threadLocal = ThreadLocal.withInitial(() -> 1);\n\n    public static void main(String[] args) {\n        Thread t1 = new Thread(() -> {\n            threadLocal.set(100);\n            System.out.println(\"Thread 1: \" + threadLocal.get());\n        });\n\n        Thread t2 = new Thread(() -> {\n            threadLocal.set(200);\n            System.out.println(\"Thread 2: \" + threadLocal.get());\n        });\n\n        t1.start();\n        t2.start();\n    }\n}\n

When to use ?

Useful in multi-threaded environments (like database transactions) where each thread needs its own context without interference from other threads.

"},{"location":"langdives/Java/Threads/#volatile-variables","title":"Volatile Variables","text":"

The volatile keyword ensures visibility of changes to variables across threads. Without volatile, a thread may work from a cached copy of the variable and never observe the latest value written by another thread.

Volatile Example
public class VolatileExample {\n    private static volatile boolean running = true;\n\n    public static void main(String[] args) {\n        Thread t = new Thread(() -> {\n            while (running) {\n                // Busy-wait\n            }\n            System.out.println(\"Thread stopped.\");\n        });\n\n        t.start();\n\n        try { Thread.sleep(1000); } catch (InterruptedException e) { }\n        running = false;  // Change will be visible to other threads\n    }\n}\n

When to Use Volatile

Use volatile for variables that are read and written by multiple threads but never updated with compound operations, so no locking is needed (e.g., simple status flags).

"},{"location":"langdives/Java/Threads/#when-to-use-volatile","title":"When to Use volatile ?","text":" Example Where volatile is Necessary
class VolatileExample {\n    private volatile boolean running = true;\n\n    public void stop() {\n        running = false;  // Change becomes immediately visible to other threads\n    }\n\n    public void run() {\n        while (running) {\n            // Do something\n        }\n        System.out.println(\"Thread stopped.\");\n    }\n\n    public static void main(String[] args) throws InterruptedException {\n        VolatileExample example = new VolatileExample();\n\n        Thread t = new Thread(example::run);\n        t.start();\n\n        Thread.sleep(1000);\n        example.stop();  // Stop the thread\n    }\n}\n
Explanation

Here, volatile ensures that the change to running made by the stop() method is immediately visible to the thread executing run(). Without volatile, the run() thread might never see the change and keep running indefinitely.

"},{"location":"langdives/Java/Threads/#when-not-to-use-volatile","title":"When Not to Use volatile ?","text":" Problem with Volatile for Non-Atomic Operations
class Counter {\n    private volatile int count = 0;\n\n    public void increment() {\n        count++;  // Not atomic! Two threads can still read the same value.\n    }\n\n    public int getCount() {\n        return count;\n    }\n}\n

Issue

Even though count is marked volatile, count++ is not atomic. Two threads could read the same value and increment it, leading to lost updates. To fix this, use synchronized or AtomicInteger.
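As a minimal sketch of the fix (AtomicInteger is part of java.util.concurrent.atomic, available since Java 5), the same counter rewritten without lost updates:

import java.util.concurrent.atomic.AtomicInteger;\n\nclass Counter {\n    private final AtomicInteger count = new AtomicInteger(0);\n\n    public void increment() {\n        count.incrementAndGet();  // Atomic read-modify-write; no lost updates\n    }\n\n    public int getCount() {\n        return count.get();\n    }\n}\n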

"},{"location":"langdives/Java/Threads/#volatile-vs-synchronized","title":"Volatile vs Synchronized","text":""},{"location":"langdives/Java/Threads/#no-synchronized-or-volatile","title":"No synchronized or volatile ?","text":"

If you don't use volatile or synchronized, dangerous visibility problems can occur, like this:

Example
class SharedResource {\n    private boolean available = false;\n\n    public void produce() {\n        available = true;  // Change not guaranteed to be visible immediately\n    }\n\n    public void consume() {\n        while (!available) {\n            // Busy-waiting, might never see the change to `available`\n        }\n        System.out.println(\"Consumed!\");\n    }\n}\n

Problem

If available is not marked volatile, the change made by produce() might not be visible to the consume() thread immediately. The consumer thread might be stuck in an infinite loop because it doesn't see the latest value of available.

Note

"},{"location":"langdives/Java/Threads/#synchronized-over-volatile","title":"Synchronized over volatile ?","text":"

Let's go through an example where it's okay to use just synchronized instead of volatile.

Example
public synchronized void produce(int val) throws InterruptedException {\n    while (available) {\n        wait();  // Wait if value is already available\n    }\n    value = val;\n    available = true;\n    System.out.println(\"Produced: \" + value);\n    notify();  // Notify the consumer thread\n}\n

Synchronized Keyword: entering and exiting a synchronized method establishes a happens-before relationship, so writes made while holding the lock are visible to the next thread that acquires it.

Wait-Notify Mechanism: wait() releases the monitor and re-acquires it before returning, so a woken thread always re-reads the shared state under the same lock.

Because this code uses synchronized methods and wait-notify, the necessary memory visibility is achieved without needing volatile.

"},{"location":"langdives/Java/Threads/#differences","title":"Differences","text":"Aspect volatile synchronized Visibility Ensures visibility of changes. Ensures visibility and atomicity. Atomicity Not guaranteed. Guaranteed (only one thread at a time). Performance Faster (no locking). Slower (locking involved). Use Case For flags, simple state updates. For complex operations, critical sections. Overhead Low (no blocking). High (involves blocking and context switches)."},{"location":"langdives/Java/Threads/#thread-memory","title":"Thread Memory","text":"

The memory consumption per thread and the maximum number of threads in Java depend on several factors, such as the configured stack size, OS-level limits, and available system memory.

"},{"location":"langdives/Java/Threads/#memory-used-by-thread","title":"Memory used by Thread","text":"

Each Java thread consumes two key areas of memory:

Thread Stack Memory: Each thread gets its own stack, which holds local variables (primitives and references), method call frames, and intermediate results during method execution.

Note

The default stack size depends on the JVM and platform; on most 64-bit JVMs it is around 512 KB to 1 MB.

You can change the stack size with the -Xss JVM option:

java -Xss512k YourProgram\n

Native Thread Metadata: In addition to stack memory, the OS kernel allocates metadata per thread (for thread control structures). This varies by platform but is typically in the range of 8 KB to 16 KB per thread.

"},{"location":"langdives/Java/Threads/#memory-per-thread","title":"Memory per Thread ?","text":"

The typical memory consumption per thread: ~1 MB of stack plus roughly 8 KB to 16 KB of native thread metadata.

Thus, a single thread could use ~1 MB to 1.1 MB of memory.

"},{"location":"langdives/Java/Threads/#max-threads-you-can-create","title":"Max Threads you Can Create ?","text":"

The number of threads you can create depends on available memory, the per-thread stack size (-Xss), and OS-level limits.

Practical Calculation Example

Let's say the machine leaves ~6 GB of memory available for thread stacks (after the heap and OS overhead) and each thread consumes ~1 MB:

Maximum threads = 6 GB / 1 MB per thread = ~6000 threads.
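A rough, hypothetical probe of the practical limit (run only in a disposable environment, since it deliberately exhausts thread resources; the exact count depends on -Xss, OS limits, and free memory):

public class MaxThreadProbe {\n    public static void main(String[] args) {\n        int count = 0;\n        try {\n            while (true) {\n                Thread t = new Thread(() -> {\n                    try { Thread.sleep(Long.MAX_VALUE); } catch (InterruptedException e) { }\n                });\n                t.setDaemon(true);  // Daemon threads don't block JVM exit\n                t.start();\n                count++;\n            }\n        } catch (OutOfMemoryError e) {\n            System.out.println(\"Created \" + count + \" threads before: \" + e.getMessage());\n        }\n    }\n}\n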

OS Limits on Threads

Even if memory allows for thousands of threads, the OS imposes limits, such as the per-user process limit (ulimit -u) and kernel-wide thread caps.

"},{"location":"langdives/Java/Threads/#too-many-threads-created","title":"Too Many Threads Created ?","text":""},{"location":"langdives/Java/Threads/#optimizing-thread-usage","title":"Optimizing Thread Usage","text":"

Rather than creating many threads manually, use thread pools to manage a fixed number of threads efficiently:

Thread Pools Example
import java.util.concurrent.ExecutorService;\nimport java.util.concurrent.Executors;\n\npublic class ThreadPoolExample {\n    public static void main(String[] args) {\n        ExecutorService executor = Executors.newFixedThreadPool(10);\n        for (int i = 0; i < 100; i++) {\n            executor.submit(() -> {\n                System.out.println(\"Running thread: \" + Thread.currentThread().getName());\n            });\n        }\n        executor.shutdown();\n    }\n}\n

Thread pools reuse threads, reducing memory usage and improving performance.

"},{"location":"langdives/Java/Threads/#how-to-increase-max-thread","title":"How to Increase Max Thread ?","text":"

On Linux, you can increase the maximum threads per process

Check current limits
ulimit -u  # Max user processes\n
Increase limit (temporary)
ulimit -u 65535\n
Permanent change: Edit '/etc/security/limits.conf' and add
your_user_name  hard  nproc  65535\nyour_user_name  soft  nproc  65535\n

Key points: nproc limits apply per user, ulimit changes last only for the current session, and edits to /etc/security/limits.conf make them permanent.

"},{"location":"langdives/Java/Spring/","title":"Spring","text":""},{"location":"langdives/Java/Spring/#what-is-spring","title":"What is Spring ?","text":"

Spring is a popular, open-source Java-based framework used to create enterprise-level applications. It provides a comprehensive programming and configuration model that simplifies Java development. At its core, Spring focuses on dependency injection and inversion of control (IoC), providing an abstraction over Java's complexity.

"},{"location":"langdives/Java/Spring/#core-goals-of-spring","title":"Core goals of Spring","text":""},{"location":"langdives/Java/Spring/#ecosystem-overview","title":"Ecosystem Overview","text":"

The Spring ecosystem consists of various projects for different use cases, such as Spring Boot, Spring Data, Spring Security, Spring Cloud, Spring Batch, and Spring Integration.

"},{"location":"langdives/Java/Spring/SpringAnnotations/","title":"Spring Annotations","text":"

A comprehensive list of Spring Boot annotations, covering core Spring Boot, configuration, web, data, testing, and more. I'll organize them by categories with keys (annotation names) and values (purpose/use cases) for easy reference.

"},{"location":"langdives/Java/Spring/SpringAnnotations/#core-annotations","title":"Core Annotations","text":"Annotation Purpose/Use Case @SpringBootApplication Main entry point for a Spring Boot application. Combines @Configuration, @ComponentScan, and @EnableAutoConfiguration. @EnableAutoConfiguration Enables automatic configuration of Spring beans based on the classpath and defined properties. @ComponentScan Scans the package and its sub-packages for Spring components (e.g., @Component, @Service). @Configuration Marks a class as a source of bean definitions. Used to define Spring beans programmatically. @Bean Declares a method as a Spring bean, registered in the application context. @Import Imports additional configuration classes. @ImportResource Loads bean definitions from external XML configuration files."},{"location":"langdives/Java/Spring/SpringAnnotations/#web-and-rest-annotations","title":"Web and REST Annotations","text":"Annotation Purpose/Use Case @RestController Marks a class as a REST API controller. Combines @Controller and @ResponseBody. @Controller Marks a class as a web controller. Works with view templates (like Thymeleaf). @RequestMapping Maps HTTP requests to specific handler methods or classes. Can be used on classes or methods. @GetMapping Maps HTTP GET requests to specific handler methods. @PostMapping Maps HTTP POST requests to specific handler methods. @PutMapping Maps HTTP PUT requests to specific handler methods. @DeleteMapping Maps HTTP DELETE requests to specific handler methods. @PatchMapping Maps HTTP PATCH requests to specific handler methods. @RequestBody Binds the HTTP request body to a Java object. Used in REST controllers. @ResponseBody Binds the return value of a method directly to the HTTP response body. @RequestParam Binds HTTP query parameters to method arguments. @PathVariable Binds URI template variables to method parameters. @RequestHeader Binds HTTP request headers to method parameters. @CookieValue Binds cookie values to method parameters. @ModelAttribute Binds form data to a model object. @SessionAttributes Declares session-scoped model attributes. @CrossOrigin Enables cross-origin requests (CORS) for specific endpoints."},{"location":"langdives/Java/Spring/SpringAnnotations/#jpa-jdbc-annotations","title":"JPA, JDBC Annotations","text":"Annotation Purpose/Use Case @Entity Marks a class as a JPA entity. @Table Specifies the database table for a JPA entity. @Id Marks a field as the primary key of a JPA entity. @GeneratedValue Specifies how the primary key value should be generated. @Column Specifies the mapping of a field to a database column. @OneToOne Establishes a one-to-one relationship between entities. @OneToMany Establishes a one-to-many relationship between entities. @ManyToOne Establishes a many-to-one relationship between entities. @ManyToMany Establishes a many-to-many relationship between entities. @JoinColumn Specifies the foreign key column for a relationship. @Query Defines a custom JPQL or SQL query on a repository method. @Transactional Marks a method or class as transactional. Ensures ACID properties in data operations. @EnableJpaRepositories Enables JPA repositories for data access. @Repository Marks a class as a data repository. @EnableTransactionManagement Enables declarative transaction management."},{"location":"langdives/Java/Spring/SpringAnnotations/#security-annotations","title":"Security Annotations","text":"Annotation Purpose/Use Case @EnableWebSecurity Enables Spring Security for web applications. 
@EnableGlobalMethodSecurity Enables method-level security annotations like @PreAuthorize and @PostAuthorize. @PreAuthorize Applies authorization logic before a method is invoked. @PostAuthorize Applies authorization logic after a method has executed. @Secured Secures a method by roles (deprecated in favor of @PreAuthorize). @RolesAllowed Specifies which roles are allowed to access a method. @WithMockUser Simulates a user for testing security."},{"location":"langdives/Java/Spring/SpringAnnotations/#testing-annotations","title":"Testing Annotations","text":"Annotation Purpose/Use Case @SpringBootTest Runs integration tests for a Spring Boot application. Loads the full application context. @WebMvcTest Tests only web layer components (e.g., controllers). @DataJpaTest Tests only JPA repositories. Configures an in-memory database. @MockBean Replaces a bean with a mock during tests. @SpyBean Replaces a bean with a spy during tests. @TestConfiguration Provides additional bean configurations for tests. @BeforeEach Runs before each test method in a test class. @AfterEach Runs after each test method in a test class."},{"location":"langdives/Java/Spring/SpringAnnotations/#profiles-annotations","title":"Profiles Annotations","text":"Annotation Purpose/Use Case @ConfigurationProperties Binds external configuration properties to a Java bean. @EnableConfigurationProperties Enables support for @ConfigurationProperties beans. @Profile Specifies the profile under which a bean is active (e.g., dev, prod). @Value Injects a value from the properties or environment. @PropertySource Loads properties from an external file. @Environment Provides access to the current environment settings."},{"location":"langdives/Java/Spring/SpringAnnotations/#actuator-metrics-annotations","title":"Actuator & Metrics Annotations","text":"Annotation Purpose/Use Case @Endpoint Defines a custom Actuator endpoint. @ReadOperation Marks a method as a read operation for an Actuator endpoint. @WriteOperation Marks a method as a write operation for an Actuator endpoint. @DeleteOperation Marks a method as a delete operation for an Actuator endpoint. @Timed Measures the execution time of a method. @Gauge Exposes a gauge metric to Actuator. @Metered Marks a method to be counted as a metric (deprecated in favor of @Timed)."},{"location":"langdives/Java/Spring/SpringAnnotations/#microservices-annotations","title":"Microservices Annotations","text":"Annotation Purpose/Use Case @EnableDiscoveryClient Enables service registration with Eureka, Consul, or Zookeeper. @EnableFeignClients Enables Feign clients for inter-service communication. @CircuitBreaker Implements circuit-breaking logic using Resilience4j. @Retryable Enables retry logic for a method. @LoadBalanced Enables load balancing for REST clients."},{"location":"langdives/Java/Spring/SpringAnnotations/#miscellaneous-annotations","title":"Miscellaneous Annotations","text":"Annotation Purpose/Use Case @Conditional Conditionally registers a bean based on custom logic. @Async Marks a method to run asynchronously. @Scheduled Schedules a method to run at fixed intervals or cron expressions. @EventListener Marks a method to listen for application events. @Cacheable Caches the result of a method. @CacheEvict Evicts entries from a cache."},{"location":"langdives/Java/Spring/SpringAnnotations/#summary","title":"Summary","text":"

This is a comprehensive list of all major Spring Boot annotations, categorized by their functionality. With these annotations, Spring Boot makes it easier to develop applications by reducing boilerplate code, automating configuration, and offering powerful tools for testing, security, and microservices development.
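A minimal sketch tying a few of these together (class and endpoint names are illustrative, not drawn from the list above):

import org.springframework.boot.SpringApplication;\nimport org.springframework.boot.autoconfigure.SpringBootApplication;\nimport org.springframework.web.bind.annotation.*;\n\n@SpringBootApplication\npublic class DemoApp {\n    public static void main(String[] args) {\n        SpringApplication.run(DemoApp.class, args);\n    }\n}\n\n@RestController\nclass GreetingController {\n    @GetMapping(\"/greet\")\n    String greet(@RequestParam(defaultValue = \"world\") String name) {\n        return \"Hello, \" + name;  // @RestController writes this straight to the response body\n    }\n}\n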

"},{"location":"langdives/Java/Spring/SpringBoot/","title":"Spring Boot","text":"

This article covers how Spring Boot automates configurations, deals with microservices, and manages monitoring, security, and performance.

"},{"location":"langdives/Java/Spring/SpringBoot/#what-is-spring-boot","title":"What is Spring Boot ?","text":"

Spring Boot is an extension of the Spring Framework that simplifies the development of Java applications by offering:

The goal of Spring Boot is to help developers build stand-alone, production-grade applications quickly and with less fuss.

"},{"location":"langdives/Java/Spring/SpringBoot/#application-architecture","title":"Application Architecture","text":"

A Spring Boot application consists of a main class annotated with @SpringBootApplication, externalized configuration (application.properties or application.yml), application beans such as controllers, services, and repositories, and an embedded server.

"},{"location":"langdives/Java/Spring/SpringBoot/#key-components","title":"Key Components","text":""},{"location":"langdives/Java/Spring/SpringBoot/#annotations","title":"Annotations","text":""},{"location":"langdives/Java/Spring/SpringBoot/#starters","title":"Starters","text":"

Starters are pre-configured dependency bundles for common functionalities

"},{"location":"langdives/Java/Spring/SpringBoot/#how-auto-config-works","title":"How Auto-Config Works ?","text":"

Spring Boot uses @EnableAutoConfiguration to detect dependencies and automatically configure beans for you.

For example: If spring-boot-starter-data-jpa is present, it will

  1. Configure a DataSource.
  2. Configure an EntityManagerFactory to manage JPA entities.
  3. Enable transaction management using @EnableTransactionManagement.
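For instance, a hedged sketch of opting out of one auto-configuration when you want to wire that piece yourself (the exclude attribute is part of @SpringBootApplication; the class name MyApp is illustrative):

import org.springframework.boot.SpringApplication;\nimport org.springframework.boot.autoconfigure.SpringBootApplication;\nimport org.springframework.boot.autoconfigure.jdbc.DataSourceAutoConfiguration;\n\n// Suppresses the automatic DataSource setup so it can be configured manually\n@SpringBootApplication(exclude = DataSourceAutoConfiguration.class)\npublic class MyApp {\n    public static void main(String[] args) {\n        SpringApplication.run(MyApp.class, args);\n    }\n}\n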

How to Debug Auto-Configuration: start the application with the --debug flag (or set debug=true in application.properties) to print the condition evaluation report, which shows which auto-configurations matched and which were excluded.

"},{"location":"langdives/Java/Spring/SpringBoot/#application-lifecycle","title":"Application Lifecycle","text":"

Startup: Spring Boot applications initialize with SpringApplication.run(). The lifecycle involves loading beans, initializing contexts, and wiring dependencies.

Embedded Server: By default, Spring Boot uses Tomcat as the embedded server; others include Jetty and Undertow. The server listens on a configurable port (default: 8080).

Shutdown: Spring Boot supports graceful shutdown; cleanup logic can run in @PreDestroy methods, and the application context registers a JVM shutdown hook (ConfigurableApplicationContext.registerShutdownHook()) so beans are destroyed on exit.
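A small sketch of shutdown-time cleanup via @PreDestroy (the bean name is illustrative; the import is jakarta.annotation on Spring Boot 3+, javax.annotation on older versions):

import jakarta.annotation.PreDestroy;\nimport org.springframework.stereotype.Component;\n\n@Component\npublic class ConnectionCleaner {\n    @PreDestroy\n    public void close() {\n        // Runs while the context is closing, before the JVM exits\n        System.out.println(\"Releasing connections before shutdown...\");\n    }\n}\n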

"},{"location":"langdives/Java/Spring/SpringBoot/#configuration-in-depth","title":"Configuration in Depth","text":""},{"location":"langdives/Java/Spring/SpringBoot/#using-applicationproperties","title":"Using application.properties","text":"

Spring Boot applications are configured using either application.properties or application.yml.

Examples of application.properties
server.port=8081\nspring.datasource.url=jdbc:mysql://localhost:3306/mydb\nspring.datasource.username=root\nspring.datasource.password=password\n
"},{"location":"langdives/Java/Spring/SpringBoot/#using-applicationyml","title":"Using application.yml","text":"

Using Profiles (e.g., Dev vs. Prod) in application.yml

Example of application.yml
server:\n  port: 8080\nspring:\n  profiles:\n    active: dev\n\n---\nspring:\n  profiles: dev\n  datasource:\n    url: jdbc:h2:mem:testdb\n\n---\nspring:\n  profiles: prod\n  datasource:\n    url: jdbc:mysql://localhost:3306/proddb\n

You can activate profiles programmatically or through command-line options

$ java -Dspring.profiles.active=prod -jar myapp.jar\n
"},{"location":"langdives/Java/Spring/SpringBoot/#custom-configuration","title":"Custom Configuration","text":"

You can define your own custom properties and inject them into beans using @ConfigurationProperties.

Example
@ConfigurationProperties(prefix = \"custom\")\npublic class CustomConfig {\n    private String name;\n    private int timeout;\n\n    // Getters and Setters\n}\n
application.properties
custom.name=SpringApp\ncustom.timeout=5000\n
Inject the CustomConfig bean
@Autowired\nprivate CustomConfig customConfig;\n
"},{"location":"langdives/Java/Spring/SpringBoot/#embedded-server-customization","title":"Embedded Server Customization","text":"

You can customize the embedded server by defining a ConfigurableServletWebServerFactory bean (the successor of Spring Boot 1.x's EmbeddedServletContainerFactory).

Changing the Tomcat thread pool size
@Bean\npublic ConfigurableServletWebServerFactory webServerFactory() {\n    TomcatServletWebServerFactory factory = new TomcatServletWebServerFactory();\n    factory.setPort(9090);\n    factory.addConnectorCustomizers(connector -> {\n        // setProperty is the supported way to tune Tomcat connector settings\n        connector.setProperty(\"maxThreads\", \"200\");\n    });\n    return factory;\n}\n
"},{"location":"langdives/Java/Spring/SpringBoot/#actuator-and-monitoring","title":"Actuator and Monitoring","text":""},{"location":"langdives/Java/Spring/SpringBoot/#spring-boot-actuator","title":"Spring Boot Actuator","text":"

Exposes application management and monitoring endpoints.

Some common endpoints: /actuator/health, /actuator/info, /actuator/metrics, and /actuator/env.

Exposing Actuator Endpoints
management:\n  endpoints:\n    web:\n      exposure:\n        include: health, info\n# management.security.enabled was removed in Spring Boot 2;\n# secure these endpoints with a standard Spring Security configuration instead\n

You can customize or add your own metrics by using @Timed or MeterRegistry.
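A minimal sketch of a custom metric through MeterRegistry (the metric and class names are illustrative):

import io.micrometer.core.instrument.Counter;\nimport io.micrometer.core.instrument.MeterRegistry;\nimport org.springframework.stereotype.Component;\n\n@Component\npublic class OrderMetrics {\n    private final Counter ordersPlaced;\n\n    public OrderMetrics(MeterRegistry registry) {\n        this.ordersPlaced = Counter.builder(\"orders.placed\")\n                .description(\"Number of orders placed\")\n                .register(registry);  // Appears under /actuator/metrics/orders.placed\n    }\n\n    public void recordOrder() {\n        ordersPlaced.increment();\n    }\n}\n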

"},{"location":"langdives/Java/Spring/SpringBoot/#security","title":"Security","text":""},{"location":"langdives/Java/Spring/SpringBoot/#building-microservices","title":"Building Microservices","text":""},{"location":"langdives/Java/Spring/SpringBoot/#testing","title":"Testing","text":""},{"location":"langdives/Java/Spring/SpringBoot/#summary","title":"Summary","text":"

Spring Boot streamlines the development process by providing auto-configuration, embedded servers, and a production-ready environment. It empowers developers to build and deploy microservices quickly, backed by powerful features like Spring Security, Spring Data, Actuator, and more. With its opinionated defaults and deep customizability, Spring Boot strikes a balance between simplicity and flexibility, making it ideal for both beginners and advanced developers.

"},{"location":"langdives/Java/Spring/SpringCoreFramework/","title":"Spring Core Framework","text":"

This article covers the Spring Core Framework, the foundation of the entire Spring ecosystem. We'll explore each component and mechanism in detail, so by the end you'll have a thorough understanding of how Spring Core works, including the IoC container, Dependency Injection (DI), Beans, ApplicationContext, the Bean Lifecycle, AOP, and more.

"},{"location":"langdives/Java/Spring/SpringCoreFramework/#what-is-spring-core-framework","title":"What is Spring Core Framework","text":"

The Spring Core Framework is the heart of the Spring ecosystem. It provides the essential features required to build Java applications, with a focus on dependency injection (DI) and inversion of control (IoC). At its core, Spring aims to eliminate the complexities of creating objects, managing dependencies, and wiring different components together.

"},{"location":"langdives/Java/Spring/SpringCoreFramework/#modules","title":"Modules","text":""},{"location":"langdives/Java/Spring/SpringCoreFramework/#spring-core","title":"Spring Core","text":"

The foundational module that provides the IoC container and the basic tools for dependency injection (DI). It includes the core interfaces and classes like BeanFactory, ApplicationContext, BeanPostProcessor, BeanDefinition, and others.

"},{"location":"langdives/Java/Spring/SpringCoreFramework/#spring-beans","title":"Spring Beans","text":"

Manages the configuration, creation, and lifecycle of Spring beans.

"},{"location":"langdives/Java/Spring/SpringCoreFramework/#spring-context","title":"Spring Context","text":"

Provides a runtime environment for applications using the IoC container. It builds on the Spring Core and adds additional functionality like events and internationalization (i18n).

"},{"location":"langdives/Java/Spring/SpringCoreFramework/#spring-spel","title":"Spring SpEL","text":"

SpEL (Spring Expression Language) is a powerful expression language that can be used to dynamically query or manipulate bean properties.

"},{"location":"langdives/Java/Spring/SpringCoreFramework/#core-concepts","title":"Core Concepts","text":""},{"location":"langdives/Java/Spring/SpringCoreFramework/#inversion-of-control-ioc","title":"Inversion of Control (IoC)","text":"

The Inversion of Control (IoC) principle is at the core of the Spring Framework. It shifts the control of object creation and management from the developer to the IoC container, promoting loose coupling and enhancing testability. Let\u2019s break it down conceptually and then dive into the Spring implementation.

In traditional programming, the application code creates and manages its dependencies directly.

Example
public class OrderService {\n    private PaymentService paymentService;\n\n    public OrderService() {\n        this.paymentService = new PaymentService(); // Tight coupling\n    }\n}\n

Explanation: OrderService creates its own PaymentService, tightly coupling the two classes and making it hard to swap implementations or mock the dependency in tests.

IoC Solution: With Inversion of Control (IoC), the responsibility of creating the PaymentService is "inverted" and delegated to the Spring IoC container. Now, the IoC container injects the dependency into OrderService.

IoC Solution Example
@Component\npublic class OrderService {\n    private final PaymentService paymentService;\n\n    @Autowired\n    public OrderService(PaymentService paymentService) {\n        this.paymentService = paymentService;  // Dependency injection\n    }\n}\n

Explanation: the container constructs PaymentService and passes it into OrderService's constructor, so OrderService no longer manages its dependency's lifecycle.

"},{"location":"langdives/Java/Spring/SpringCoreFramework/#types-of-ioc-containers","title":"Types of IoC Containers","text":""},{"location":"langdives/Java/Spring/SpringCoreFramework/#beanfactory","title":"BeanFactory","text":"

BeanFactory is the basic IoC container in Spring. It provides basic dependency injection and bean management functionality.

Features of BeanFactory: lazy (on-demand) bean initialization, basic dependency injection, and a small footprint suited to constrained environments.

Usage Example
// XmlBeanFactory is deprecated; pair a plain bean factory with an XML definition reader instead\nDefaultListableBeanFactory factory = new DefaultListableBeanFactory();\nnew XmlBeanDefinitionReader(factory).loadBeanDefinitions(new FileSystemResource(\"beans.xml\"));\nOrderService service = factory.getBean(\"orderService\", OrderService.class);\n

However, BeanFactory is rarely used now because it lacks advanced features like event propagation, internationalization, and eager initialization, which are provided by ApplicationContext.

"},{"location":"langdives/Java/Spring/SpringCoreFramework/#applicationcontext","title":"ApplicationContext","text":"

ApplicationContext is a more powerful IoC container that extends BeanFactory. It is widely used in modern Spring applications because of its rich features.

Features of ApplicationContext: eager bean initialization, event publication, internationalization (i18n), and full bean lifecycle support.

Usage Example
ApplicationContext context = new ClassPathXmlApplicationContext(\"beans.xml\");\nOrderService service = context.getBean(OrderService.class);\n

In most cases, developers use AnnotationConfigApplicationContext or Spring Boot to load configurations and manage beans.

"},{"location":"langdives/Java/Spring/SpringCoreFramework/#beanfactory-vs-applicationcontext","title":"BeanFactory vs ApplicationContext","text":"Aspect BeanFactory ApplicationContext Bean Initialization Lazy (on-demand) Eager (at startup) Event Handling Not supported Supports event handling Internationalization Not supported Supports i18n Bean Lifecycle Hooks Basic Full support for lifecycle hooks Common Usage Legacy or constrained environments Modern Spring applications"},{"location":"langdives/Java/Spring/SpringCoreFramework/#ioc-flow","title":"IoC Flow","text":"

Step-by-Step

  1. Define Beans and Dependencies: Beans can be defined in XML, Java Configuration, or through Annotations like @Component.

    <bean id=\"paymentService\" class=\"com.example.PaymentService\"/>\n<bean id=\"orderService\" class=\"com.example.OrderService\">\n    <constructor-arg ref=\"paymentService\"/>\n</bean>\n

  2. Spring IoC Container Loads Configuration: The IoC container reads the configuration (XML, annotations, or Java-based) during startup.

  3. Dependency Injection (DI): The IoC container identifies the dependencies and injects them using constructor, setter, or field injection.

  4. Bean Initialization: The IoC container initializes all necessary beans (eagerly or lazily).

  5. Bean Usage: The beans are now available for use by the application.

"},{"location":"langdives/Java/Spring/SpringCoreFramework/#dependency-injection-di","title":"Dependency Injection (DI)","text":""},{"location":"langdives/Java/Spring/SpringCoreFramework/#what-is-di","title":"What is DI ?","text":"

Dependency Injection (DI) is a pattern where objects are provided with their dependencies at runtime by the IoC container instead of creating them directly. Spring supports multiple types of dependency injection:

"},{"location":"langdives/Java/Spring/SpringCoreFramework/#types-of-di","title":"Types of DI","text":"

Constructor Injection: Dependencies are provided through the class constructor. Recommended for mandatory dependencies.

Constructor Injection Example
@Component\npublic class OrderService {\n    private final PaymentService paymentService;\n\n    @Autowired\n    public OrderService(PaymentService paymentService) {\n        this.paymentService = paymentService;\n    }\n}\n

Setter Injection: Dependencies are injected using setter methods. Useful for optional dependencies.

Setter Injection Example
@Component\npublic class OrderService {\n    private PaymentService paymentService;\n\n    @Autowired\n    public void setPaymentService(PaymentService paymentService) {\n        this.paymentService = paymentService;\n    }\n}\n

Field Injection: Dependencies are injected directly into class fields. Not recommended, since it makes unit testing harder.

Field Injection
@Component\npublic class OrderService {\n    @Autowired\n    private PaymentService paymentService;\n}\n
"},{"location":"langdives/Java/Spring/SpringCoreFramework/#why-ioc-and-di-are-essential","title":"Why IoC and DI are Essential","text":""},{"location":"langdives/Java/Spring/SpringCoreFramework/#challenges-with-ioc","title":"Challenges with IoC","text":""},{"location":"langdives/Java/Spring/SpringCoreFramework/#full-ioc-implementation","title":"Full IoC Implementation","text":"

Let\u2019s look at a complete example using Spring Core with constructor injection:

Full IoC Implementation Example Java Configuration Example
@Configuration\npublic class AppConfig {\n\n    @Bean\n    public PaymentService paymentService() {\n        return new PaymentService();\n    }\n\n    @Bean\n    public OrderService orderService(PaymentService paymentService) {\n        return new OrderService(paymentService);\n    }\n}\n
OrderService and PaymentService
@Component\npublic class PaymentService {\n    public void processPayment() {\n        System.out.println(\"Payment processed.\");\n    }\n}\n\n@Component\npublic class OrderService {\n    private final PaymentService paymentService;\n\n    @Autowired\n    public OrderService(PaymentService paymentService) {\n        this.paymentService = paymentService;\n    }\n\n    public void placeOrder() {\n        System.out.println(\"Order placed.\");\n        paymentService.processPayment();\n    }\n}\n
Main Class to Run
public class Main {\n    public static void main(String[] args) {\n        ApplicationContext context = new AnnotationConfigApplicationContext(AppConfig.class);\n        OrderService orderService = context.getBean(OrderService.class);\n        orderService.placeOrder();\n    }\n}\n
"},{"location":"langdives/Java/Spring/SpringCoreFramework/#beans-and-ioc-container","title":"Beans and IoC Container","text":"

Spring beans are the building blocks of any Spring application. They represent the objects that the Spring IoC container manages throughout their lifecycle. Understanding beans and their lifecycle is critical for mastering the Spring Core framework. Let\u2019s explore everything about beans\u2014from creation, scopes, lifecycle, initialization, destruction, and more\u2014in detail.

"},{"location":"langdives/Java/Spring/SpringCoreFramework/#what-is-a-bean","title":"What is a Bean ?","text":"

A bean in Spring is an object that is instantiated, assembled, and managed by the IoC container. The container creates, initializes, and wires these beans, ensuring that all dependencies are injected as needed. Beans are usually defined using XML configuration, Java-based configuration, or annotations such as @Component.

"},{"location":"langdives/Java/Spring/SpringCoreFramework/#configuring-beans","title":"Configuring Beans","text":"

Spring provides multiple ways to declare beans and register them with the IoC container:

"},{"location":"langdives/Java/Spring/SpringCoreFramework/#xml-based-configuration","title":"XML-based Configuration","text":"

Traditional way of defining beans using XML files.

XML Config Example
<beans xmlns=\"http://www.springframework.org/schema/beans\" \n    xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"\n    xsi:schemaLocation=\"http://www.springframework.org/schema/beans \n    http://www.springframework.org/schema/beans/spring-beans.xsd\">\n\n    <bean id=\"paymentService\" class=\"com.example.PaymentService\"/>\n    <bean id=\"orderService\" class=\"com.example.OrderService\">\n        <constructor-arg ref=\"paymentService\"/>\n    </bean>\n</beans>\n
"},{"location":"langdives/Java/Spring/SpringCoreFramework/#java-based-configuration","title":"Java-based Configuration","text":"

Spring allows you to use Java classes to define beans. This is cleaner and avoids XML boilerplate.

Java Config Example
@Configuration\npublic class AppConfig {\n\n    @Bean\n    public PaymentService paymentService() {\n        return new PaymentService();\n    }\n\n    @Bean\n    public OrderService orderService() {\n        return new OrderService(paymentService());\n    }\n}\n
"},{"location":"langdives/Java/Spring/SpringCoreFramework/#component-scanning-with-annotations","title":"Component Scanning with Annotations","text":"

You can annotate classes with @Component, @Service, @Repository, or @Controller. Spring automatically detects these beans if @ComponentScan is enabled.

Annotations Example
@Component\npublic class PaymentService { }\n\n@Component\npublic class OrderService {\n    private final PaymentService paymentService;\n\n    @Autowired\n    public OrderService(PaymentService paymentService) {\n        this.paymentService = paymentService;\n    }\n}\n
@Configuration\n@ComponentScan(basePackages = \"com.example\")\npublic class AppConfig { }\n
"},{"location":"langdives/Java/Spring/SpringCoreFramework/#bean-scopes","title":"Bean Scopes","text":"

The scope of a bean defines the lifecycle and visibility of that bean within the Spring IoC container.

"},{"location":"langdives/Java/Spring/SpringCoreFramework/#types-of-bean-scopes","title":"Types of Bean Scopes","text":"

singleton (default): A single instance of the bean is created and shared across the entire application. Used for stateless beans.

Example
@Scope(\"singleton\")\n@Component\npublic class SingletonBean { }\n

prototype: A new instance is created every time the bean is requested. Useful for stateful objects or temporary tasks.

Example
@Scope(\"prototype\")\n@Component\npublic class PrototypeBean { }\n

request: A new bean instance is created for each HTTP request. Used in web applications (see the sketch below).
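Request-scoped beans usually need a scoped proxy so they can be injected into longer-lived singletons; a minimal sketch (the bean name is illustrative):

import org.springframework.context.annotation.Scope;\nimport org.springframework.context.annotation.ScopedProxyMode;\nimport org.springframework.stereotype.Component;\nimport org.springframework.web.context.WebApplicationContext;\n\n@Scope(value = WebApplicationContext.SCOPE_REQUEST, proxyMode = ScopedProxyMode.TARGET_CLASS)\n@Component\npublic class RequestBean { }  // A fresh instance backs the proxy on every HTTP request\n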

session: A single instance is created per HTTP session.

globalSession: A global session scope for applications using portlets (legacy; portlet support was removed in Spring 5).

"},{"location":"langdives/Java/Spring/SpringCoreFramework/#bean-lifecycle","title":"Bean Lifecycle","text":"

Each Spring bean goes through several lifecycle phases, starting from instantiation to destruction. The Spring IoC container manages this lifecycle internally.

Bean Lifecycle Phases: instantiation, property population (dependency injection), aware callbacks (e.g., BeanNameAware), BeanPostProcessor pre-initialization, initialization (@PostConstruct, afterPropertiesSet(), custom init method), BeanPostProcessor post-initialization, bean in use, and finally destruction (@PreDestroy, destroy(), custom destroy method).

"},{"location":"langdives/Java/Spring/SpringCoreFramework/#bean-lifecycle-callbacks","title":"Bean Lifecycle Callbacks","text":"

InitializingBean Interface: If a bean implements the InitializingBean interface, it must override the afterPropertiesSet() method, which is called after all properties are set.

Example
public class MyService implements InitializingBean {\n    @Override\n    public void afterPropertiesSet() {\n        System.out.println(\"MyService is initialized.\");\n    }\n}\n

DisposableBean Interface: If a bean implements the DisposableBean interface, it must override the destroy() method, which is called during the destruction phase.

Example
public class MyService implements DisposableBean {\n    @Override\n    public void destroy() {\n        System.out.println(\"MyService is being destroyed.\");\n    }\n}\n

Using @PostConstruct and @PreDestroy: These annotations are the recommended way to manage initialization and destruction callbacks.

Example
@Component\npublic class MyService {\n\n    @PostConstruct\n    public void init() {\n        System.out.println(\"Initialization logic in @PostConstruct.\");\n    }\n\n    @PreDestroy\n    public void cleanup() {\n        System.out.println(\"Cleanup logic in @PreDestroy.\");\n    }\n}\n

Custom Initialization and Destruction Methods: You can also specify custom methods in the bean configuration.

Example (Java Config)
@Bean(initMethod = \"init\", destroyMethod = \"cleanup\")\npublic MyService myService() {\n    return new MyService();\n}\n
"},{"location":"langdives/Java/Spring/SpringCoreFramework/#eager-vs-lazy-initialization","title":"Eager vs. Lazy Initialization","text":""},{"location":"langdives/Java/Spring/SpringCoreFramework/#eager-initialization","title":"Eager Initialization","text":"

All singleton beans are created at application startup (default behavior). This resolves all dependencies upfront, so configuration errors surface early and first requests are not slowed by bean creation.

"},{"location":"langdives/Java/Spring/SpringCoreFramework/#lazy-initialization","title":"Lazy Initialization","text":"

Beans are created only when they are first requested. You can enable lazy initialization at the bean level using @Lazy.

Example
@Lazy\n@Component\npublic class LazyBean { }\n
"},{"location":"langdives/Java/Spring/SpringCoreFramework/#di-and-bean-relationships","title":"DI and Bean Relationships","text":"

The Spring IoC container resolves bean dependencies through constructor injection, setter injection, or field injection. You can also specify bean ordering explicitly using the depends-on attribute in XML.

Example (XML)
<bean id=\"databaseConnection\" class=\"com.example.DatabaseConnection\"/>\n<bean id=\"orderService\" class=\"com.example.OrderService\" depends-on=\"databaseConnection\"/>\n

This ensures that the databaseConnection bean is initialized before the orderService bean.

"},{"location":"langdives/Java/Spring/SpringCoreFramework/#beans-circular-dependencies","title":"Beans Circular Dependencies","text":"

A circular dependency occurs when two or more beans are mutually dependent on each other.

Example
@Component\npublic class A {\n    @Autowired\n    private B b;\n}\n\n@Component\npublic class B {\n    @Autowired\n    private A a;\n}\n

Spring can resolve circular dependencies between singleton beans wired via setter or field injection (it exposes an early reference to the partially constructed bean), but it fails for constructor injection. To avoid circular dependencies: refactor the code to reduce coupling, use setter injection instead of constructor injection, or mark one side @Lazy so a proxy is injected (see the sketch below).
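A sketch of the @Lazy workaround (class names follow the example above):

@Component\npublic class A {\n    private final B b;\n\n    @Autowired\n    public A(@Lazy B b) {  // A lazy proxy is injected, breaking the construction cycle\n        this.b = b;\n    }\n}\n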

"},{"location":"langdives/Java/Spring/SpringCoreFramework/#bean-definition-inheritance","title":"Bean Definition Inheritance","text":"

Spring allows bean definitions to inherit properties from a parent bean. This helps reduce configuration duplication.

Example (XML)
<bean id=\"parentBean\" class=\"com.example.BaseService\">\n    <property name=\"name\" value=\"Base Service\"/>\n</bean>\n\n<bean id=\"childBean\" class=\"com.example.ChildService\" parent=\"parentBean\">\n    <property name=\"name\" value=\"Child Service\"/>\n</bean>\n
"},{"location":"langdives/Java/Spring/SpringCoreFramework/#aspect-oriented-programming","title":"Aspect-Oriented Programming","text":"

AOP allows you to separate cross-cutting concerns (like logging, security, or transaction management) from the business logic. In Spring, AOP is implemented using aspects, advice, and pointcuts.

AOP Example in Spring Define an Aspect
@Aspect\n@Component\npublic class LoggingAspect {\n    @Before(\"execution(* com.example.*.*(..))\")\n    public void logBefore(JoinPoint joinPoint) {\n        System.out.println(\"Before method: \" + joinPoint.getSignature().getName());\n    }\n}\n
Enable AOP
@Configuration\n@EnableAspectJAutoProxy\npublic class AppConfig { }\n
"},{"location":"langdives/Java/Spring/SpringCoreFramework/#spring-events","title":"Spring Events","text":"

Spring supports an event-driven model that allows you to build decoupled components. The ApplicationContext can publish events and allow listeners to respond to them.

Example of a Custom Event
public class CustomEvent extends ApplicationEvent {\n    public CustomEvent(Object source) {\n        super(source);\n    }\n}\n
@Component\npublic class CustomEventListener implements ApplicationListener<CustomEvent> {\n    @Override\n    public void onApplicationEvent(CustomEvent event) {\n        System.out.println(\"Received custom event: \" + event);\n    }\n}\n
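The missing piece is publishing; a minimal sketch using ApplicationEventPublisher (the publisher class name is illustrative):

@Component\npublic class CustomEventPublisher {\n    private final ApplicationEventPublisher publisher;\n\n    public CustomEventPublisher(ApplicationEventPublisher publisher) {\n        this.publisher = publisher;\n    }\n\n    public void publish() {\n        publisher.publishEvent(new CustomEvent(this));  // Delivered synchronously to matching listeners\n    }\n}\n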
"},{"location":"langdives/Java/Spring/SpringCoreFramework/#spring-expression-lang-spel","title":"Spring Expression Lang (SpEL)","text":"

SpEL allows you to manipulate and query beans dynamically. It can be used inside XML or annotations.

Example of SpEL
<bean id=\"myBean\" class=\"com.example.MyClass\">\n    <property name=\"value\" value=\"#{2 + 3}\"/>\n</bean>\n
"},{"location":"langdives/Java/Spring/SpringCoreFramework/#summary","title":"Summary","text":""},{"location":"langdives/Java/Spring/SpringFrameworkVsSpringBoot/","title":"Difference B/W Spring Framework & Boot","text":"

A detailed comparison table covering the main differences between Spring Boot and the Spring Framework, from setup, configuration, and embedded servers to web applications, testing, microservices support, and more.

"},{"location":"langdives/Java/Spring/SpringFrameworkVsSpringBoot/#differences","title":"Differences","text":"Category Spring Framework Spring Boot Purpose A comprehensive framework for building Java applications. An extension of Spring Framework to simplify configuration and create stand-alone applications. Setup Requires manual setup, including XML or Java-based configuration. Minimal setup with auto-configuration based on the classpath. Main Focus Provides flexibility and control over every aspect of the application. Focuses on rapid development with sensible defaults and opinions. Configuration Can use XML, Java-based, or annotation-based configuration. Uses annotations and properties/YAML files for configuration. Learning Curve Requires more learning time due to complexity. Easier to get started with for beginners due to auto-configuration and pre-built setups. Project Dependencies Requires managing multiple dependencies for each feature manually. Provides starters (e.g., spring-boot-starter-web) to include required dependencies. Embedded Server No embedded server support; WAR files must be deployed to external servers (Tomcat, Jetty, etc.). Comes with embedded servers (Tomcat, Jetty, or Undertow) for running stand-alone applications. Deployment Deploy WAR/EAR files to external servers. Runs applications directly as JAR files with embedded servers. Auto-Configuration No auto-configuration; requires manual configuration of components. Auto-configures components based on available classpath dependencies. Application Entry Point Relies on external servlet containers to manage the lifecycle. Uses @SpringBootApplication as the entry point with SpringApplication.run(). Microservices Support Not specialized for microservices; requires additional tools. Built with microservices architecture in mind. Supports Spring Cloud, Eureka, Feign, etc. Performance Requires more configuration to optimize performance. Better suited for lightweight and high-performance microservices. Profiles Supports profiles for environment-specific configurations but requires more setup. Supports profiles easily through application.yml or application.properties. Testing Provides JUnit support but requires manual configuration for context loading. Provides easy testing with @SpringBootTest, @MockBean, @DataJpaTest, and others. Security Uses Spring Security but requires manual integration. Integrates Spring Security easily with spring-boot-starter-security. Database Access Provides JDBC, JPA, ORM support, but requires more configuration. Simplifies database access with Spring Data JPA and auto-configuration of DataSource. Starters and Dependency Management Requires manual management of dependencies and configurations. Provides Spring Boot Starters that bundle all required dependencies for specific use cases. Template Engines Supports Thymeleaf, JSP, and others with manual setup. Supports template engines with starters (e.g., spring-boot-starter-thymeleaf). Command-Line Interface (CLI) No built-in CLI support. Provides Spring Boot CLI to run Groovy scripts for quick development. Actuator and Monitoring Requires external monitoring tools or custom configurations. Comes with Spring Boot Actuator to monitor application health, metrics, and endpoints. DevTools for Hot Reload Requires manual setup for hot reloading of code changes. Provides Spring Boot DevTools for hot reloading during development. Support for Reactive Programming Supports Spring WebFlux and Project Reactor (from version 5.x). 
Fully supports Spring WebFlux for reactive, non-blocking programming. Circuit Breakers & Resilience Requires integration with third-party libraries like Hystrix. Seamlessly integrates with Resilience4j and Spring Cloud for resilience. Integration with Cloud Platforms Requires Spring Cloud or manual setup for cloud integration. Seamlessly integrates with Spring Cloud for cloud-native development. Logging Configuration Requires manual configuration of logging frameworks (e.g., Log4j, SLF4J). Provides auto-configured logging using Logback by default. Health Checks and Metrics Requires manual configuration to expose health metrics. Provides Actuator endpoints (/actuator/health, /actuator/metrics) out-of-the-box. Web Framework Uses Spring MVC for building web applications. Uses Spring MVC or Spring WebFlux with easy setup through starters. Restful API Development Requires manual setup of controllers and components. Provides easy development with @RestController and auto-configuration of REST endpoints. Command-Line Arguments Support Requires manual handling of command-line arguments. Easily reads command-line arguments with SpringApplication or @Value. Caching Support Requires setting up EhCache, Guava, or other caching solutions manually. Provides easy caching configuration with @EnableCaching and auto-configuration. Internationalization (i18n) Supports i18n but requires more setup. Supports i18n with minimal configuration through application.properties. Job Scheduling Requires integration with Quartz or other scheduling libraries. Supports scheduling with @Scheduled and Task Executors. Dependency Injection (DI) Provides dependency injection with IoC container. Same as Spring Framework but simplifies it with auto-wiring using @Autowired. Backward Compatibility Must manually update configurations when upgrading versions. Provides backward compatibility with most Spring projects. Community and Ecosystem Large community and extensive ecosystem. Built on top of Spring Framework with additional tools for modern development."},{"location":"langdives/Java/Spring/SpringFrameworkVsSpringBoot/#summary","title":"Summary","text":"

Spring Boot simplifies the Spring Framework by providing auto-configuration, starter dependencies, embedded servers, and production-ready tooling such as Actuator.

Spring Framework gives more control and flexibility but at the cost of manual setup and configuration.

Spring Boot is optimized for rapid development, especially for microservices and cloud-native applications.

Spring Framework is still relevant for legacy applications or when fine-grained control is necessary.

"},{"location":"techdives/DistrubutedConcepts/HighAvailabilityFaultTolerance/","title":"High Availability and Fault Tolerance","text":""},{"location":"techdives/DistrubutedConcepts/HighAvailabilityFaultTolerance/#high-availability-ha","title":"High Availability (HA)","text":"

High Availability (HA) refers to a system or infrastructure's ability to remain operational and accessible for a very high percentage of time, minimizing downtime. In essence, it's a design principle used in IT to ensure that services, applications, or systems are continuously available, even in the event of hardware failures, software issues, or unexpected disruptions.

Key Aspects of High Availability: redundancy of critical components, automatic failover, load balancing, and continuous health monitoring.

"},{"location":"techdives/DistrubutedConcepts/HighAvailabilityFaultTolerance/#ha-levels","title":"HA Levels","text":"

Availability is often expressed as a percentage. For instance, an uptime of 99.99% means the service is expected to be down for only 52 minutes in a year.

Common availability standards: 99% (two nines, ~3.65 days of downtime per year), 99.9% (~8.76 hours per year), 99.99% (~52.6 minutes per year), and 99.999% (five nines, ~5.26 minutes per year).

"},{"location":"techdives/DistrubutedConcepts/HighAvailabilityFaultTolerance/#ha-use-cases","title":"HA Use Cases","text":""},{"location":"techdives/DistrubutedConcepts/HighAvailabilityFaultTolerance/#ha-challenges","title":"HA Challenges","text":""},{"location":"techdives/DistrubutedConcepts/HighAvailabilityFaultTolerance/#ha-when-to-use","title":"HA When to Use ?","text":""},{"location":"techdives/DistrubutedConcepts/HighAvailabilityFaultTolerance/#fault-tolerance-ft","title":"Fault Tolerance (FT)","text":"

Fault Tolerance refers to the ability of a system, network, or application to continue functioning correctly even when one or more of its components fail. It ensures continuous operation without loss of service, despite hardware, software, or other types of faults occurring in the system. Fault tolerance plays a crucial role in ensuring high reliability and availability of critical systems.

Key Principles of Fault Tolerance: redundancy, replication, error detection, and automatic recovery.

"},{"location":"techdives/DistrubutedConcepts/HighAvailabilityFaultTolerance/#ft-techniques","title":"FT Techniques:","text":"
  1. Hardware Fault Tolerance:

    • RAID (Redundant Array of Independent Disks): Data is mirrored or striped across multiple hard drives to prevent data loss from disk failure.
    • Dual Power Supplies: Servers often include multiple power supplies to prevent failure if one unit fails.
    • Hot Swapping: Faulty components like disks or power units can be replaced without shutting down the system.
  2. Software Fault Tolerance:

    • Checkpoints and Rollbacks: Systems can save checkpoints periodically, and if an error occurs, they revert to the last known good state.
    • Replication in Distributed Systems: Critical services are duplicated across multiple servers to ensure that if one server fails, others take over.
  3. Network Fault Tolerance:

    • Multiple Network Paths: Routing data over multiple paths ensures that if one link fails, another path is used.
    • Load Balancers: They distribute network traffic across multiple servers or systems, ensuring no single point of failure.
  4. Error Detection and Recovery:

    • Watchdog Timers: Monitor system processes and restart them if they hang.
    • Checksums and Parity Checks: Verify data integrity and correct transmission errors.
"},{"location":"techdives/DistrubutedConcepts/HighAvailabilityFaultTolerance/#ft-real-world-examples","title":"FT Real-World Examples","text":""},{"location":"techdives/DistrubutedConcepts/HighAvailabilityFaultTolerance/#ft-challenges","title":"FT Challenges","text":""},{"location":"techdives/DistrubutedConcepts/HighAvailabilityFaultTolerance/#ft-in-contrast-to-resilience","title":"FT in Contrast to Resilience","text":""},{"location":"techdives/DistrubutedConcepts/HighAvailabilityFaultTolerance/#ft-when-to-use","title":"FT When to Use ?","text":""},{"location":"techdives/DistrubutedConcepts/HighAvailabilityFaultTolerance/#ha-vs-ft","title":"HA vs FT","text":"

High Availability (HA) and Fault Tolerance (FT) are two strategies aimed at keeping systems operational, but they approach this goal differently. Here's a detailed comparison:

In other words, high availability aims to reduce downtime, whereas fault-tolerant systems aim for zero downtime, even during failures.

Aspect | High Availability (HA) | Fault Tolerance (FT)\nDefinition | Ensures minimal downtime by quickly switching to backup systems when a failure occurs. | Ensures continuous operation even during failures, with no noticeable interruption.\nGoal | Minimize downtime and ensure service is restored quickly. | Eliminate downtime and maintain seamless operation during faults.\nApproach | Uses redundant components and failover systems to switch operations when needed. | Uses duplication of systems to ensure tasks are always mirrored on another system.\nDowntime | Small amount of downtime during failover (milliseconds to minutes). | No downtime; systems operate continuously, even during faults.\nExample Use Cases | E-commerce websites (e.g., Amazon) that switch servers when one fails. | Airplane control systems, which cannot afford any interruptions.\nRedundancy Type | Active-Passive: Backup components are activated only when primary systems fail. | Active-Active: All components are working simultaneously, and one continues if the other fails.\nCost | Less expensive since backup systems are not always active. | More expensive due to constant replication and active systems running in parallel.\nComplexity | Easier to implement and manage due to reliance on failover mechanisms. | More complex, requiring real-time synchronization and parallel operation.\nPerformance Impact | Some performance hit during failover but minimal. | Higher overhead, as multiple systems operate simultaneously.\nUse Case Example | Cloud platforms (like AWS) use high availability to ensure that servers recover quickly after a failure. | Nuclear power plants employ fault-tolerant systems to keep critical processes running with no interruptions.\nFailure Handling | Handles component failures through redundancy and quick recovery mechanisms. | Prevents failure from affecting the system by running identical processes or systems in parallel.\n"},{"location":"techdives/DistrubutedConcepts/HighAvailabilityFaultTolerance/#summary","title":"Summary","text":"

In summary, high availability ensures that critical systems are always accessible with minimal interruptions. Organizations rely on HA strategies to meet customer expectations, protect revenue, and ensure business continuity, especially in industries where even a small amount of downtime can have serious consequences. Fault tolerance is the ability of a system to keep operating without interruption despite experiencing faults or failures; it is crucial for mission-critical systems in industries like aviation, finance, and healthcare, where downtime or errors could lead to catastrophic outcomes.

In essence, High Availability focuses on minimizing downtime by recovering quickly from failures, while Fault Tolerance eliminates downtime altogether by ensuring the system continues running seamlessly. HA is less costly and easier to implement, while FT is expensive and complex but essential for critical environments where even a few seconds of downtime are unacceptable.

"},{"location":"techdives/DistrubutedSystems/DockerAndK8s/","title":"Docker and Kubernetes","text":""},{"location":"techdives/DistrubutedSystems/DockerAndK8s/#overview","title":"Overview","text":""},{"location":"techdives/DistrubutedSystems/DockerAndK8s/#docker","title":"Docker","text":"

Docker is a platform that enables developers to build, package, and run applications in lightweight containers. It ensures applications are portable and can run consistently across different environments, from development to production.

Key Components

"},{"location":"techdives/DistrubutedSystems/DockerAndK8s/#kubernetes","title":"Kubernetes","text":"

Kubernetes (often abbreviated as K8s) is an open-source platform for automating the deployment, scaling, and management of containerized applications. It abstracts away the complexity of running containers at scale across multiple machines.

Key Components

"},{"location":"techdives/DistrubutedSystems/DockerAndK8s/#hierarchy-and-relationship","title":"Hierarchy and Relationship","text":""},{"location":"techdives/DistrubutedSystems/DockerAndK8s/#docker-vs-kubernetes","title":"Docker vs Kubernetes","text":"

Docker focuses on building, packaging, and running containers, handling application-level concerns, whereas Kubernetes focuses on orchestrating, scaling, and managing containers across distributed environments.

"},{"location":"techdives/DistrubutedSystems/DockerAndK8s/#how-they-work-together","title":"How they Work Together","text":""},{"location":"techdives/DistrubutedSystems/DockerAndK8s/#using-docker-with-kubernetes","title":"Using Docker with Kubernetes","text":"Steps

Step 1: Build Docker Images. Create a Dockerfile for your application and build the image:

docker build -t my-app:latest .\n

Step 2: Push to Registry. Store the image in a registry (e.g., Docker Hub):

docker push my-app:latest\n

Step 3: Deploy on Kubernetes. Reference the built Docker image in a Kubernetes Deployment YAML file and apply it with:

kubectl apply -f deployment.yaml\n

"},{"location":"techdives/DistrubutedSystems/DockerAndK8s/#4-creating-and-managing-docker-and-kubernetes-components-individually","title":"4. Creating and Managing Docker and Kubernetes Components Individually","text":""},{"location":"techdives/DistrubutedSystems/DockerAndK8s/#creating-docker-components","title":"Creating Docker Components","text":"
  1. Install Docker: Follow Docker\u2019s official installation guide for your operating system.

  2. Create a Dockerfile: Example:

    FROM python:3.9\nWORKDIR /app\nCOPY . .\nRUN pip install -r requirements.txt\nCMD [\"python\", \"app.py\"]\n

  3. Build and Run Docker Containers:

    Build the image:
    docker build -t my-app:latest .\n
    Run the container:

    docker run -d -p 5000:5000 my-app:latest\n

  4. Use Docker Compose:

    Define services in a docker-compose.yml:
    version: '3.8'\nservices:\n  web:\n    image: my-app:latest\n    ports:\n      - \"5000:5000\"\n
    Start the services with:
    docker-compose up\n
"},{"location":"techdives/DistrubutedSystems/DockerAndK8s/#creating-kubernetes-components","title":"Creating Kubernetes Components","text":"
  1. Install Kubernetes: Use Minikube for local development or create a production cluster using cloud providers like GKE, AKS, or EKS.

  2. Define Kubernetes Resources: Create YAML manifests for Deployments and Services.

  3. Deployment YAML:

    apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: my-app\nspec:\n  replicas: 2\n  selector:\n    matchLabels:\n      app: my-app\n  template:\n    metadata:\n      labels:\n        app: my-app\n    spec:\n      containers:\n        - name: my-app\n          image: my-app:latest\n          ports:\n            - containerPort: 5000\n

  4. Service YAML:

    apiVersion: v1\nkind: Service\nmetadata:\n  name: my-app-service\nspec:\n  type: NodePort\n  ports:\n    - port: 5000\n      targetPort: 5000\n  selector:\n    app: my-app\n

  5. Deploy to Kubernetes:

    kubectl apply -f deployment.yaml\nkubectl apply -f service.yaml\n

"},{"location":"techdives/DistrubutedSystems/DockerAndK8s/#5-deployment-scenarios","title":"5. Deployment Scenarios","text":""},{"location":"techdives/DistrubutedSystems/DockerAndK8s/#single-machine-deployment","title":"Single Machine Deployment","text":""},{"location":"techdives/DistrubutedSystems/DockerAndK8s/#multiple-machines-deployment","title":"Multiple Machines Deployment","text":""},{"location":"techdives/DistrubutedSystems/DockerAndK8s/#6-communication-between-services","title":"6. Communication Between Services","text":""},{"location":"techdives/DistrubutedSystems/DockerAndK8s/#7-key-differences-between-docker-compose-and-kubernetes","title":"7. Key Differences Between Docker Compose and Kubernetes","text":"Aspect Docker Compose Kubernetes Purpose Local development and testing Production orchestration Configuration Single docker-compose.yml file Multiple YAML files for resources Scaling Manual Automated scaling with kubectl scale High Availability Limited Built-in redundancy and self-healing Use Case Simple applications on one machine Complex workloads across clusters"},{"location":"techdives/DistrubutedSystems/DockerAndK8s/#8-additional-considerations","title":"8. Additional Considerations","text":""},{"location":"techdives/DistrubutedSystems/DockerAndK8s/#9-how-to-use-docker-compose-to-get-the-application-from-github-and-build-the-docker-image","title":"9. How to Use Docker Compose to Get the Application from GitHub and Build the Docker Image","text":""},{"location":"techdives/DistrubutedSystems/DockerAndK8s/#getting-from-github-and-using-docker-compose-build-context","title":"Getting from Github and using Docker Compose Build Context","text":"

Docker Compose can also use the build context to pull code directly from GitHub when creating an image. Here\u2019s how:

  1. Create a docker-compose.yml with GitHub Repository as the Build Context:
version: '3.8'\nservices:\n  app:\n    build:\n      context: https://github.com/your-username/your-repo.git\n      dockerfile: Dockerfile\n    ports:\n      - \"5000:5000\"\n
  2. Run Docker Compose to Build and Start the Container:
docker-compose up --build\n

Note:
  - This method works only if the GitHub repository is public. For private repositories, you'll need to provide authentication (e.g., via SSH keys or GitHub tokens).
  - Docker will pull the latest version from the specified GitHub repository and build the image based on the Dockerfile in the repository.

"},{"location":"techdives/DistrubutedSystems/DockerAndK8s/#10-silly-and-practical-questions-numbered","title":"10. Silly and Practical Questions (Numbered)","text":"
  1. Can I use a Docker Compose file with Kubernetes? Not directly. Kubernetes doesn\u2019t understand Docker Compose syntax, but there are tools like Kompose that can convert Docker Compose files into Kubernetes YAML files.

  2. What happens if I try to run a Docker Compose file inside a Kubernetes cluster? It won\u2019t work. Kubernetes will look at you confused (figuratively), because it expects YAML manifests with its own syntax, not a docker-compose.yml.

  3. Why do Kubernetes YAML files look scarier than Docker Compose files? Kubernetes YAML files are more complex because they handle more advanced scenarios like scaling, networking, and rolling updates, which Docker Compose doesn\u2019t attempt to address.

  4. Do I need to uninstall Docker if I switch to Kubernetes? Nope! Docker is still useful for building and testing images locally, even if you\u2019re deploying to Kubernetes. In fact, Kubernetes can use Docker as a container runtime.

  5. Will Docker containers fight with Kubernetes Pods if they run on the same machine? Nope, they\u2019ll coexist peacefully. Docker containers and Kubernetes Pods can run side by side without conflict. They\u2019re friends, not rivals!

  6. Can I copy-paste my Docker Compose file into Kubernetes and hope it works? Sorry, no shortcuts here. You need to convert the Compose file into Kubernetes resources, either manually or using tools like Kompose.

  7. Is Docker Compose faster than Kubernetes because it has fewer YAML files? Yes, Docker Compose is faster to set up for local development because it\u2019s simpler. But for production-scale orchestration, Kubernetes is much more powerful.

  8. How do I know if my container is happy inside a Kubernetes Pod? Check with this command:

    kubectl get pods\n
    If the status is Running, your container is content. If you see CrashLoopBackOff, it\u2019s definitely not happy!

  9. Can I use Kubernetes without the cloud, or will it complain? You can use Minikube or kind (Kubernetes in Docker) to run Kubernetes locally on your machine. No cloud required.

  10. What\u2019s the difference between docker-compose up and kubectl apply -f?

    • docker-compose up: Starts containers defined in a docker-compose.yml file.
    • kubectl apply -f: Deploys resources (like Pods, Deployments) described in a Kubernetes YAML file to your cluster.
  11. Do I still need to learn Docker Swarm if I already know Kubernetes? Not really. Docker Swarm is simpler but not as widely used in production as Kubernetes. Kubernetes has become the de facto standard.

  12. Can a single Pod run multiple Docker Compose services? Yes! A Pod can run multiple containers, similar to how Docker Compose runs multiple services. However, in Kubernetes, these containers should be tightly coupled (e.g., sharing resources).

  13. If Docker Compose is easier, why do people torture themselves with Kubernetes? Kubernetes offers features like scaling, self-healing, and load balancing. It\u2019s overkill for simple projects but essential for large, distributed applications.

  14. Is Kubernetes just a fancy way of saying, \u201cI don\u2019t want to use Docker Compose\u201d? Not exactly. Docker Compose is great for local setups, while Kubernetes is a powerful orchestration tool for running applications across multiple nodes at scale.

  15. What\u2019s the difference between a Pod and a Container? Can I use the words interchangeably? Not quite. A Pod is a wrapper that can contain one or more containers. Pods are the smallest deployable unit in Kubernetes, but a container is just an isolated environment for running applications.

  16. If a container crashes in Kubernetes, does Kubernetes get sad? Nope! Kubernetes will restart the container automatically. That\u2019s part of its self-healing magic.

  17. Will my application break if I use a Docker image from 2015? It might! Older images could have compatibility issues or security vulnerabilities. Use them only if you\u2019re sure they still meet your needs.

  18. Is Kubernetes allergic to Windows, or will it run happily there? Kubernetes supports Windows nodes, but the experience is smoother with Linux. Most people deploy Kubernetes on Linux-based clusters.

  19. Can I use both Docker and Kubernetes at the same time? Or will it cause chaos? Yes, you can use both. Build your containers with Docker, push them to a registry, and deploy them with Kubernetes. No chaos \u2013 just smooth workflows.

  20. Why can\u2019t Docker Compose just learn scaling and take over Kubernetes' job? Docker Compose is intentionally lightweight and simple. Adding Kubernetes-like features would complicate it and defeat its original purpose.

  21. How much YAML is too much YAML? If you start dreaming in YAML, it\u2019s probably too much. But seriously, Kubernetes relies heavily on YAML, so learning to manage it effectively is key.

  22. Can Kubernetes work without YAML files? (Please say yes!) Unfortunately, no. YAML files are essential for defining resources in Kubernetes. You can use Helm charts to simplify it, but YAML is unavoidable.

  23. What happens if I forget to push my Docker image before deploying with Kubernetes? Your deployment will fail because Kubernetes won\u2019t find the image in the registry. Always remember to push!

  24. Can I use kubectl commands on Docker containers? Nope. kubectl is specifically for managing Kubernetes resources. Use docker commands for Docker containers.

  25. Is Kubernetes only for tech wizards, or can normal humans use it too? Normal humans can use it too! The learning curve is steep, but with practice, anyone can master it.

  26. Do I need to sacrifice sleep to understand Kubernetes? Maybe at first. But once you get the hang of it, Kubernetes will become your friend, and sleep will return.

  27. Can a Docker container tell the difference between running on Kubernetes and Docker Compose? Nope! The container itself doesn\u2019t care where it\u2019s running. As long as it gets its dependencies and configuration, it\u2019ll happily run anywhere.

  28. Can I run two Docker Compose files on one machine? Yes, use the -p option to specify different project names for each Compose file.

  29. Can services communicate across multiple machines? Yes, with Docker Swarm or Kubernetes, services can communicate across machines using overlay networks or Kubernetes networking.

  30. Is Docker Compose suitable for production? Not recommended for large-scale production. Use Kubernetes or Docker Swarm instead.

  31. How do I set up Kubernetes on a single machine? Use Minikube to run a local Kubernetes cluster.

  32. What file formats are used by Docker and Kubernetes? Docker uses Dockerfile and docker-compose.yml. Kubernetes uses YAML files for resources like Deployments and Services.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/","title":"ElasticSearch","text":"

Elasticsearch is a search engine based on Apache Lucene. It provides a distributed, multitenant-capable full-text search engine with an HTTP web interface and schema-free JSON documents.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#elasticsearch-basics-and-fundamentals","title":"ElasticSearch Basics and Fundamentals","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#1-core-concepts","title":"1. Core Concepts:","text":"

Diving into Elasticsearch's core concepts is essential for understanding its architecture and functionality.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#11-documents-and-indices","title":"1.1 Documents and Indices","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#12-mapping-and-types","title":"1.2 Mapping and Types","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#13-shards-and-replicas","title":"1.3 Shards and Replicas","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#14-cluster-nodes-and-roles","title":"1.4 Cluster, Nodes, and Roles","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#15-elasticsearch-api-actions-crud","title":"1.5 Elasticsearch API Actions (CRUD)","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#summary-of-the-core-concepts","title":"Summary of the Core Concepts","text":"
  1. Data is divided into documents, stored in indices, and distributed across shards.
  2. Nodes work together in a cluster, balancing the load for efficient querying and data redundancy.
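
To make the CRUD actions from section 1.5 concrete, here is a minimal sketch of the four basic document operations (the index name my-index and the field values are illustrative only):

# create or overwrite document 1\nPUT /my-index/_doc/1\n{ \"title\": \"Elasticsearch powers search\" }\n\n# read it back\nGET /my-index/_doc/1\n\n# partial update of document 1\nPOST /my-index/_update/1\n{ \"doc\": { \"title\": \"Search powers insights\" } }\n\n# delete document 1\nDELETE /my-index/_doc/1\n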
"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#2-inverted-index","title":"2. Inverted Index","text":"

An inverted index is a fundamental data structure in Elasticsearch and other search engines. It optimizes search efficiency by storing a mapping from terms (words) to their locations within documents. Let\u2019s break it down into key components and processes:

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#21-core-structure-of-inverted-index","title":"2.1. Core Structure of Inverted Index","text":"

For instance, if a dataset contains the documents: - Doc 1: \u201cElasticsearch powers search\u201d - Doc 2: \u201cSearch powers insights\u201d

The inverted index would look like this:

\"Elasticsearch\" -> [Doc 1]\n\"powers\" -> [Doc 1, Doc 2]\n\"search\" -> [Doc 1, Doc 2]\n\"insights\" -> [Doc 2]\n

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#22-building-the-inverted-index","title":"2.2. Building the Inverted Index","text":"

The process involves several stages: - Tokenization: Splitting text into words or tokens. - Normalization: Making tokens consistent, like converting to lowercase. - Stemming/Lemmatization (optional): Reducing words to their base or root forms. - Indexing: Populating the index with terms and the corresponding document references.
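
You can observe these stages directly with the _analyze API; a minimal sketch (the analyzer choice and sample text are illustrative):

POST /_analyze\n{\n  \"analyzer\": \"standard\",\n  \"text\": \"Elasticsearch Powers Search\"\n}\n

The response lists the produced tokens (here elasticsearch, powers, search), showing tokenization and lowercase normalization before the terms enter the inverted index.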

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#23-how-searches-work","title":"2.3. How Searches Work","text":"

When a user searches for a term, Elasticsearch retrieves the postings list from the inverted index, quickly locating documents containing that term. For multi-term queries, Elasticsearch can intersect postings lists, using logical operations (e.g., AND, OR) to combine or filter results.
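
For example, a match query with the and operator requires all terms to match, which corresponds to intersecting the postings lists of the individual terms (index and field names are illustrative):

GET /my-index/_search\n{\n  \"query\": {\n    \"match\": {\n      \"content\": {\n        \"query\": \"powers search\",\n        \"operator\": \"and\"\n      }\n    }\n  }\n}\n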

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#24-optimizations","title":"2.4. Optimizations","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#25-benefits","title":"2.5. Benefits","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#summary-of-the-inverted-index","title":"Summary of the Inverted Index","text":"

Inverted indices are the foundation of Elasticsearch\u2019s speed and relevance in text search. This structure is tailored for high performance in full-text search scenarios, especially when complex queries and filtering are involved.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#3-analyzers","title":"3. Analyzers","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#31-what-is-an-analyzer","title":"3.1. What is an Analyzer?","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#32-components-of-an-analyzer","title":"3.2. Components of an Analyzer","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#33-built-in-analyzers","title":"3.3. Built-in Analyzers","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#34-custom-analyzer-configuration","title":"3.4. Custom Analyzer Configuration","text":"

Creating a custom analyzer involves defining: - A tokenizer (e.g., edge-ngram tokenizer for partial word matches). - A list of token filters to process the tokens (e.g., synonym filters, ASCII folding for diacritical marks).

Example configuration:

{\n  \"analysis\": {\n    \"analyzer\": {\n      \"custom_analyzer\": {\n        \"type\": \"custom\",\n        \"tokenizer\": \"whitespace\",\n        \"filter\": [\"lowercase\", \"stop\", \"synonym\"]\n      }\n    }\n  }\n}\n

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#35-usage-during-indexing-and-querying","title":"3.5. Usage During Indexing and Querying","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#36-practical-applications-of-custom-analyzers","title":"3.6. Practical Applications of Custom Analyzers","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#37-benefits-of-analyzers","title":"3.7. Benefits of Analyzers","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#summary-of-analyzers","title":"Summary of Analyzers.","text":"

Analyzers transform raw text into optimized, searchable data, playing a critical role in making Elasticsearch searches accurate and efficient.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#4-elasticsearch-queries","title":"4. ElasticSearch Queries","text":"

In Elasticsearch, queries are central to retrieving data. They\u2019re categorized as leaf queries (operating on specific fields) and compound queries (combining multiple queries). Here's a deep dive into each type, with examples to illustrate their functionality:

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#41-leaf-queries","title":"4.1 Leaf Queries","text":"

These are standalone, field-specific queries (like term and match above) that don\u2019t depend on other queries to function.
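
As a minimal sketch of the two families (index and field names are illustrative): a match query analyzes its input for full-text matching, while a term query looks up one exact value in a keyword field.

GET /products/_search\n{\n  \"query\": { \"match\": { \"description\": \"gaming laptop\" } }\n}\n\nGET /products/_search\n{\n  \"query\": { \"term\": { \"brand.keyword\": \"Lenovo\" } }\n}\n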

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#411-match-queries-for-full-text-search","title":"4.1.1 Match Queries (for Full-Text Search)","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#412-term-queries-for-structured-data","title":"4.1.2. Term Queries (for Structured Data)","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#42-compound-queries","title":"4.2. Compound Queries","text":"

Compound queries allow for complex logic by combining multiple queries, enabling fine-grained control over query conditions and relevance.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#421-bool-query","title":"4.2.1. Bool Query","text":"

The most flexible compound query, allowing logic-based combinations:

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#complex-boolean-query-example","title":"Complex Boolean Query Example","text":"

Combining multiple clauses:

{\n  \"query\": {\n    \"bool\": {\n      \"must\": [\n        { \"match\": { \"title\": \"Elasticsearch\" } }\n      ],\n      \"should\": [\n        { \"match\": { \"category\": \"tutorial\" } },\n        { \"match\": { \"category\": \"guide\" } }\n      ],\n      \"must_not\": [\n        { \"term\": { \"status\": \"archived\" } }\n      ],\n      \"filter\": [\n        { \"range\": { \"publish_date\": { \"gte\": \"2023-01-01\" } } }\n      ]\n    }\n  }\n}\n
This query retrieves documents with \"Elasticsearch\" in the title, optionally boosts relevance if the document is in \"tutorial\" or \"guide\" categories, excludes documents marked as \"archived,\" and only includes documents published after January 1, 2023.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#422-dis-max-query-disjunction-max","title":"4.2.2. Dis Max Query (disjunction max):","text":"

Optimizes for the highest relevance score among multiple queries, often used when querying across similar fields with varied wording. - Example: Searching for the most relevant match between \u201ctitle\u201d and \u201cdescription\u201d fields:

{\n    \"query\": {\n        \"dis_max\": {\n            \"queries\": [\n                { \"match\": { \"title\": \"elastic search\" } },\n                { \"match\": { \"description\": \"elastic search\" } }\n            ]\n        }\n    }\n}\n

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#43-geo-queries","title":"4.3 Geo Queries","text":"

Elasticsearch provides several geo-specific queries for filtering and scoring documents based on geographic location:

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#431-geo-bounding-box-query","title":"4.3.1 Geo Bounding Box Query","text":"

Defines a rectangular area by specifying two corner points (top-left and bottom-right). Documents with locations inside this box are matched.
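
A minimal sketch of such a query, assuming documents carry a geo_point field named location:

GET /places/_search\n{\n  \"query\": {\n    \"geo_bounding_box\": {\n      \"location\": {\n        \"top_left\": { \"lat\": 40.73, \"lon\": -74.10 },\n        \"bottom_right\": { \"lat\": 40.01, \"lon\": -71.12 }\n      }\n    }\n  }\n}\n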

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#432-geo-distance-query","title":"4.3.2 Geo Distance Query","text":"

Finds documents within a certain distance from a point. Useful for proximity searches, like \"find stores within 10 miles.\"

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#433-geo-polygon-query","title":"4.3.3 Geo Polygon Query","text":"

Searches within a polygon defined by a series of latitude and longitude points, allowing for irregular area shapes.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#434-geo-shape-query","title":"4.3.4 Geo Shape Query","text":"

The geo_shape query allows for more complex spatial filtering, using pre-defined shapes like circles, polygons, or lines. This is often used with indexed geometries.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#435-using-geo-filters-with-bool-queries","title":"4.3.5 Using Geo Filters with Bool Queries","text":"

Geo filters are often used in combination with other query types within bool queries, allowing flexible, location-based filtering along with other criteria.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#example-of-a-combined-geo-and-bool-query","title":"Example of a Combined Geo and Bool Query","text":"

Finds published documents within a specific area and filters out archived content.

{\n  \"query\": {\n    \"bool\": {\n      \"must\": [\n        { \"term\": { \"status\": \"published\" } }\n      ],\n      \"filter\": {\n        \"geo_distance\": {\n          \"distance\": \"50km\",\n          \"location\": {\n            \"lat\": 40.7128,\n            \"lon\": -74.0060\n          }\n        }\n      },\n      \"must_not\": [\n        { \"term\": { \"status\": \"archived\" } }\n      ]\n    }\n  }\n}\n
"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#summary-of-elastic-queries","title":"Summary of Elastic Queries","text":"

These queries, combined thoughtfully, make Elasticsearch highly adaptable to various search needs.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#5-aggregations","title":"5. Aggregations","text":"

Elasticsearch\u2019s aggregation framework is divided into metrics and bucket aggregations. Here\u2019s a deep dive into each, with subtypes and examples.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#51-metrics-aggregations","title":"5.1. Metrics Aggregations","text":"

These calculate values from field data, like sums or averages.
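
For instance, a minimal sketch computing the average and the full statistics of a numeric field (index and field names are illustrative; size: 0 suppresses the document hits so only aggregation results are returned):

GET /orders/_search\n{\n  \"size\": 0,\n  \"aggs\": {\n    \"avg_order_amount\": { \"avg\": { \"field\": \"order_amount\" } },\n    \"order_stats\": { \"stats\": { \"field\": \"order_amount\" } }\n  }\n}\n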

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#52-bucket-aggregations","title":"5.2. Bucket Aggregations","text":"

These create groups (buckets) of documents based on field values or criteria. Each bucket can contain documents matching conditions and may contain further sub-aggregations.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#53-combining-aggregations","title":"5.3. Combining Aggregations","text":"

Each aggregation can nest other aggregations, allowing complex analysis structures.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#example-of-a-combined-aggregation","title":"Example of a Combined Aggregation","text":"

Calculate the average order amount by city and age range:

{\n  \"aggs\": {\n    \"by_city\": {\n      \"terms\": { \"field\": \"city\" },\n      \"aggs\": {\n        \"age_ranges\": {\n          \"range\": { \"field\": \"age\", \"ranges\": [{ \"to\": 20 }, { \"from\": 20, \"to\": 30 }, { \"from\": 30 }] },\n          \"aggs\": {\n            \"avg_order_amount\": { \"avg\": { \"field\": \"order_amount\" } }\n          }\n        }\n      }\n    }\n  }\n}\n
"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#summary-of-aggregations","title":"Summary of Aggregations","text":"

With these aggregations, Elasticsearch becomes a powerful analytics engine, enabling sophisticated data analysis directly within the index.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#6-sorting","title":"6. Sorting","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#61-basic-sorting","title":"6.1. Basic Sorting","text":"
{\n  \"sort\": [\n    { \"price\": { \"order\": \"asc\" } }\n  ],\n  \"query\": { \"match_all\": {} }\n}\n
"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#62-field-based-sorting","title":"6.2. Field-based Sorting","text":"
{\n  \"sort\": [\n    { \"release_date\": { \"order\": \"desc\" } }\n  ],\n  \"query\": { \"match_all\": {} }\n}\n
"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#63-multiple-sort-fields","title":"6.3. Multiple Sort Fields","text":"
{\n  \"sort\": [\n    { \"price\": { \"order\": \"asc\" } },\n    { \"rating\": { \"order\": \"desc\" } }\n  ],\n  \"query\": { \"match_all\": {} }\n}\n
"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#64-nested-sorting","title":"6.4. Nested Sorting","text":"
{\n  \"sort\": [\n    {\n      \"products.price\": {\n        \"order\": \"asc\",\n        \"nested\": {\n          \"path\": \"products\",\n          \"filter\": { \"range\": { \"products.price\": { \"gt\": 10 } } }\n        }\n      }\n    }\n  ],\n  \"query\": { \"match_all\": {} }\n}\n
"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#65-geolocation-sorting","title":"6.5. Geolocation Sorting","text":"
{\n  \"sort\": [\n    {\n      \"_geo_distance\": {\n        \"location\": \"40.715, -73.988\",\n        \"order\": \"asc\",\n        \"unit\": \"km\"\n      }\n    }\n  ],\n  \"query\": { \"match_all\": {} }\n}\n
"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#66-script-based-sorting","title":"6.6. Script-based Sorting","text":"
{\n  \"sort\": {\n    \"_script\": {\n      \"type\": \"number\",\n      \"script\": {\n        \"source\": \"doc['price'].value * params.factor\",\n        \"params\": { \"factor\": 1.2 }\n      },\n      \"order\": \"desc\"\n    }\n  },\n  \"query\": { \"match_all\": {} }\n}\n
"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#67-missing-values","title":"6.7. Missing Values","text":"
{\n  \"sort\": [\n    { \"price\": { \"order\": \"asc\", \"missing\": \"_last\" } }\n  ],\n  \"query\": { \"match_all\": {} }\n}\n
"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#68-sorting-in-aggregations","title":"6.8. Sorting in Aggregations","text":"
{\n  \"aggs\": {\n    \"top_brands\": {\n      \"terms\": {\n        \"field\": \"brand.keyword\",\n        \"order\": { \"_count\": \"desc\" }\n      }\n    }\n  }\n}\n
"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#7-relevance-scoring","title":"7. Relevance Scoring","text":"

Elasticsearch's relevance scoring is crucial for ranking documents based on their similarity to a query. Here\u2019s an in-depth look:

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#71-scoring-mechanism-and-bm25-algorithm","title":"7.1. Scoring Mechanism and BM25 Algorithm","text":"

The BM25 (Best Matching 25) algorithm is Elasticsearch\u2019s default relevance scoring algorithm. BM25 improves upon traditional TF-IDF (Term Frequency-Inverse Document Frequency) by adjusting term frequency saturation and document length normalization, providing more nuanced relevance.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#core-components-of-bm25","title":"Core Components of BM25:","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#72-calculating-the-bm25-score","title":"7.2. Calculating the BM25 Score","text":"

The BM25 formula combines these components, with two main parameters: - k1: Controls term frequency saturation (default around 1.2). Higher values give more influence to term frequency. - b: Controls length normalization (default around 0.75). Higher values penalize longer documents more strongly. - BM25 formula: score(D, Q) = Σ over query terms qᵢ of IDF(qᵢ) · f(qᵢ, D) · (k1 + 1) / ( f(qᵢ, D) + k1 · (1 − b + b · |D| / avgdl) ), where f(qᵢ, D) is the frequency of term qᵢ in document D, |D| is the document's length, and avgdl is the average document length in the index.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#73-understanding-scoring-in-elasticsearch-queries","title":"7.3. Understanding Scoring in Elasticsearch Queries","text":"

In Elasticsearch, relevance scores are generated by the \"match\" or \"multi_match\" queries. Each document receives a relevance score, and results are ranked based on these scores. You can inspect scores using the \"explain\": true parameter, which details each document\u2019s score and shows how BM25 factors contribute.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#example-query-with-relevance-scoring","title":"Example Query with Relevance Scoring:","text":"

{\n  \"query\": {\n    \"match\": {\n      \"content\": {\n        \"query\": \"Elasticsearch relevance scoring\",\n        \"boost\": 1.5\n      }\n    }\n  },\n  \"explain\": true\n}\n
This query searches for \"Elasticsearch relevance scoring\" in the content field. The \"boost\" parameter can emphasize this field for relevance, while \"explain\": true helps analyze the scoring breakdown.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#74-improving-relevance-with-advanced-techniques","title":"7.4. Improving Relevance with Advanced Techniques","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#75-practical-use-cases-for-bm25-in-elasticsearch","title":"7.5. Practical Use Cases for BM25 in Elasticsearch","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#summary-of-relevance-score","title":"Summary of Relevance Score","text":"

Relevance scoring with BM25 is foundational to Elasticsearch\u2019s search quality, offering powerful controls for tuning results to your specific needs.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#8-pagination-and-cursors","title":"8. Pagination and Cursors","text":"

Pagination in Elasticsearch is essential for handling large result sets efficiently, as it prevents overwhelming the client and server. Elasticsearch offers different methods for pagination, each with specific use cases. Let's break down each method:

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#81-basic-pagination-with-from-and-size","title":"8.1. Basic Pagination with from and size","text":"

Example:

{\n  \"from\": 20,\n  \"size\": 10,\n  \"query\": { \"match_all\": {} }\n}\n
This retrieves results from the 21st to the 30th position.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#82-search-after-for-deep-pagination","title":"8.2. Search After for Deep Pagination","text":"

Example:

{\n  \"sort\": [ { \"timestamp\": \"asc\" }, { \"id\": \"asc\" } ],\n  \"size\": 10,\n  \"query\": { \"match_all\": {} },\n  \"search_after\": [1627489200, \"XYZ123\"] \n}\n
Here, search_after takes the values from the timestamp and id fields of the last document on the previous page, ensuring seamless navigation.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#83-scroll-api-for-bulk-data-retrieval","title":"8.3. Scroll API for Bulk Data Retrieval","text":"

Example Workflow: - First, initiate a scroll session:

{\n  \"size\": 100,\n  \"query\": { \"match_all\": {} },\n  \"scroll\": \"1m\" \n}\n
- Use the _scroll_id returned by the initial request to retrieve subsequent pages:
{\n  \"scroll\": \"1m\",\n  \"scroll_id\": \"DXF1ZXJ5QW5kRmV0Y2gBAAAAAAAA...\"\n}\n

After each scroll request, repeat until the returned results are empty, which indicates that all documents have been retrieved.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#84-point-in-time-pit-for-real-time-pagination-with-consistency","title":"8.4. Point-in-Time (PIT) for Real-time Pagination with Consistency","text":"

Example Workflow: - First, initiate a Point-in-Time session:

POST /index_name/_pit?keep_alive=1m\n
- Use the pit_id with search_after for paginated queries:
{\n  \"size\": 10,\n  \"query\": { \"match_all\": {} },\n  \"pit\": { \"id\": \"PIT_ID\", \"keep_alive\": \"1m\" },\n  \"sort\": [ { \"timestamp\": \"asc\" }, { \"id\": \"asc\" } ],\n  \"search_after\": [1627489200, \"XYZ123\"]\n}\n
- Close the PIT session when done:
DELETE /_pit\n{\n  \"id\": \"PIT_ID\"\n}\n

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#85-limitations-and-considerations","title":"8.5. Limitations and Considerations","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#86-summary-table-of-pagination-techniques","title":"8.6. Summary Table of Pagination Techniques","text":"Method Use Case Limitations Example Scenarios from & size Simple pagination for small datasets Performance drop for large from values Basic search pages, small datasets search_after Deep pagination without from overhead Requires sorted fields, can\u2019t skip pages Infinite scrolling, data tables with lots of records Scroll API Bulk data export/processing High memory usage, no real-time consistency Data migration, report generation Point-in-Time Consistent real-time pagination Needs frequent re-creation to avoid memory issues Dashboards, applications requiring consistent views

Each method serves specific needs, balancing consistency, performance, and real-time capabilities. This setup allows Elasticsearch to handle vast and dynamic datasets while supporting efficient data retrieval.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#9-facets-and-filters","title":"9. Facets and Filters","text":"

Faceting creates summaries of data, useful for search result filtering, like categorizing search results by price or brand. Filters, on the other hand, optimize performance by narrowing down documents without affecting scoring.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#91-facets-aggregations","title":"9.1. Facets (Aggregations)","text":"

Faceting is a process in Elasticsearch that aggregates search results, providing structured summaries for complex queries. For example, if searching for \"laptops,\" facets can aggregate results by price range, brand, or processor type, allowing users to filter search results dynamically.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#92-filters-filtering-without-scoring-impact","title":"9.2. Filters: Filtering without Scoring Impact","text":"

Filters enable the narrowing of search results by criteria (e.g., price < $500), improving query efficiency and bypassing relevance scoring. They\u2019re often used to pre-process data before a full-text search and work well with caches, resulting in faster query performance.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#93-combining-facets-and-filters-in-search-applications","title":"9.3. Combining Facets and Filters in Search Applications","text":"

In complex search interfaces, facets allow users to drill down through categories, while filters further refine their selections without recalculating scores, ensuring responsive user experiences.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#94-sample-implementations","title":"9.4. Sample Implementations","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#941-creating-facets-aggregations","title":"9.4.1. Creating Facets (Aggregations)","text":"

To implement facets, you\u2019ll define bucket aggregations to group and categorize data. For instance, creating a facet for \"price range\" and \"brand\" in a search for laptops:

GET /products/_search\n{\n  \"query\": {\n    \"match\": { \"description\": \"laptop\" }\n  },\n  \"aggs\": {\n    \"price_ranges\": {\n      \"range\": {\n        \"field\": \"price\",\n        \"ranges\": [\n          { \"to\": 500 },\n          { \"from\": 500, \"to\": 1000 },\n          { \"from\": 1000 }\n        ]\n      }\n    },\n    \"brands\": {\n      \"terms\": { \"field\": \"brand.keyword\" }\n    }\n  }\n}\n

This example provides a breakdown of price ranges and a count of each brand, creating flexible filters users can click on to refine results.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#942-using-filters-for-optimized-performance","title":"9.4.2. Using Filters for Optimized Performance","text":"

Filters improve performance by narrowing results without scoring. Here\u2019s an example of using a bool query with a filter clause:

GET /products/_search\n{\n  \"query\": {\n    \"bool\": {\n      \"must\": {\n        \"match\": { \"description\": \"laptop\" }\n      },\n      \"filter\": [\n        { \"term\": { \"in_stock\": true } },\n        { \"range\": { \"price\": { \"lt\": 1000 } } }\n      ]\n    }\n  }\n}\n

In this query, in_stock and price filters optimize search results without affecting scoring.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#95-performance-optimization-techniques","title":"9.5. Performance Optimization Techniques","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#951-caching-filters","title":"9.5.1. Caching Filters","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#952-minimize-full-text-search-in-filters","title":"9.5.2. Minimize Full-Text Search in Filters","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#953-selective-use-of-aggregations","title":"9.5.3. Selective Use of Aggregations","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#954-balancing-shard-count","title":"9.5.4. Balancing Shard Count","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#cluster-architecture","title":"Cluster Architecture","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#1-cluster-overview-and-node-responsibilities","title":"1. Cluster Overview and Node Responsibilities","text":"

Each node type in an Elasticsearch cluster has specialized roles that allow it to handle different aspects of indexing, searching, and managing data. Let's explore the node types in detail.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#11-master-node","title":"1.1. Master Node","text":"

The master node is the cluster\u2019s brain, responsible for the overall management and health of the cluster.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#12-data-node","title":"1.2. Data Node","text":"

Data nodes are the primary nodes for storing data, processing indexing operations, and executing search and aggregation requests.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#13-coordinating-node","title":"1.3. Coordinating Node","text":"

Also known as the client node, the coordinating node acts as a router for client requests, managing query distribution and response aggregation.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#14-ingest-node","title":"1.4. Ingest Node","text":"

Ingest nodes preprocess data before it is indexed, often using ingest pipelines to transform and enrich data.
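
In recent Elasticsearch versions (7.9+), a node's roles are pinned in elasticsearch.yml via node.roles; a minimal sketch for a dedicated master-eligible node and a combined data/ingest node:

# elasticsearch.yml on a dedicated master-eligible node\nnode.roles: [ master ]\n\n# elasticsearch.yml on a node that stores data and runs ingest pipelines\nnode.roles: [ data, ingest ]\n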

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#2-cluster-hierarchy-overview","title":"2. Cluster Hierarchy Overview","text":"

Each level in the cluster architecture plays a role in organizing and distributing data efficiently across nodes.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#21-cluster","title":"2.1. Cluster","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#22-index","title":"2.2. Index","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#23-shards","title":"2.3. Shards","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#24-segments","title":"2.4. Segments","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#25-documents","title":"2.5. Documents","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#3-elasticsearch-request-flow-and-processing","title":"3. Elasticsearch Request Flow and Processing","text":"

To deeply understand how request flow and cluster operations work in Elasticsearch, let\u2019s walk through each stage of the process in detail:

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#31-receiving-a-request","title":"3.1. Receiving a Request","text":"

When a client sends a request to Elasticsearch, it can be either a query (search request) or an indexing (write) request. Here\u2019s how this begins:

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#32-breaking-down-the-request","title":"3.2. Breaking Down the Request","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#33-execution-at-shard-level","title":"3.3. Execution at Shard Level","text":"

At this stage, each shard executes the request locally. This process differs slightly between a search request and an indexing request.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#331-query-execution-search-requests","title":"3.3.1. Query Execution (Search Requests)","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#332-indexing-execution-write-requests","title":"3.3.2. Indexing Execution (Write Requests)","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#34-response-consolidation","title":"3.4. Response Consolidation","text":"

After the query or indexing operation completes, the coordinating node consolidates the response:

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#35-indexing-flow-details","title":"3.5. Indexing Flow Details","text":"

The indexing flow includes several key mechanisms that ensure data consistency and durability:

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#36-request-flow-and-cluster-operations-summary","title":"3.6. Request Flow and Cluster Operations Summary","text":"

To visualize this, here\u2019s a simplified flow of the entire process:

  1. Client Sends Request (Search or Index) \u2192 Coordinating Node Receives Request
  2. Coordinating Node Identifies Relevant Shards (and chooses primary or replica shards)
  3. Execution on Shards:
    • Query Phase (Search): the query is executed on the selected shards, and each shard returns the IDs and scores of matching documents.
    • Indexing Phase (Write): the document is written to the primary shard, and the changes are forwarded to replica shards.
  4. Fetch Phase (Search): fetches the full documents for the top results.
  5. Consolidation and Response: the coordinating node merges, ranks, and sorts results for a search, or confirms the write operation on all replicas for an indexing request.
  6. Final Response Sent to Client
"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#37-additional-considerations","title":"3.7. Additional Considerations","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#4-lucene-segments-and-index-structures","title":"4. Lucene, Segments, and Index Structures","text":"

To thoroughly understand Elasticsearch's storage and retrieval mechanisms, let\u2019s go deep into Lucene's segments, inverted index, and advanced data structures like BKD trees. Lucene, at its core, powers Elasticsearch, giving it the ability to handle and query massive datasets with impressive speed and efficiency.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#41-lucene-and-segments","title":"4.1. Lucene and Segments","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#411-what-is-a-segment","title":"4.1.1. What is a Segment?","text":"

A segment in Lucene is a self-contained, immutable collection of documents that forms a subset of a shard. Each segment is essentially a mini-index with its own data structures, including inverted indexes, stored fields, and other data structures to facilitate efficient searching and retrieval.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#412-segment-merging","title":"4.1.2 Segment Merging","text":"

Example: - Imagine a shard with 100 small segments. Lucene might merge them into fewer, larger segments (say, 10 segments), consolidating their data and removing any \"marked as deleted\" documents.
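
Merging runs automatically in the background, but an index that is no longer being written to can be compacted explicitly with the force-merge API; a sketch (the index name is illustrative):

POST /my-index/_forcemerge?max_num_segments=1\n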

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#413-advantages-of-segments","title":"4.1.3. Advantages of Segments","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#42-lucenes-inverted-index","title":"4.2. Lucene\u2019s Inverted Index","text":"

The inverted index is Lucene\u2019s most fundamental data structure and is the backbone of Elasticsearch\u2019s fast full-text search capabilities.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#421-structure-of-an-inverted-index","title":"4.2.1. Structure of an Inverted Index","text":"

The inverted index allows quick lookups by mapping terms to postings lists (lists of documents containing each term).

Example: - Suppose you index the text \"Elasticsearch is scalable search\". The inverted index might look like this:

Term          Documents\n------------------------\n\"Elasticsearch\" [1]\n\"is\"            [1, 2]\n\"scalable\"      [1, 3]\n\"search\"        [1]\n

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#422-advantages-of-inverted-index","title":"4.2.2. Advantages of Inverted Index","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#423-additional-optimizations-in-inverted-index","title":"4.2.3. Additional Optimizations in Inverted Index","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#43-bkd-trees-and-doc-values","title":"4.3. BKD Trees and Doc Values","text":"

Apart from the inverted index, Lucene also uses specialized data structures to handle numeric and spatial data.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#431-bkd-trees","title":"4.3.1. BKD Trees","text":"

BKD Trees are used in Elasticsearch for indexing and querying numeric, date, and geospatial data, especially for high-cardinality fields (fields with a large number of unique values).

Example: - Suppose you have a field geo_point representing user locations. A BKD tree indexes these coordinates, allowing Elasticsearch to quickly retrieve points within a bounding box or radius without scanning all documents.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#structure-of-a-bkd-tree","title":"Structure of a BKD Tree","text":"

BKD trees are essentially a form of a k-d tree (k-dimensional tree), optimized for indexing and searching over multiple dimensions. Each dimension can represent a distinct numeric field (e.g., latitude, longitude, or timestamp).

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#using-bkd-trees-for-queries","title":"Using BKD Trees for Queries","text":"

BKD trees efficiently handle range queries over numeric and date fields, geo-distance and bounding-box filters, and other multi-dimensional numeric lookups.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#advantages-of-bkd-trees","title":"Advantages of BKD Trees","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#432-doc-values","title":"4.3.2. Doc Values","text":"

Doc values enable efficient retrieval of field values for sorting, aggregation, and faceting. Instead of retrieving data from inverted indexes (which are optimized for search), doc values provide a columnar storage format that is ideal for analytical tasks.

Example: - Sorting results by price in a large index of products: - Doc values store price in a single column, which Elasticsearch reads to quickly sort documents without scanning each document individually.
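
Doc values are enabled by default for most field types; a minimal mapping sketch that keeps them for a sortable price field and disables them for a keyword field that is only ever searched, never sorted or aggregated on (index and field names are illustrative):

PUT /products\n{\n  \"mappings\": {\n    \"properties\": {\n      \"price\": { \"type\": \"double\" },\n      \"session_id\": { \"type\": \"keyword\", \"doc_values\": false }\n    }\n  }\n}\n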

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#structure-of-doc-values","title":"Structure of Doc Values","text":"Doc values store fields in column-oriented storage rather than row-oriented storage:"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#types-of-doc-values","title":"Types of Doc Values","text":"

Doc values are defined by the field type:

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#using-doc-values-in-queries","title":"Using Doc Values in Queries","text":"

Doc values are essential for operations such as sorting, aggregations, and faceting.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#advantages-of-doc-values","title":"Advantages of Doc Values","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#433-summary-of-lucene-data-structures-in-elasticsearch","title":"4.3.3. Summary of Lucene Data Structures in Elasticsearch","text":"Data Structure Purpose Use Cases Benefits Segments Immutable sub-indices within a shard All document storage Concurrent searches, immutability Inverted Index Maps terms to documents Full-text search Fast term lookups BKD Trees Indexes numeric and multidimensional data Geospatial, timestamp queries Efficient range queries Doc Values Columnar storage for fields Sorting, aggregations Optimized memory usage Point Data Types Indexes geographic points (latitude-longitude) Proximity, bounding box queries Fast geospatial indexing"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#44-advanced-data-structures-in-lucene","title":"4.4. Advanced Data Structures in Lucene","text":"

Let's dive into the point data types and spatial indexes used in Elasticsearch, especially focusing on how it handles geospatial data with geo_point fields. We\u2019ll look at how Quadtrees and R-trees work, their role in spatial indexing, and how they support geospatial queries such as bounding box and proximity searches.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#441-overview-of-spatial-data-in-elasticsearch","title":"4.4.1. Overview of Spatial Data in Elasticsearch","text":"

Elasticsearch supports geospatial data using the geo_point and geo_shape data types: - geo_point: Stores latitude-longitude pairs for points on a map and is primarily used for proximity searches (e.g., \u201cfind locations within 10km\u201d). - geo_shape: Used for more complex shapes, such as polygons or multipoints, and is suitable for defining geographical areas like cities or lakes.

Geospatial queries include: - Bounding Box Queries: Searches for documents within a specific rectangle defined by coordinates. - Distance Queries: Searches for documents within a specified radius from a point. - Polygon Queries: Searches for documents within or intersecting with a complex polygonal area.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#442-quadtrees-and-r-trees-in-spatial-indexing","title":"4.4.2. Quadtrees and R-trees in Spatial Indexing","text":"

Quadtrees and R-trees are tree-based data structures that organize spatial data by dividing the space into hierarchical grids or regions, allowing efficient geospatial query processing.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#quadtrees","title":"Quadtrees","text":"

Quadtrees are hierarchical, 2-dimensional spatial indexes that recursively partition space into four quadrants or nodes, making them highly suitable for spatial data like latitude and longitude pairs.

Example: Imagine we have a city map with thousands of restaurants, each represented as a point (latitude, longitude). - A quadtree organizes the map into quadrants based on restaurant density. Denser regions are divided further to create sub-quadrants. - To find restaurants within a specific neighborhood, the quadtree quickly filters out distant quadrants, only scanning nearby ones, significantly speeding up search.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#advantages-of-quadtrees","title":"Advantages of Quadtrees","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#r-trees","title":"R-trees","text":"

R-trees are another popular spatial data structure used to index multi-dimensional data (e.g., geographic shapes) by grouping nearby objects in bounding rectangles.

Example: Consider a map with various regions, like parks, lakes, and neighborhoods, each represented as a polygon. - An R-tree groups these polygons in bounding rectangles based on location. Polygons that are close to each other fall under the same rectangle. - When searching for parks within a 5km radius, the R-tree discards rectangles outside this range, only exploring relevant areas to find matching polygons.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#advantages-of-r-trees","title":"Advantages of R-trees","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#443-lucenes-internal-use-of-quadtrees-and-r-trees","title":"4.4.3. Lucene's Internal Use of Quadtrees and R-trees","text":"

While Elasticsearch doesn\u2019t directly expose Quadtrees and R-trees as configurations, Lucene, its underlying search library, utilizes versions of these structures to handle spatial indexing efficiently.

Lucene optimizes these data structures to fit within its segment-based storage, allowing them to scale across multiple indices and segments, handling both large-scale geospatial queries and basic point-based distance queries.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#444-spatial-query-processing-in-elasticsearch","title":"4.4.4. Spatial Query Processing in Elasticsearch","text":"

Using these data structures, Elasticsearch processes spatial queries as follows:

  1. Bounding Box Query: For a rectangular region, Elasticsearch leverages Quadtrees to restrict the search space to quadrants that intersect with the bounding box. Points or shapes within these quadrants are retrieved.

  2. Distance Query: For a proximity search (e.g., finding locations within 5km of a point), the Geo Distance Filter calculates distances from a central point and retrieves points from quadrants or nodes that fall within this radius.

  3. Polygon Query: For complex polygons (e.g., "find all parks within a specific neighborhood"), Elasticsearch uses an R-tree structure to store polygonal shapes in bounding rectangles, allowing fast intersection tests with other regions.
"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#445-summary-table-of-spatial-data-structures-in-elasticsearch","title":"4.4.5. Summary Table of Spatial Data Structures in Elasticsearch","text":"Data Structure Purpose Use Cases Key Characteristics Quadtrees Efficient point indexing Bounding box, proximity searches Hierarchical grid of quadrants R-trees Complex shape and polygon indexing Intersection, overlap queries Bounding rectangles with hierarchical nodes BKD Trees Multi-dimensional numeric data Numeric and geo-distance filters Balanced k-d tree with blocks of data points"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#446-practical-applications-and-benefits","title":"4.4.6. Practical Applications and Benefits","text":"

These data structures optimize spatial queries in Elasticsearch, allowing it to handle diverse geospatial data efficiently. For example: - Bounding Box Queries are accelerated by Quadtrees, making them ideal for finding all points in a geographic area. - Distance Queries are optimized by both Quadtrees and BKD trees, allowing real-time retrieval of nearby points. - Polygon Queries are handled by R-trees, which efficiently manage irregular shapes and large polygons for accurate intersection checks.

By integrating these structures into Lucene, Elasticsearch supports powerful geospatial capabilities across various applications, including mapping services, logistics, and location-based searches.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#45-how-lucenes-structures-fit-into-elasticsearchs-query-flow","title":"4.5. How Lucene's Structures Fit into Elasticsearch\u2019s Query Flow","text":"
  1. Document Indexing: As documents are indexed, Lucene tokenizes text fields, stores terms in the inverted index, and creates doc values for fields that require sorting or aggregation.

  2. Segment Creation: Documents are grouped into segments, with each segment containing its own inverted index, BKD trees, and doc values.

  3. Query Execution: - Term-based Queries: The inverted index quickly retrieves documents containing specific terms. - Numeric or Geospatial Queries: BKD trees are used to retrieve documents within a certain numeric range or geographic area. - Sorting and Aggregation: Doc values facilitate sorting by loading field values column-by-column rather than document-by-document.
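As a small illustration of the last two paths, here is a sketch of a search that combines a BKD-backed numeric range filter with doc-values-backed sorting; it assumes a hypothetical products index with a numeric price field:

GET /products/_search\n{\n  \"query\": { \"range\": { \"price\": { \"gte\": 10, \"lte\": 100 } } },\n  \"sort\": [ { \"price\": \"asc\" } ]\n}\n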

Lucene\u2019s well-designed structures\u2014segments, inverted indexes, and multidimensional BKD trees\u2014create the foundation for Elasticsearch\u2019s speed and scalability, enabling it to support complex queries and large datasets efficiently.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#5-visual-representation-of-cluster-architecture-hierarchy-flow","title":"5. Visual Representation of Cluster Architecture (Hierarchy Flow)","text":"
Elasticsearch Cluster\n\u2514\u2500\u2500 Nodes\n    \u251c\u2500\u2500 Master Node\n    \u2502   \u251c\u2500\u2500 Manages cluster state\n    \u2502   \u2514\u2500\u2500 Handles shard allocation and rebalancing\n    \u251c\u2500\u2500 Data Node\n    \u2502   \u251c\u2500\u2500 Stores data and handles indexing/searching\n    \u2502   \u251c\u2500\u2500 Manages primary and replica shards\n    \u2502   \u2514\u2500\u2500 Processes local queries and aggregations\n    \u251c\u2500\u2500 Coordinating Node\n    \u2502   \u251c\u2500\u2500 Routes client requests to data nodes\n    \u2502   \u251c\u2500\u2500 Aggregates responses from data nodes\n    \u2502   \u2514\u2500\u2500 Sends final response to the client\n    \u2514\u2500\u2500 Ingest Node\n        \u251c\u2500\u2500 Processes and transforms data before indexing\n        \u2514\u2500\u2500 Enriches data with pipelines\n\nIndex\n\u2514\u2500\u2500 Shards (Primary and Replica)\n    \u2514\u2500\u2500 Lucene Index (Each shard is a Lucene index)\n        \u251c\u2500\u2500 Segments (Immutable data units in a shard)\n        \u2502    \u251c\u2500\u2500 Inverted Index\n        \u2502    \u251c\u2500\u2500 Doc Values\n        \u2502    \u2514\u2500\u2500 BKD Trees (for numeric & geo fields)\n        \u2514\u2500\u2500 Documents (JSON objects representing data records)(within segments)\n
"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#6-summary-of-cluster-roles-and-data-flow","title":"6. Summary of Cluster Roles and Data Flow","text":"Component Description Responsibilities Cluster Top-level structure with multiple nodes Manages overall data distribution, availability, and search Master Node Brain of the cluster Handles cluster state, shard allocation, and fault tolerance Data Node Primary storage and processing node Stores data, handles indexing, querying, and replica management Coordinating Node Routes and aggregates client requests Routes requests to data nodes, aggregates responses, and sends back to clients Ingest Node Data transformation node Preprocesses data with pipelines, ideal for parsing and enrichment Index Logical grouping of documents Organizes data for efficient storage and querying Shard Distributed subset of index data Represents a Lucene index with primary and replica copies Segment Immutable unit in a shard Stores indexed data for fast read access Document Basic data unit in segments JSON object representing individual data records"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#thread-pools","title":"Thread Pools","text":"

Diving into Elasticsearch\u2019s search and other thread pools is crucial to understanding its performance and scalability. These pools are essential for managing various tasks like indexing, searching, and handling incoming requests. Let\u2019s go through these pools from end to end, covering configurations, management, and performance metrics to monitor.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#1-overview","title":"1. Overview","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#2-detailed-breakdown-of-key-pools","title":"2. Detailed Breakdown of Key Pools","text":"

Here\u2019s a closer look at each pool, along with common configurations and considerations:

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#3-configuring-and-tuning-thread-pools","title":"3. Configuring and Tuning Thread Pools","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#4-monitoring-metrics-for-thread-pools","title":"4. Monitoring Metrics for Thread Pools","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#5-example-scenarios-and-best-practices","title":"5. Example Scenarios and Best Practices","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#6-summary-table-of-pools","title":"6. Summary Table of Pools","text":"Thread Pool Purpose Default Threads Default Queue Size Key Metrics Tuning Tips Search Pool Processes search queries, aggregations # of processors * 3 1000 search_pool.active, search_pool.queue, search_pool.rejected Increase queue_size if many queries are queued; monitor memory usage to prevent OutOfMemory issues. Index Pool Handles indexing requests for documents # of processors 200 index_pool.active, index_pool.queue, index_pool.rejected For high indexing rates, increase queue size and thread count as necessary. Get Pool Retrieves individual documents # of processors * 2 1000 get_pool.active, get_pool.queue, get_pool.rejected Increase queue_size if retrieval requests are high; monitor latency and resource usage. Bulk Pool Processes bulk indexing operations # of processors * 2 50 bulk_pool.active, bulk_pool.queue, bulk_pool.rejected Keep queue_size modest to limit memory use; monitor latency during high bulk loads. Management Pool Manages maintenance tasks like merges 1\u20135 5 management_pool.active, management_pool.queue Generally low usage; monitor only if queue is frequently non-empty, indicating background task delays. Snapshot Pool Handles snapshot creation and restoration 1\u20132 5 snapshot_pool.active, snapshot_pool.queue Schedule snapshots during low-activity periods; adjust resources if snapshots interfere with other tasks.

These pools are essential to optimizing Elasticsearch\u2019s handling of diverse workloads. Monitoring and adjusting each pool based on your workload ensures better performance and resource management across the Elasticsearch cluster.
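To make the tuning concrete, here is a minimal sketch of static pool settings in elasticsearch.yml, plus the _cat endpoint that exposes the metrics listed above. Note that pool names vary across versions (recent releases consolidate the separate index and bulk pools into a single write pool), and the sizes shown are illustrative assumptions rather than recommendations:

# elasticsearch.yml (static settings, applied per node on restart)\nthread_pool.search.size: 13\nthread_pool.search.queue_size: 2000\nthread_pool.write.size: 8\nthread_pool.write.queue_size: 500\n

# Live pool metrics per node\nGET /_cat/thread_pool/search,write?v&h=node_name,name,active,queue,rejected\n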

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#caching","title":"Caching","text":"

In Elasticsearch, caching plays a vital role in speeding up queries by storing frequently accessed data at various levels, minimizing I/O operations and improving response times. Here\u2019s a detailed, end-to-end look at caching in Elasticsearch, from the lowest level to the highest, covering each caching mechanism and its role.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#1-types-of-caches-in-elasticsearch","title":"1. Types of Caches in Elasticsearch","text":"

There are several caching mechanisms in Elasticsearch, each working at a different level:

Each of these caches serves a specific purpose and optimizes a different aspect of query processing.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#2-filesystem-cache-os-level","title":"2. Filesystem Cache (OS-level)","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#3-shard-level-cache","title":"3. Shard-level Cache","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#31-field-data-cache","title":"3.1. Field Data Cache","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#32-query-cache","title":"3.2. Query Cache","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#33-request-cache","title":"3.3. Request Cache","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#4-node-level-cache","title":"4. Node-level Cache","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#41-segment-cache","title":"4.1. Segment Cache","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#42-indices-cache","title":"4.2 Indices Cache","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#5-cache-tuning-and-best-practices","title":"5. Cache Tuning and Best Practices","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#6-summary-table-of-caching-mechanisms","title":"6. Summary Table of Caching Mechanisms","text":"Cache Type Level Purpose Eviction Policy Key Metrics Tuning Tips Filesystem Cache OS Stores files and segments in OS memory Managed by OS N/A Ensure ample OS memory for larger index caching Field Data Cache Shard-level Caches field data for aggregations and sorting LRU-based, configurable fielddata.memory_size, fielddata.evictions Increase size for high aggregation requirements Query Cache Shard-level Caches individual filter query results LRU-based, configurable query_cache.memory_size, query_cache.evictions, query_cache.hit_count, query_cache.miss_count Monitor hit/miss ratios to determine effectiveness Request Cache Shard-level Caches entire search request results LRU-based, per-index request_cache.memory_size, request_cache.evictions, request_cache.hit_count, request_cache.miss_count Best for aggregations on static data Segment Cache Node-level Caches Lucene index segments and postings Managed by Lucene segment.memory_in_bytes, segments.count Larger heap size improves cache efficiency Indices Cache Node-level Caches index metadata (mappings, settings) LRU-based indices.memory, indices.evictions Adjust based on frequency of metadata updates

Each caching layer works in tandem to optimize query speed and efficiency, and monitoring these caches is essential for fine-tuning Elasticsearch performance to meet your specific use cases.
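As a concrete starting point, here is a minimal sketch of the tunables discussed above; the index name is a placeholder and the percentages are illustrative assumptions:

# elasticsearch.yml (node-level, static)\nindices.fielddata.cache.size: 20%\nindices.queries.cache.size: 10%\n

# Per-index and dynamic\nPUT /my-index/_settings\n{ \"index.requests.cache.enable\": true }\n\n# Drop caches for an index while experimenting\nPOST /my-index/_cache/clear\n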

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#index-life-management-ilm","title":"Index Life Management - ILM","text":"

Elasticsearch\u2019s Hot-Warm-Cold architecture (optionally extended with a Frozen tier) is part of its Index Lifecycle Management (ILM) system, designed to optimize storage cost and efficiency for data whose usage patterns change over time. It allows you to store data on different types of nodes (hot, warm, cold, and frozen) based on retention needs and access frequency. Here\u2019s an in-depth look at each phase and how to manage data effectively with Elasticsearch\u2019s ILM:
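Phase transitions rely on nodes being tagged with their tier. Below is a minimal sketch using the legacy node.attr convention that the example policy later in this section targets; newer releases can use the dedicated data_hot/data_warm/data_cold node roles instead:

# elasticsearch.yml on a hot node\nnode.attr.data: hot\n\n# elasticsearch.yml on a cold node (matched by the allocate.require action in the example policy)\nnode.attr.data: cold\n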

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#1-purpose-of-hot-warm-cold-architecture","title":"1. Purpose of Hot-Warm-Cold Architecture","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#2-phases-in-ilm-index-lifecycle-management","title":"2. Phases in ILM (Index Lifecycle Management)","text":"

The ILM policy defines actions to transition data through different stages based on time or data access patterns:

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#3-hot-phase-high-performance-node-configuration","title":"3. Hot Phase (High-Performance Node Configuration)","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#4-warm-phase-mid-performance-node-configuration","title":"4. Warm Phase (Mid-Performance Node Configuration)","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#5-cold-phase-cost-effective-node-configuration","title":"5. Cold Phase (Cost-Effective Node Configuration)","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#6-frozen-phase-archival-node-configuration","title":"6. Frozen Phase (Archival Node Configuration)","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#7-delete-phase-optional-phase","title":"7. Delete Phase (Optional Phase)","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#8-setting-up-ilm-policies","title":"8. Setting Up ILM Policies","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#9-cluster-resource-optimization-with-hot-warm-cold-architecture","title":"9. Cluster Resource Optimization with Hot-Warm-Cold Architecture","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#10-example-ilm-policy","title":"10. Example ILM Policy","text":"

Here\u2019s an example ILM policy to illustrate phase-based transitions for a log analytics use case:

{\n  \"policy\": {\n    \"phases\": {\n      \"hot\": {\n        \"actions\": {\n          \"rollover\": {\n            \"max_age\": \"7d\",\n            \"max_size\": \"50gb\"\n          },\n          \"set_priority\": {\n            \"priority\": 100\n          }\n        }\n      },\n      \"warm\": {\n        \"min_age\": \"30d\",\n        \"actions\": {\n          \"allocate\": {\n            \"number_of_replicas\": 1\n          },\n          \"forcemerge\": {\n            \"max_num_segments\": 1\n          },\n          \"set_priority\": {\n            \"priority\": 50\n          }\n        }\n      },\n      \"cold\": {\n        \"min_age\": \"90d\",\n        \"actions\": {\n          \"allocate\": {\n            \"require\": {\n              \"data\": \"cold\"\n            }\n          },\n          \"freeze\": {},\n          \"set_priority\": {\n            \"priority\": 0\n          }\n        }\n      },\n      \"delete\": {\n        \"min_age\": \"365d\",\n        \"actions\": {\n          \"delete\": {}\n        }\n      }\n    }\n  }\n}\n
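To put such a policy into effect, it is typically registered through the ILM API and referenced from an index template so that newly created indices pick it up. A minimal sketch; the policy, template, and alias names are placeholders:

PUT _ilm/policy/logs_policy\n{ ... the policy document above ... }\n\nPUT _index_template/logs_template\n{\n  \"index_patterns\": [\"logs-*\"],\n  \"template\": {\n    \"settings\": {\n      \"index.lifecycle.name\": \"logs_policy\",\n      \"index.lifecycle.rollover_alias\": \"logs\"\n    }\n  }\n}\n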
"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#11-advantages-of-hot-warm-cold-architecture","title":"11. Advantages of Hot-Warm-Cold Architecture","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#12-best-practices","title":"12. Best Practices","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#13-summary-table","title":"13. Summary Table","text":"Phase Data Usage Hardware ILM Actions Typical Use Cases Hot Active, high access High CPU, SSD, large RAM Rollover, force merge Real-time search, recent logs, app monitoring Warm Mid-range access Moderate CPU, SSD or HDD, RAM Shrink, force merge, reallocate Data analytics on recent history, dashboard views Cold Infrequent access Low CPU, HDD, minimal RAM Freeze, move to cold nodes Compliance storage, infrequent analysis Frozen Rarely accessed Minimal CPU/RAM, cloud storage Unfreeze on access, move to frozen node Long-term archival, compliance data Delete Expired data N/A Delete Data lifecycle cleanup

This Hot-Warm-Cold architecture in Elasticsearch enables you to balance cost, performance, and data accessibility across various hardware configurations, ensuring that data is always stored cost-effectively without compromising on necessary access patterns.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#list-of-rest-apis","title":"List of REST APIs","text":"API Endpoint Purpose Description Typical Latency Use Cases Index APIs POST /{index}/_doc Index a Document Adds or updates a document in the specified index. Low (~5-10ms for small docs) Real-time data ingestion, document updates. POST /{index}/_bulk Bulk Indexing Allows indexing, updating, or deleting multiple documents in a single request. Low to Medium (~10-50ms) High-volume data ingestion, ETL processes. POST /{index}/_update/{id} Update a Document Partially updates a document by ID in the specified index. Low to Medium (~10-30ms) Updating specific fields in documents. DELETE /{index}/_doc/{id} Delete a Document Deletes a document by ID from the specified index. Low (~5-10ms) Document removal based on unique IDs. Search APIs GET /{index}/_search Search Documents Executes a search query with optional filters, aggregations, and pagination. Medium to High (~10-100ms, based on complexity) Full-text search, structured queries, analytics. POST /{index}/_search/scroll Scroll Search Enables retrieving large datasets by scrolling through search results. Medium to High (~50-200ms) Pagination for large datasets, data exports. DELETE /_search/scroll Clear Scroll Context Clears scroll contexts to free up resources. Low (~5ms) Resource management after scroll search. POST /_msearch Multi-Search Allows execution of multiple search queries in a single request. Medium to High (~20-150ms) Batch querying, dashboard visualizations. POST /{index}/_count Count Documents Counts the documents matching a query without returning full results. Low (~5-20ms) Quick counts of filtered datasets. Aggregation APIs POST /{index}/_search Aggregation Queries Used with the search API to retrieve aggregated data (e.g., histograms, averages). Medium to High (~20-150ms) Analytics, reporting, data summarization. Cluster and Node APIs GET /_cluster/health Cluster Health Returns health information on the cluster, nodes, and indices. Low (~5ms) Monitoring cluster health and node status. GET /_cluster/stats Cluster Statistics Provides statistics on cluster status, node usage, and storage. Low to Medium (~5-20ms) Cluster-wide monitoring and performance analysis. POST /_cluster/reroute Cluster Reroute Manually reroutes shards in the cluster. Medium (~20-50ms) Shard management and rebalancing. GET /_nodes/stats Node Statistics Returns stats for nodes, including CPU, memory, and thread pools. Low to Medium (~5-20ms) Node health monitoring, resource usage analysis. GET /_cat/nodes List Nodes Provides a list of all nodes in the cluster in a human-readable format. Low (~5ms) Node overview, node status. GET /_cat/indices List Indices Lists all indices with metadata on size, health, and document count. Low (~5ms) Index management and monitoring. Index Management APIs PUT /{index} Create Index Creates a new index with specific settings and mappings. Low (~10-20ms) Index setup and schema definition. DELETE /{index} Delete Index Deletes the specified index. Low (~5-10ms) Index removal, data management. PUT /{index}/_settings Update Index Settings Updates settings (e.g., refresh interval) of an index. Low (~10ms) Dynamic adjustments of index settings. POST /{index}/_refresh Refresh Index Refreshes an index to make recent changes searchable. Medium (~10-30ms) Ensures data is available for search in near-real-time. POST /{index}/_forcemerge Force Merge Index Reduces the number of segments in an index to optimize storage. 
High (~100ms - 1s+) Optimize index storage, improve search speed. Cache and Memory Management POST /{index}/_cache/clear Clear Index Cache Clears the cache for the specified index. Low (~5ms) Cache management for performance tuning. POST /_flush Flush Index Writes all buffered changes to disk. Medium (~10-30ms) Data durability, cache clearing. POST /{index}/_refresh Refresh Makes recent changes to an index visible to search. Medium (~10-30ms) Near real-time updates in the search index. Snapshot and Backup APIs PUT /_snapshot/{repo}/{snapshot} Create Snapshot Creates a snapshot of indices for backup. High (dependent on index size) Data backup, disaster recovery. GET /_snapshot/{repo}/{snapshot} Get Snapshot Status Checks the status of an existing snapshot. Low (~5ms) Monitor snapshot progress, status checks. DELETE /_snapshot/{repo}/{snapshot} Delete Snapshot Deletes an existing snapshot. Medium (~10-20ms) Snapshot lifecycle management, freeing storage. Security and Role Management POST /_security/role/{role} Create/Update Role Creates or updates a security role with specific permissions. Low (~5-10ms) Access control, role-based permissions. POST /_security/user/{user} Create/Update User Creates or updates a user in Elasticsearch. Low (~5-10ms) User management, access permissions. GET /_security/_authenticate Authenticate User Authenticates the current user. Low (~5ms) Session management, authentication checks."},{"location":"techdives/DistrubutedSystems/ElasticSearch/#additional-notes","title":"Additional Notes","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#capacity-planning-cluster-design","title":"Capacity Planning & Cluster Design","text":"Parameter Formula / Best Practice Description Example Calculation Total Data Nodes $ \\text{Total Shards} \\times \\text{Shard Size} / \\text{Node Disk Capacity} $ Calculate required data nodes based on total shard size and node storage. 20 shards of 30GB each on 600GB nodes \u2192 1 data node Heap Memory Allocation $ \\text{Total Node RAM} \\times 0.5 $ (up to 32GB max) Allocate 50% of node memory to JVM heap, up to 32GB, to optimize memory usage. Node with 64GB RAM \u2192 32GB heap Storage Requirement $ \\text{Total Data Size} \\times (1 + \\text{Replication Factor}) \\times \\text{Retention Period (months)} $ Plan storage capacity based on expected data size and replication. 1TB data, 1 replica, 1-month retention \u2192 2TB Ideal Shard Size 20-50 GB per shard Recommended shard size for optimal performance and manageability. Aim for shards ~30GB each for balanced load Shards per Index $ \\text{Total Data Size} / \\text{Target Shard Size} $ Calculate shard count to avoid excessive shard management overhead. 500GB index, 25GB target shard size \u2192 20 shards Max Shard Count Avoid more than 20 shards per GB of heap Ensure shard count doesn\u2019t exceed a level that can cause memory strain. 32GB heap \u2192 max ~640 shards across all indices Master Nodes Minimum of 3 Dedicated master nodes ensure quorum-based fault tolerance and cluster stability. At least 3 master nodes for high availability Coordinating Nodes 1 per 10 data nodes in large clusters Handle query load without adding data nodes, especially with complex aggregation queries. 50 data nodes \u2192 5 coordinating nodes CPU Requirement 4-8 CPUs per data node Allocate enough CPUs to handle search and indexing operations without bottlenecks. 
4-8 CPUs per node for typical workloads Disk I/O Throughput Minimum of 300 MB/s (write-heavy) For write-intensive clusters, ensure sufficient I/O throughput for data nodes. SSDs or fast disks are recommended Disk Usage Threshold Keep below 75% disk usage per node Avoid exceeding 75% disk usage on data nodes to prevent performance degradation. Monitor for 75% threshold, Elasticsearch throttles at ~85% Index Throttle State Throttles at ~85% disk usage Elasticsearch throttles indexing if nodes exceed 85% disk usage. Configure alerts to prevent reaching throttle state Memory Usage - Field Data Cache indices.fielddata.cache.size based on available heap Field data cache size impacts sorting and aggregation speed, stored in memory. Set to 20-30% of available heap for high-aggregation workloads Query Cache Size 10% of heap by default (adjustable) Caches filter queries to speed up repeated search operations. Increase for clusters with frequent repetitive queries Request Cache Size Enabled per index, LRU-based Cache complete search requests, especially useful for aggregation-heavy queries on static data. Enable on indices with frequent aggregation queries Cluster Health GET /_cluster/health (monitor green, yellow, red status) Regularly monitor cluster health to identify potential issues in shard allocation and node status. Alerts for yellow or red statuses to detect unallocated shards Pending Tasks GET /_cluster/pending_tasks Track the number of pending tasks; delays may signal node or cluster overload. Monitor to ensure tasks are processed promptly CPU Usage per Node Track via node.cpu.percent Monitor CPU load on nodes, especially data and coordinating nodes handling heavy queries. Keep below 80% for balanced performance Search Latency Monitor search.fetch_time and search.query_time Search latency metrics indicate performance bottlenecks; high values can suggest tuning is needed. Target <100ms for interactive queries, <500ms for aggregations Indexing Latency Monitor indexing.index_time Tracks indexing speed; high values indicate indexing bottlenecks. Optimize disk I/O if consistently high GC Pause Time Track jvm.gc.collection_time Excessive GC pause time (>100ms) can degrade performance, especially on data nodes. Keep heap usage <75% to avoid frequent GC pauses Disk Utilization disk.used_percent Ensure disk usage remains within optimal limits to prevent resource contention. Monitor for high usage, keep below 75% Heap Usage per Node jvm.heap_used_percent Monitor heap usage across nodes; values near 100% can trigger frequent GC and degrade performance. Keep below 75% for stable performance Shard Count per Node Shards should not exceed 50-75 per data node Optimal shard count balances memory usage and search latency. Distribute shards evenly to avoid bottlenecks Index Rollover Frequency Based on data ingestion and retention policy Use index rollover for high-ingestion use cases to manage shard size and count. Time-based or size-based rollover (e.g., daily, 10GB) Snapshot Frequency Schedule during off-peak hours Regular snapshots for backup without affecting active workloads. Daily or weekly snapshots for disaster recovery Ingest Node CPU Requirement Optimize for transformation-heavy workloads Ingest nodes need higher CPU for ETL tasks before indexing. ~8 CPUs per ingest node for transformation-heavy clusters Write Thread Pool Size Controlled via thread_pool.write.size Configure thread pool size for write-heavy workloads. 
Default based on available processors; increase for high-write loads Bulk Thread Pool Size Set via thread_pool.bulk.size Bulk operations often have separate thread pools, useful for high-ingestion clusters. Default based on processors; increase if high bulk ingestion Query Throughput Measure search.thread_pool.queue The search queue size indicates if the search load is too high, leading to delays. Keep queue size low to avoid bottlenecks Bulk Queue Size Monitor bulk.thread_pool.queue Large bulk queue size indicates ingestion pressure; tune for high-ingestion needs. Increase queue size or add ingest nodes for bulk-heavy workloads Network Throughput Monitor network interface utilization High network usage can impact inter-node communication, especially during replication. Ensure sufficient network bandwidth for large clusters Network Latency Track round-trip time between nodes Low-latency network is critical for distributed search and replication. <1ms recommended within data centers, 1-5ms for cross-regions"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#questions","title":"Questions","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#1-sql-or-nosql","title":"1. SQL or NoSQL:","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#2-main-feature-origin-and-design","title":"2. Main Feature, Origin, and Design:","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#3-olap-or-oltp","title":"3. OLAP or OLTP:","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#4-acid-or-base","title":"4. ACID or BASE:","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#5-cap-theorem","title":"5. CAP Theorem:","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#6-cluster-structure","title":"6. Cluster Structure:","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#7-fundamental-building-blocks","title":"7. Fundamental Building Blocks:","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#8-multi-master-support","title":"8. Multi-Master Support:","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#9-master-data-node-relation","title":"9. Master-Data Node Relation:","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#10-node-structures-in-cluster","title":"10. Node Structures in Cluster:","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#11-cluster-scaling-support","title":"11. Cluster Scaling Support:","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#12-high-availability","title":"12. High Availability:","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#13-fault-tolerance","title":"13. Fault Tolerance:","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#14-replication","title":"14. Replication:","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#15-partition","title":"15. Partition:","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#16-sharding","title":"16. Sharding:","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#17-caching-in-depth","title":"17. Caching in Depth:","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#18-storage-type","title":"18. Storage Type:","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#19-segmentpage-approach","title":"19. Segment/Page Approach:","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#20-trees-for-storage","title":"20. 
Trees for Storage:","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#21-indexing","title":"21. Indexing:","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#22-routing","title":"22. Routing:","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#23-latency","title":"23. Latency:","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#24-versioning","title":"24. Versioning:","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#25-locking-and-concurrency","title":"25. Locking and Concurrency:","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#26-write-ahead-log-wal","title":"26. Write-Ahead Log (WAL):","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#27-change-data-capture-cdc","title":"27. Change Data Capture (CDC):","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#28-query-type-and-query","title":"28. Query Type and Query:","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#29-query-optimizers","title":"29. Query Optimizers:","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#30-sql-support","title":"30. SQL Support:","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#31-circuit-breakers","title":"31. Circuit Breakers:","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#32-data-retention-lifecycle-management","title":"32. Data Retention / Lifecycle Management:","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#33-other-features","title":"33. Other Features:","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#34-modules-or-libraries","title":"34. Modules or Libraries:","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#35-optimization-and-tuning","title":"35. Optimization and Tuning:","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#36-backup-and-recovery","title":"36. Backup and Recovery:","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#37-security","title":"37. Security:","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#38-migration","title":"38. Migration:","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#39-recommended-cluster-setup","title":"39. Recommended Cluster Setup:","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#40-basic-cluster-setup","title":"40. Basic Cluster Setup:","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#references","title":"References","text":"
  1. https://www.elastic.co/guide/en/elasticsearch/reference/8.15/index.html
  2. https://www.hellointerview.com/learn/system-design/deep-dives/elasticsearch
  3. https://j.blaszyk.me/tech-blog/exploring-apache-lucene-index/
  4. https://medium.com/swlh/bkd-trees-used-in-elasticsearch-40e8afd2a1a4
  5. https://www.paradedb.com/blog/elasticsearch_vs_postgres
  6. https://nsvarun14.medium.com/capacity-planning-for-elasticsearch-cde3c0693add
  7. https://fdv.github.io/running-elasticsearch-fun-profit/004-cluster-design/004-cluster-design.html
  8. https://medium.com/@sureshkumar.pawar/sizing-your-elk-elasticsearch-logstash-kibana-cluster-for-high-performance-398fe6e591d4
  9. https://www.infoq.com/articles/similarity-scoring-elasticsearch/
  10. https://www.elastic.co/blog/practical-bm25-part-2-the-bm25-algorithm-and-its-variables
  11. https://medium.com/@niteshsaini/how-elasticsearch-calculates-its-relevance-score-e762c6274004
"},{"location":"techdives/DistrubutedSystems/Kafka/","title":"Kafka","text":""},{"location":"techdives/DistrubutedSystems/Kafka/#what-is-kafka","title":"What is Kafka?","text":"

Kafka is a distributed event streaming platform that allows applications to publish, store, and consume streams of data in real-time or batch mode. It is designed to handle continuous streams of events or records by functioning as a distributed commit log, where data is written sequentially and can be read independently by multiple consumers.

Kafka follows a publish-subscribe model where producers write data to topics divided into partitions, and consumers pull data from those partitions at their own pace. Kafka ensures that data flows reliably between producers and consumers, with fault-tolerant replication and durable storage to prevent data loss.

At its heart, Kafka provides three core functionalities:

  1. Message Streaming: Enabling systems to send and receive continuous streams of data asynchronously.
  2. Durable Storage: Persisting messages on disk, ensuring data is not lost even in case of failures.
  3. Distributed Processing: Allowing data to be partitioned and processed across multiple servers for scalability and fault tolerance.
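To make the publish-subscribe flow concrete, here is a minimal sketch using the official Java client (kafka-clients); the broker address, topic, and group names are placeholder assumptions:

import java.time.Duration;\nimport java.util.List;\nimport java.util.Properties;\nimport org.apache.kafka.clients.consumer.ConsumerRecords;\nimport org.apache.kafka.clients.consumer.KafkaConsumer;\nimport org.apache.kafka.clients.producer.KafkaProducer;\nimport org.apache.kafka.clients.producer.ProducerRecord;\n\npublic class QuickstartSketch {\n    public static void main(String[] args) {\n        // Producer: publish one event to the \"events\" topic\n        Properties p = new Properties();\n        p.put(\"bootstrap.servers\", \"localhost:9092\");\n        p.put(\"key.serializer\", \"org.apache.kafka.common.serialization.StringSerializer\");\n        p.put(\"value.serializer\", \"org.apache.kafka.common.serialization.StringSerializer\");\n        try (KafkaProducer<String, String> producer = new KafkaProducer<>(p)) {\n            producer.send(new ProducerRecord<>(\"events\", \"user-42\", \"login\"));\n        }\n\n        // Consumer: pull messages at its own pace as part of a consumer group\n        Properties c = new Properties();\n        c.put(\"bootstrap.servers\", \"localhost:9092\");\n        c.put(\"group.id\", \"events-readers\");\n        c.put(\"auto.offset.reset\", \"earliest\");\n        c.put(\"key.deserializer\", \"org.apache.kafka.common.serialization.StringDeserializer\");\n        c.put(\"value.deserializer\", \"org.apache.kafka.common.serialization.StringDeserializer\");\n        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(c)) {\n            consumer.subscribe(List.of(\"events\"));\n            ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(1));\n            records.forEach(r -> System.out.printf(\"offset=%d key=%s value=%s%n\", r.offset(), r.key(), r.value()));\n        }\n    }\n}\n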

"},{"location":"techdives/DistrubutedSystems/Kafka/#core-components-and-keywords","title":"Core Components and Keywords","text":"Component / Keyword Description Topic A logical channel for data, used to categorize messages. Topics are divided into partitions for parallelism. Partition A segment of a topic that stores messages in a log structure. It ensures parallel processing. Each partition contains messages with offsets to track their position. Producer A client or application that publishes messages to a Kafka topic. Producers can distribute messages across partitions. Consumer A client or application that subscribes to Kafka topics and reads messages from partitions at their own pace. Broker A Kafka server that stores messages, handles requests, and coordinates with other brokers in a Kafka cluster. Kafka Cluster A group of multiple Kafka brokers working together to provide scalability and fault tolerance. Zookeeper / KRaft Zookeeper is used for metadata management and leader election in older Kafka versions. Newer versions replace Zookeeper with KRaft (Kafka Raft) for native metadata management. Offset A unique identifier for each message within a partition, representing its position in the log. Consumers use offsets to track the messages they have processed. Replication Kafka duplicates partitions across multiple brokers to ensure fault tolerance and data availability. Leader Partition The primary replica of a partition that handles all reads and writes for that partition. Other replicas act as followers. Follower Partition A copy of the leader partition that replicates its data. If the leader fails, a follower can take over. Consumer Group A group of consumers sharing the same group ID, ensuring that each partition is consumed by only one member of the group at any given time. In-Sync Replica (ISR) A replica that is up-to-date with the leader partition. Kafka promotes an ISR as the new leader if the current leader fails. Acknowledgments (ACKs) A producer configuration that defines when a message is considered successfully sent (e.g., only after being replicated to all followers). Retention Policy A configuration that determines how long Kafka retains messages before deleting or compacting them. Messages can be removed based on time or size limits. Log Compaction A process that keeps only the latest version of a key within a topic, useful for data clean-up and long-term storage. Controller A designated broker responsible for managing partition leadership and cluster rebalancing. Kafka Streams A lightweight client library used for processing and analyzing data streams directly within Kafka. Kafka Connect A framework for integrating Kafka with external systems by providing connectors for data ingestion and extraction."},{"location":"techdives/DistrubutedSystems/Kafka/#how-kafka-achieves-high-scalability","title":"How Kafka Achieves High Scalability ?","text":"

Kafka\u2019s design is fundamentally scalable, allowing it to handle millions of events per second efficiently. It achieves this by leveraging a distributed architecture, partitioning, horizontal scaling, and several load-balancing strategies. Let\u2019s explore the mechanisms, architecture, and workflows that enable Kafka to scale end to end.

"},{"location":"techdives/DistrubutedSystems/Kafka/#1-partitioning-the-foundation-of-scalability","title":"1. Partitioning: The Foundation of Scalability","text":""},{"location":"techdives/DistrubutedSystems/Kafka/#partition-key-and-load-distribution","title":"Partition Key and Load Distribution","text":""},{"location":"techdives/DistrubutedSystems/Kafka/#impact-of-more-partitions","title":"Impact of More Partitions","text":""},{"location":"techdives/DistrubutedSystems/Kafka/#2-distributed-broker-architecture","title":"2. Distributed Broker Architecture","text":""},{"location":"techdives/DistrubutedSystems/Kafka/#partition-replication-across-brokers","title":"Partition Replication Across Brokers","text":""},{"location":"techdives/DistrubutedSystems/Kafka/#3-horizontal-scaling-add-more-brokers-or-partitions","title":"3. Horizontal Scaling: Add More Brokers or Partitions","text":""},{"location":"techdives/DistrubutedSystems/Kafka/#scaling-the-producer-and-consumer-layers","title":"Scaling the Producer and Consumer Layers","text":""},{"location":"techdives/DistrubutedSystems/Kafka/#4-producer-scalability-load-balancing-and-batch-processing","title":"4. Producer Scalability: Load Balancing and Batch Processing","text":""},{"location":"techdives/DistrubutedSystems/Kafka/#5-consumer-scalability-consumer-groups-and-load-sharing","title":"5. Consumer Scalability: Consumer Groups and Load Sharing","text":""},{"location":"techdives/DistrubutedSystems/Kafka/#rebalancing-consumers","title":"Rebalancing Consumers","text":""},{"location":"techdives/DistrubutedSystems/Kafka/#6-efficient-load-balancing-and-rebalancing-mechanisms","title":"6. Efficient Load Balancing and Rebalancing Mechanisms","text":""},{"location":"techdives/DistrubutedSystems/Kafka/#sticky-partitioning-strategy","title":"Sticky Partitioning Strategy","text":""},{"location":"techdives/DistrubutedSystems/Kafka/#7-network-optimization-and-zero-copy-technology","title":"7. Network Optimization and Zero-Copy Technology","text":""},{"location":"techdives/DistrubutedSystems/Kafka/#8-broker-level-optimizations-and-compression","title":"8. Broker-Level Optimizations and Compression","text":""},{"location":"techdives/DistrubutedSystems/Kafka/#9-kafka-controller-and-partition-leadership","title":"9. Kafka Controller and Partition Leadership","text":""},{"location":"techdives/DistrubutedSystems/Kafka/#10-handling-backpressure-for-scalability","title":"10. Handling Backpressure for Scalability","text":""},{"location":"techdives/DistrubutedSystems/Kafka/#11-configuring-for-high-scalability-tuning-parameters","title":"11. Configuring for High Scalability: Tuning Parameters","text":""},{"location":"techdives/DistrubutedSystems/Kafka/#summary-of-kafkas-high-scalability-mechanisms","title":"Summary of Kafka\u2019s High Scalability Mechanisms","text":"Aspect How Kafka Scales Partitioning Divides topics into partitions for parallelism. Distributed Brokers Workload spread across multiple brokers. Horizontal Scaling New brokers or partitions can be added dynamically. Producer Parallelism Producers write to multiple partitions concurrently. Consumer Groups Consumers share partitions for parallel processing. Rebalancing Redistributes workload when brokers/consumers change. Batching & Compression Reduces I/O and network overhead. 
Zero-Copy Technology Efficient data transfer with low CPU usage."},{"location":"techdives/DistrubutedSystems/Kafka/#how-kafka-achieves-high-availability-and-fault-tolerance","title":"How Kafka Achieves High Availability and Fault Tolerance ?","text":"

Kafka\u2019s design for high availability and fault tolerance centers on replication, leader election, distributed brokers, and self-healing mechanisms. Together, these mechanisms ensure Kafka can handle hardware, software, and network failures, while maintaining data integrity, durability, and continuity.

"},{"location":"techdives/DistrubutedSystems/Kafka/#1-partition-replication-foundation-of-ha-and-ft","title":"1. Partition Replication \u2013 Foundation of HA and FT","text":""},{"location":"techdives/DistrubutedSystems/Kafka/#how-replication-ensures-ha-and-ft","title":"How Replication Ensures HA and FT:","text":"
  1. Leader-follower model: Only the leader replica handles read and write requests; followers passively replicate the leader\u2019s data.
  2. Automatic failover: If the leader broker fails, one of the followers (from the in-sync replica set, or ISR) is promoted as the new leader, ensuring the partition remains available.

This setup ensures continuous data availability and minimal downtime when individual brokers fail.
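The replication factor is fixed per topic at creation time. Here is a sketch using the Java AdminClient; the broker address, topic name, and sizing are illustrative assumptions:

import java.util.List;\nimport java.util.Properties;\nimport org.apache.kafka.clients.admin.AdminClient;\nimport org.apache.kafka.clients.admin.NewTopic;\n\npublic class CreateReplicatedTopic {\n    public static void main(String[] args) throws Exception {\n        Properties props = new Properties();\n        props.put(\"bootstrap.servers\", \"localhost:9092\");\n        try (AdminClient admin = AdminClient.create(props)) {\n            // 6 partitions, replication factor 3: per partition, 1 leader + 2 followers\n            NewTopic topic = new NewTopic(\"orders\", 6, (short) 3);\n            admin.createTopics(List.of(topic)).all().get();\n        }\n    }\n}\n

With a replication factor of 3, each partition gets one leader and two followers placed on distinct brokers.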
"},{"location":"techdives/DistrubutedSystems/Kafka/#2-leader-election-and-failover-mechanism","title":"2. Leader Election and Failover Mechanism","text":""},{"location":"techdives/DistrubutedSystems/Kafka/#leader-election-process-during-failures","title":"Leader Election Process During Failures:","text":"
  1. Detection: Zookeeper (or KRaft) detects a failed broker.
  2. Election: The controller broker initiates a leader election for partitions on the failed broker.
  3. Promotion: An in-sync follower (replica that\u2019s fully up-to-date) is promoted to leader.
  4. Metadata update: Kafka updates cluster metadata to reflect the new leader, ensuring clients redirect requests to the new leader.

This automated and rapid leader election ensures Kafka remains operational with minimal interruption.

"},{"location":"techdives/DistrubutedSystems/Kafka/#3-in-sync-replicas-isr-ensuring-data-integrity","title":"3. In-Sync Replicas (ISR) \u2013 Ensuring Data Integrity","text":""},{"location":"techdives/DistrubutedSystems/Kafka/#role-of-isr-in-minimizing-data-loss","title":"Role of ISR in Minimizing Data Loss:","text":""},{"location":"techdives/DistrubutedSystems/Kafka/#4-controller-broker-managing-ha-and-ft-orchestration","title":"4. Controller Broker \u2013 Managing HA and FT Orchestration","text":""},{"location":"techdives/DistrubutedSystems/Kafka/#5-distributed-brokers-avoiding-single-points-of-failure","title":"5. Distributed Brokers \u2013 Avoiding Single Points of Failure","text":""},{"location":"techdives/DistrubutedSystems/Kafka/#how-broker-distribution-improves-ha-and-ft","title":"How Broker Distribution Improves HA and FT:","text":""},{"location":"techdives/DistrubutedSystems/Kafka/#6-acknowledgment-policies-acks-ensuring-data-durability","title":"6. Acknowledgment Policies (ACKs) \u2013 Ensuring Data Durability","text":"

Kafka provides acknowledgment modes for trading data durability off against performance. These settings control when a message is considered \u201csuccessfully written\u201d:
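For reference, here is a sketch of the corresponding producer settings; the same keys appear in the recommended-settings tables at the end of this page:

# producer configuration (the same keys apply to the Java client via Properties)\nacks=all                  # wait for all in-sync replicas before acknowledging\nenable.idempotence=true   # deduplicate retries for exactly-once per partition\nretries=2147483647        # retry transient failures indefinitely\n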

"},{"location":"techdives/DistrubutedSystems/Kafka/#role-of-acks-in-fault-tolerance","title":"Role of ACKs in Fault Tolerance:","text":""},{"location":"techdives/DistrubutedSystems/Kafka/#7-log-compaction-and-retention-for-data-availability","title":"7. Log Compaction and Retention for Data Availability","text":"

Kafka employs log compaction and retention to maintain long-term data availability:

  1. Time-based retention: Kafka retains messages for a configurable period (e.g., 7 days).
  2. Size-based retention: Kafka deletes old messages once partition logs reach a certain size.
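These retention knobs are ordinary per-topic configs. The sketch below sets them at topic creation time with the Java AdminClient; the broker address, topic name, and values are illustrative assumptions:

import java.util.List;\nimport java.util.Map;\nimport java.util.Properties;\nimport org.apache.kafka.clients.admin.AdminClient;\nimport org.apache.kafka.clients.admin.NewTopic;\n\npublic class CreateTopicWithRetention {\n    public static void main(String[] args) throws Exception {\n        Properties props = new Properties();\n        props.put(\"bootstrap.servers\", \"localhost:9092\");\n        try (AdminClient admin = AdminClient.create(props)) {\n            // 7-day time retention and a 1 GiB size cap per partition (illustrative)\n            NewTopic logs = new NewTopic(\"app-logs\", 3, (short) 3)\n                    .configs(Map.of(\n                            \"retention.ms\", \"604800000\",\n                            \"retention.bytes\", \"1073741824\"));\n            admin.createTopics(List.of(logs)).all().get();\n        }\n    }\n}\n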
"},{"location":"techdives/DistrubutedSystems/Kafka/#log-compaction-for-data-recovery","title":"Log Compaction for Data Recovery:","text":""},{"location":"techdives/DistrubutedSystems/Kafka/#8-handling-split-brain-scenarios-consistency-through-quorums","title":"8. Handling Split-Brain Scenarios \u2013 Consistency Through Quorums","text":"

A split-brain scenario happens when a broker loses connectivity with others, risking data inconsistency.

"},{"location":"techdives/DistrubutedSystems/Kafka/#kafkas-approach-to-preventing-split-brain","title":"Kafka\u2019s Approach to Preventing Split-Brain:","text":"

This quorum-based replication prevents data corruption during network partitions.
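Two broker settings are central here; a sketch of a server.properties fragment whose values mirror the cluster-level recommendations at the end of this page:

# server.properties (broker defaults; min.insync.replicas can also be set per topic)\nunclean.leader.election.enable=false   # never promote an out-of-sync replica\nmin.insync.replicas=2                  # with acks=all, a write needs two in-sync copies\n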

"},{"location":"techdives/DistrubutedSystems/Kafka/#9-self-healing-and-automated-recovery","title":"9. Self-Healing and Automated Recovery","text":"

Kafka\u2019s self-healing mechanisms enable it to quickly recover from broker or replica failures:

These self-healing features maintain availability and data consistency without requiring manual intervention.

"},{"location":"techdives/DistrubutedSystems/Kafka/#10-multi-datacenter-replication-for-disaster-recovery","title":"10. Multi-Datacenter Replication for Disaster Recovery","text":"

Kafka supports multi-datacenter replication for cross-region fault tolerance using tools like Kafka MirrorMaker.

"},{"location":"techdives/DistrubutedSystems/Kafka/#how-multi-cluster-replication-ensures-availability-and-fault-tolerance","title":"How Multi-Cluster Replication Ensures Availability and Fault Tolerance:","text":""},{"location":"techdives/DistrubutedSystems/Kafka/#11-client-side-handling-of-failures-for-ha-and-ft","title":"11. Client-Side Handling of Failures for HA and FT","text":"

Kafka\u2019s producers and consumers have built-in resilience to handle failures gracefully:

"},{"location":"techdives/DistrubutedSystems/Kafka/#summary-of-kafkas-high-availability-and-fault-tolerance-mechanisms","title":"Summary of Kafka\u2019s High Availability and Fault Tolerance Mechanisms","text":"Mechanism How It Ensures HA and FT Partition Replication Multiple copies of data across brokers ensure availability even during broker failures. Leader Election Automatically promotes a follower to leader when the leader broker fails. In-Sync Replicas (ISR) Only fully synchronized replicas can be promoted to leader to prevent data loss. Controller Broker Manages partition leadership and rebalancing operations, ensuring consistent cluster state. Distributed Brokers Spreads data across brokers to avoid single points of failure. Dynamic Rebalancing Adjusts workload across brokers and consumers to handle changes or failures smoothly. Acknowledgment Policies (ACKs) Ensures data is safely replicated before acknowledging writes, reducing data loss risk. Log Compaction Maintains the latest data state for recovery during consumer or application failures. Client-Side Recovery Producers and consumers handle broker failures with retries and automatic rebalancing. Network Partition Handling Uses Zookeeper/KRaft to prevent split-brain scenarios and ensure data consistency. Multi-Datacenter Replication Provides disaster recovery and redundancy across regions."},{"location":"techdives/DistrubutedSystems/Kafka/#kafka-features-impacts-on-ha-and-ft","title":"Kafka features Impacts on HA and FT","text":"Feature/Configuration Configuration Details Impact on HA Impact on FT Explanation Partition Replication Set a high replication factor (e.g., 3 or 5) High High Ensures multiple copies of data across brokers; if the leader fails, a follower can be promoted, maintaining data availability and durability. In-Sync Replicas (ISR) Use acks=all to ensure ISR sync before acknowledgments Moderate High Guarantees data consistency by ensuring messages are replicated to all ISR replicas before confirming writes, reducing data loss. Leader Election Mechanism Managed by Zookeeper or KRaft for automatic failover High Moderate Automatically promotes a follower to leader when the current leader fails, minimizing downtime. Controller Broker Redundancy provided by re-election if the current controller fails High Moderate Ensures the orchestrator of metadata and rebalancing has a backup, maintaining consistent cluster operations. Distributed Broker Placement Spread partitions across brokers; no two replicas on the same broker High High Reduces the risk of data unavailability and loss by preventing single points of failure. Rebalancing Strategy Configure partition reassignment for balanced broker load High Low Prevents overload on individual brokers, enhancing availability; this has less impact on data durability. Acknowledgment Policy (ACKs) Set acks=all for highest data durability Low High Ensures writes are only confirmed after replication to all ISR replicas, reducing the risk of data loss. Log Compaction Enable for compacted topics to retain latest state Moderate Moderate Retains the latest state of each key, useful for stateful applications; supports recovery but doesn\u2019t guarantee availability. Retention Policies Configure time-based or size-based retention High Low Maintains historical data for consumer recovery, contributing to high availability if consumers fall behind. 
Client Retry Mechanisms Configure producer and consumer retries High Low Enables producers and consumers to handle temporary broker unavailability, ensuring continuous operation. Consumer Group Rebalancing Set rebalancing policies to avoid bottlenecks High Low Ensures efficient load distribution among consumers, enhancing availability but minimally impacting data durability. Multi-Datacenter Replication Enable with Kafka MirrorMaker or similar tools High High Provides cross-region redundancy for both availability and fault tolerance, especially critical for disaster recovery. Backpressure Handling Use offset tracking and monitoring Moderate High Allows consumers to fall behind producers without causing data loss, enhancing fault tolerance by protecting against backpressure failures. Split-Brain Handling Managed by Zookeeper/KRaft to avoid conflicting writes Low High Prevents data inconsistency by ensuring only one leader exists per partition, critical for consistency in partitioned network conditions. Log Recovery Enable brokers to rebuild from log segments on restart Moderate High Ensures brokers can recover their state after a crash, minimizing data loss and ensuring continuity post-restart.

Kafka\u2019s architecture for high availability and fault tolerance ensures the system remains resilient under various failure scenarios. Through partition replication, leader election, dynamic rebalancing, and multi-datacenter replication, Kafka provides a robust infrastructure with no single points of failure and near-zero downtime, making it reliable for critical real-time data streaming and processing applications.

"},{"location":"techdives/DistrubutedSystems/Kafka/#what-makes-kafka-unique","title":"What Makes Kafka Unique ?","text":"

An append-only, log-based architecture and a high-throughput, low-latency design are two of Kafka\u2019s core features that make it unique.

"},{"location":"techdives/DistrubutedSystems/Kafka/#1-log-based-architecture-the-foundation-of-kafkas-data-model","title":"1. Log-Based Architecture: The Foundation of Kafka\u2019s Data Model","text":"

Kafka\u2019s log-based architecture is what makes it fundamentally different from traditional messaging systems. It\u2019s built on the concept of a distributed, partitioned, and immutable log, allowing Kafka to scale, preserve data ordering, and enable consumers to replay data as needed. Here\u2019s a deep dive into what this architecture entails and why it\u2019s special.

"},{"location":"techdives/DistrubutedSystems/Kafka/#how-log-based-architecture-works","title":"How Log-Based Architecture Works","text":"
  1. Topics and Partitions as Logs: In Kafka, each topic is split into partitions, and each partition acts as an independent log. Within a partition, messages are appended sequentially in an ordered, immutable fashion, with each message assigned an offset (a unique, incremental ID). Offsets serve as pointers to each message\u2019s position within the log, making it easy for consumers to track their progress.

  2. Immutable Log Storage: Messages in each partition are stored as an append-only log\u2014once written, messages cannot be modified or deleted (unless retention policies specify otherwise). This immutability provides consistency and durability, as every message has a fixed position within the partition log.

  3. Replayable and Persistent Data: Kafka\u2019s log structure allows consumers to replay messages from any offset within the retention period. Consumers can reread data for recovery, reprocessing, or analytics without impacting other consumers or producers. Since Kafka retains messages based on time or size-based retention policies, consumers can pick up data where they left off or reprocess older data without affecting ongoing data flows.
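Replay in practice: a consumer can rewind to any retained offset, as sketched below with the Java client; the broker address, topic, partition, and target offset are placeholder assumptions:

import java.util.List;\nimport java.util.Properties;\nimport org.apache.kafka.clients.consumer.KafkaConsumer;\nimport org.apache.kafka.common.TopicPartition;\n\npublic class ReplaySketch {\n    public static void main(String[] args) {\n        Properties c = new Properties();\n        c.put(\"bootstrap.servers\", \"localhost:9092\");\n        c.put(\"group.id\", \"replayer\");\n        c.put(\"key.deserializer\", \"org.apache.kafka.common.serialization.StringDeserializer\");\n        c.put(\"value.deserializer\", \"org.apache.kafka.common.serialization.StringDeserializer\");\n        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(c)) {\n            TopicPartition p0 = new TopicPartition(\"events\", 0);\n            consumer.assign(List.of(p0));\n            consumer.seekToBeginning(List.of(p0)); // replay from the earliest retained offset\n            // or jump to an absolute position within the retention window:\n            consumer.seek(p0, 12345L);\n        }\n    }\n}\n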
"},{"location":"techdives/DistrubutedSystems/Kafka/#why-log-based-architecture-makes-kafka-unique","title":"Why Log-Based Architecture Makes Kafka Unique","text":""},{"location":"techdives/DistrubutedSystems/Kafka/#kafkas-log-based-architecture-in-practice","title":"Kafka\u2019s Log-Based Architecture in Practice","text":""},{"location":"techdives/DistrubutedSystems/Kafka/#2-high-throughput-with-low-latency-design","title":"2. High-Throughput with Low-Latency Design","text":"

Kafka\u2019s design is optimized for handling millions of events per second with minimal delay, even under heavy load. This high throughput and low latency are achieved through a combination of disk I/O optimizations, data compression, and efficient network handling. Let\u2019s explore these in detail.

"},{"location":"techdives/DistrubutedSystems/Kafka/#key-components-of-kafkas-high-throughput-low-latency-design","title":"Key Components of Kafka\u2019s High-Throughput, Low-Latency Design","text":"
  1. Sequential Disk I/O: Kafka writes messages to disk sequentially rather than performing random writes. This significantly reduces seek time, as the disk head doesn\u2019t need to jump around to write or read data. Sequential writes take advantage of modern disks\u2019 ability to handle high-speed sequential I/O, especially on SSDs, allowing Kafka to process large volumes of data quickly.

  2. Page Cache Usage: Kafka leverages the OS\u2019s page cache to keep frequently accessed data in memory. By utilizing the page cache, Kafka avoids direct disk reads for recently accessed data, reducing read latency. For producers writing data, Kafka batches messages in memory before flushing them to disk, improving throughput by reducing the number of disk write operations.

  3. Zero-Copy Data Transfer: Kafka uses zero-copy technology, specifically the sendfile() system call, to transfer data directly from disk to network sockets without additional memory copies. This allows Kafka to handle network I/O with minimal CPU usage, reducing latency and increasing throughput, especially for large messages.

  4. Batching and Compression: Kafka batches multiple messages into a single disk write or network packet, minimizing the number of I/O operations required. Batching not only increases efficiency but also improves compression rates by reducing network overhead. Kafka supports Gzip, Snappy, and LZ4 compression, reducing data size and thus speeding up data transfer.

  5. Network Optimization for Low Latency: Kafka\u2019s network layer is designed to handle high-speed data transfer between producers, brokers, and consumers. Kafka supports asynchronous data processing, allowing producers to continue sending messages without waiting for each acknowledgment, which further reduces latency. Kafka brokers are stateful, meaning they store partition offsets and metadata locally, reducing the need to rely on external systems for routing information; this minimizes delays in routing messages to the correct consumers.
"},{"location":"techdives/DistrubutedSystems/Kafka/#why-kafkas-throughput-and-latency-make-it-special","title":"Why Kafka\u2019s Throughput and Latency Make It Special","text":""},{"location":"techdives/DistrubutedSystems/Kafka/#kafkas-high-throughput-low-latency-design-in-practice","title":"Kafka\u2019s High-Throughput, Low-Latency Design in Practice","text":""},{"location":"techdives/DistrubutedSystems/Kafka/#summary-of-kafkas-unique-log-based-architecture-and-high-throughput-design","title":"Summary of Kafka\u2019s Unique Log-Based Architecture and High-Throughput Design","text":"Aspect Log-Based Architecture High-Throughput with Low-Latency Design Core Principle Immutable, append-only logs for each partition. Optimized for sequential writes, memory management, and efficient data transfer. Data Storage Messages are stored as ordered, append-only logs in each partition, ensuring data immutability and ordering. Batching, compression, and zero-copy transfer ensure efficient storage and minimal latency in data handling. Fault Tolerance Replication across brokers enables resilience against broker failures, as partitions can be replayed and failover to available replicas. Brokers use page cache and zero-copy I/O to keep data transfer reliable under heavy load, even in the case of high data volumes. Parallelism and Scalability Partitions allow Kafka to scale horizontally, providing parallel processing across producers and consumers, with strict ordering within each partition. Sequential I/O and batching enable Kafka to handle high-throughput workloads, supporting massive parallelism across clients. Use Cases Event sourcing, state recovery, and audit logs where ordered, immutable data is essential. Real-time monitoring, analytics, and streaming data pipelines that require fast ingestion and minimal latency."},{"location":"techdives/DistrubutedSystems/Kafka/#recommended-kafka-settings","title":"Recommended Kafka Settings","text":"

The recommended settings below are organized into individual tables by section. Each table summarizes the key settings along with recommended values and explanations.

"},{"location":"techdives/DistrubutedSystems/Kafka/#1-broker-level-settings","title":"1. Broker-Level Settings","text":"Setting Recommended Value Purpose replication.factor 3 Ensures redundancy and fault tolerance, allowing partition recovery if a broker fails. min.insync.replicas 2 Reduces data loss risk by ensuring at least two replicas acknowledge writes. num.partitions 6 (or based on load) Balances throughput and scalability; more partitions allow greater parallel processing. log.retention.hours 168 (7 days) Controls how long messages are retained; suitable for standard processing and replay needs. log.segment.bytes 1 GB Manages the segment size for optimal disk usage and performance in log rolling. log.cleanup.policy delete or compact delete for default; compact for retaining only the latest version of each key. compression.type producer, snappy, or lz4 Saves bandwidth and improves throughput, especially under high data volume conditions."},{"location":"techdives/DistrubutedSystems/Kafka/#2-producer-level-settings","title":"2. Producer-Level Settings","text":"Setting Recommended Value Purpose acks all Ensures that all in-sync replicas acknowledge writes, increasing reliability. retries Integer.MAX_VALUE Handles transient network issues by allowing indefinite retries, preventing message loss. retry.backoff.ms 100 Introduces a pause between retries, avoiding retry flooding and improving stability. enable.idempotence true Prevents duplicate messages by ensuring exactly-once semantics in data delivery. batch.size 32 KB Enhances throughput by accumulating records into batches before sending them. linger.ms 5 Small linger time allows batches to fill, reducing network overhead without delaying sends. compression.type snappy or lz4 Compresses data to reduce payload size, saving bandwidth and reducing transfer time."},{"location":"techdives/DistrubutedSystems/Kafka/#3-consumer-level-settings","title":"3. Consumer-Level Settings","text":"Setting Recommended Value Purpose auto.offset.reset earliest Ensures consumers start reading from the beginning if no offset is committed. max.poll.records 500 Controls the batch size per poll, balancing throughput and processing time. session.timeout.ms 30,000 Provides enough time for consumers to process data without triggering unnecessary rebalances. heartbeat.interval.ms 10,000 Sets the interval for heartbeat checks within the session timeout, reducing rebalance triggers. fetch.min.bytes 1 MB Improves fetch efficiency by waiting for a minimum data size before retrieving. fetch.max.bytes 50 MB Enables large batch sizes for high-throughput consumers, reducing network calls. enable.auto.commit false Disables automatic offset commits, allowing applications to commit only after processing."},{"location":"techdives/DistrubutedSystems/Kafka/#4-cluster-level-settings-for-high-availability-and-fault-tolerance","title":"4. Cluster-Level Settings for High Availability and Fault Tolerance","text":"Setting Recommended Value Purpose min.insync.replicas 2 Ensures at least two replicas must be in sync, providing better durability. controlled.shutdown.enable true Enables controlled shutdown, allowing brokers to gracefully transition leadership. unclean.leader.election.enable false Prevents out-of-sync replicas from being elected as leaders, protecting data consistency. num.network.threads 8 Increases concurrency for network traffic, supporting high-throughput applications. num.io.threads 8 Increases I/O concurrency, allowing efficient data transfer under heavy load. 
num.replica.fetchers 4 Enhances replication speed by allowing multiple fetcher threads for synchronizing replicas."},{"location":"techdives/DistrubutedSystems/Kafka/#5-zookeeper-settings","title":"5. Zookeeper Settings","text":"Setting Recommended Value Purpose zookeeper.session.timeout.ms 18,000 Prevents frequent Zookeeper disconnections during high loads, stabilizing metadata handling. zookeeper.connection.timeout.ms 6,000 Ensures reliable connections to Zookeeper, reducing the likelihood of leader election issues."},{"location":"techdives/DistrubutedSystems/Kafka/#6-kraft-kafka-raft-settings","title":"6. KRaft (Kafka Raft) Settings","text":"Setting Recommended Value Purpose process.roles broker,controller Defines the roles of Kafka nodes. A KRaft cluster typically has combined broker and controller roles, but can be split if desired. controller.quorum.voters List of controllers Specifies the list of controller nodes in the form nodeID@hostname:port, where each entry represents a voter in the Raft consensus group. controller.listener.names CONTROLLER Designates the listener name for inter-controller communication in the Raft quorum. controller.heartbeat.interval.ms 2,000 Sets the interval between heartbeats for controller nodes, ensuring they stay connected and responsive within the Raft quorum. controller.metrics.sample.window.ms 30,000 Configures the window size for collecting metrics, helping to monitor Raft performance over time. controller.log.dirs /path/to/controller/logs Specifies the directory where controller logs are stored. It\u2019s best to use a dedicated disk for controller logs to avoid I/O contention with brokers. metadata.log.segment.bytes 1 GB Controls the segment size for metadata logs, managing disk usage and log rolling frequency for metadata in KRaft mode. metadata.log.retention.bytes -1 (unlimited) Configures metadata log retention based on disk space, allowing infinite retention by default. Adjust based on available storage. metadata.log.retention.ms 604,800,000 (7 days) Retains metadata for a set duration; typically configured for a week to enable rollback in case of issues. controller.socket.timeout.ms 30,000 Sets the timeout for controller-to-controller connections, ensuring stability during network issues. leader.imbalance.check.interval.seconds 300 (5 minutes) Defines the interval at which the controller checks for leader imbalance, helping to maintain even load distribution across brokers.
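
As a concrete illustration of the producer-side settings above, here is a minimal sketch using the Python confluent-kafka client (the client choice, broker address, and topic name are assumptions; adapt them to your environment):

from confluent_kafka import Producer\n\n# Producer configured with the recommended reliability and batching settings.\nproducer = Producer({\n    \"bootstrap.servers\": \"localhost:9092\",\n    \"acks\": \"all\",               # all in-sync replicas must acknowledge\n    \"enable.idempotence\": True,  # prevents duplicates on retry\n    \"compression.type\": \"lz4\",   # compress batches to save bandwidth\n    \"batch.size\": 32768,         # accumulate up to 32 KB per batch\n    \"linger.ms\": 5,              # wait briefly so batches can fill\n})\n\nproducer.produce(\"events\", key=\"user-1\", value=\"login\")  # asynchronous send\nproducer.flush()  # block until all queued messages are delivered\n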

Reference Links:

https://www.hellointerview.com/learn/system-design/deep-dives/kafka

"},{"location":"techdives/DistrubutedSystems/Redis/","title":"Redis","text":"

Redis is an open-source, in-memory data structure store that serves as a database, cache, message broker, and streaming engine. Its versatility and high performance make it a popular choice for various applications. This comprehensive guide delves into Redis's architecture, data structures, commands, deployment strategies, and best practices.

"},{"location":"techdives/DistrubutedSystems/Redis/#1-introduction-to-redis","title":"1. Introduction to Redis","text":"

Redis, short for Remote Dictionary Server, is renowned for its speed and flexibility. Operating primarily in memory, it supports various data structures and offers features like replication, persistence, and high availability.

"},{"location":"techdives/DistrubutedSystems/Redis/#2-core-data-structures","title":"2. Core Data Structures","text":""},{"location":"techdives/DistrubutedSystems/Redis/#21-strings","title":"2.1. Strings","text":"

Definition: A string in Redis is a binary-safe sequence of bytes. It's the most basic data type in Redis, with a maximum size of 512 MB.

Examples: - Set and get a simple string:

SET key \"Hello, World!\"\nGET key\n
- Increment a numerical value:
SET counter 10\nINCR counter      // Result: 11\nINCRBY counter 5  // Result: 16\n

Underlying Data Structure: Dynamic String (SDS - Simple Dynamic String)

Time Complexity: - SET key value: O(1) - GET key: O(1) - INCR key: O(1)

Best Use Cases: - Caching: Store frequently accessed values. - Counters: Track counts for metrics or events. - Session Data: Store serialized JSON for user sessions.

"},{"location":"techdives/DistrubutedSystems/Redis/#22-hashes","title":"2.2. Hashes","text":"

Definition: A hash in Redis is a collection of field-value pairs, ideal for representing objects (e.g., user profiles).

Examples: - Creating and managing user data in a hash:

HSET user:1001 name \"Alice\" age 30 city \"New York\"\nHGET user:1001 name           // Returns \"Alice\"\nHGETALL user:1001             // Returns all key-value pairs in hash\n

Underlying Data Structure: Hash Table or ZipList (for small hashes)

Time Complexity: - HSET key field value: O(1) (amortized) - HGET key field: O(1) - HGETALL key: O(N) (N being the number of fields in the hash)

Best Use Cases: - Storing Objects: Represent complex entities. - Configuration Settings: Store configurations as a set of key-value pairs.

"},{"location":"techdives/DistrubutedSystems/Redis/#23-lists","title":"2.3. Lists","text":"

Definition: A list is an ordered collection of strings, allowing elements to be added at either end, functioning much like a linked list.

Examples: - Using a list to store recent activity:

LPUSH recent_activity \"login\" \"view_profile\" \"logout\"\nLRANGE recent_activity 0 -1   // Fetches all elements in the list\n

Underlying Data Structure: Linked List or QuickList (optimized for performance and memory usage)

Time Complexity: - LPUSH key value: O(1) - LRANGE key start stop: O(S+N) (S being the starting offset and N the number of elements retrieved)

Best Use Cases: - Activity Streams: Store recent actions or logs. - Task Queues: Implement FIFO or LIFO queues.

"},{"location":"techdives/DistrubutedSystems/Redis/#24-sets","title":"2.4. Sets","text":"

Definition: Sets are unordered collections of unique strings, ideal for performing set operations.

Examples: - Managing unique tags:

SADD tags \"redis\" \"database\" \"in-memory\"\nSMEMBERS tags                 // Returns all unique tags\n

Underlying Data Structure: Hash Table or IntSet (for small sets of integers)

Time Complexity: - SADD key value: O(1) - SMEMBERS key: O(N) (N being the number of elements in the set) - SINTER key1 key2 ... keyN: O(N*M) (N being the cardinality of the smallest set and M the number of sets)

Best Use Cases: - Unique Values: Track unique items like IP addresses. - Social Networks: Represent social relationships (e.g., friends, followers).

"},{"location":"techdives/DistrubutedSystems/Redis/#25-sorted-sets","title":"2.5. Sorted Sets","text":"

Definition: Similar to sets, but with an associated score that allows elements to be sorted by score.

Examples: - Storing leaderboard data:

ZADD leaderboard 100 \"Alice\" 200 \"Bob\"\nZRANGE leaderboard 0 -1 WITHSCORES\n

Underlying Data Structure: Skip List and Hash Table (for fast access and sorted ordering)

Time Complexity: - ZADD key score member: O(log(N)) - ZRANGE key start stop: O(log(N)+M) (M being the number of elements returned)

Best Use Cases: - Leaderboards: Rank users based on scores. - Event Prioritization: Sort items by priority or timestamp.

"},{"location":"techdives/DistrubutedSystems/Redis/#26-bitmaps","title":"2.6. Bitmaps","text":"

Definition: Bitmaps use strings to store and manipulate individual bits, offering efficient binary storage.

Examples: - Tracking user flags:

SETBIT user_flags 5 1         // Sets the 6th bit to 1\nGETBIT user_flags 5           // Returns 1\n

Underlying Data Structure: String (each bit is set or retrieved from the byte representation)

Time Complexity: - SETBIT key offset value: O(1) - GETBIT key offset: O(1) - BITCOUNT key: O(N) (N being the length of the string)

Best Use Cases: - Feature Flags: Toggle features for users. - Activity Tracking: Record binary states like presence or attendance.

"},{"location":"techdives/DistrubutedSystems/Redis/#27-hyperloglogs","title":"2.7. HyperLogLogs","text":"

Definition: HyperLogLog is a probabilistic structure for approximating unique element counts.

Examples: - Counting unique visitors:

PFADD visitors \"user1\" \"user2\"\nPFCOUNT visitors             // Returns approximate unique count\n

Underlying Data Structure: Sparse and Dense Data Representations (optimized for low memory usage)

Time Complexity: - PFADD key element: O(1) - PFCOUNT key: O(1)

Best Use Cases: - Unique Counting: Approximate counts of unique views or visitors. - Low-Memory Use: Ideal for large datasets with memory constraints.

"},{"location":"techdives/DistrubutedSystems/Redis/#28-streams","title":"2.8. Streams","text":"

Definition: A stream is a log-like data structure for managing continuous data flows, supporting consumer groups.

Examples: - Tracking event streams:

XADD mystream * name \"Alice\" action \"login\"\nXREAD COUNT 2 STREAMS mystream 0\n

Underlying Data Structure: Radix Tree (used for efficient storage and traversal of stream entries)

Time Complexity: - XADD key * field value: O(log(N)) (N being the number of items in the stream) - XREAD key start stop: O(log(N)+M) (M being the number of items returned)

Best Use Cases: - Event Sourcing: Track ordered events or logs. - Message Queues: Reliable message distribution with consumer groups.

"},{"location":"techdives/DistrubutedSystems/Redis/#29-geospatial-indexes","title":"2.9. Geospatial Indexes","text":"

Definition: Redis provides commands for storing and querying location data with latitude and longitude.

Examples: - Adding and querying locations:

GEOADD cities 13.361389 38.115556 \"Palermo\"\nGEORADIUS cities 15 37 200 km\n

Underlying Data Structure: Geohash with Sorted Sets (uses sorted sets for indexing)

Time Complexity: - GEOADD key longitude latitude member: O(log(N)) - GEORADIUS key longitude latitude radius: O(log(N)+M) (M being the number of results)

Best Use Cases: - Location-Based Services: Search and display nearby locations. - Geofencing: Detect whether users enter specific geographic zones.

"},{"location":"techdives/DistrubutedSystems/Redis/#3-commands-table","title":"3. Commands Table","text":"Data Structure Definition Example Commands Best Use Cases Time Complexity Strings Binary-safe sequences of bytes for text or binary data. SET key value, GET key, INCR key, DECR key, APPEND key value, STRLEN key Caching, counters, session data SET: O(1), GET: O(1), INCR: O(1), APPEND: O(N) Hashes Collection of key-value pairs, suitable for objects. HSET key field value, HGET key field, HGETALL key, HDEL key field, HLEN key Storing objects, configuration settings HSET: O(1), HGET: O(1), HGETALL: O(N), HLEN: O(1) Lists Ordered collection of strings, acts like a linked list. LPUSH key value, RPUSH key value, LPOP key, RPOP key, LRANGE key start stop, LLEN key Activity streams, task queues LPUSH: O(1), LRANGE: O(S+N), LLEN: O(1) Sets Unordered collections of unique strings, optimized for sets. SADD key value, SREM key value, SMEMBERS key, SISMEMBER key value, SUNION key1 key2, SINTER key1 key2 Unique values, social relationships SADD: O(1), SMEMBERS: O(N), SINTER: O(N*M) Sorted Sets Sets with scores, allowing elements to be sorted by score. ZADD key score member, ZRANGE key start stop WITHSCORES, ZREM key member, ZSCORE key member, ZREVRANGE key start stop, ZCOUNT key min max Leaderboards, event prioritization ZADD: O(log(N)), ZRANGE: O(log(N)+M), ZSCORE: O(1) Bitmaps Stores and manipulates bits in a binary-safe string. SETBIT key offset value, GETBIT key offset, BITCOUNT key, BITOP operation destkey key1 key2 Feature flags, activity tracking SETBIT: O(1), GETBIT: O(1), BITCOUNT: O(N) HyperLogLogs Probabilistic structure for approximate unique counts. PFADD key element, PFCOUNT key, PFMERGE destkey sourcekey1 sourcekey2 Unique counting, low-memory usage PFADD: O(1), PFCOUNT: O(1), PFMERGE: O(N) Streams Log-like structure for managing continuous data flows. XADD key * field value, XREAD COUNT n STREAMS key, XGROUP CREATE key group consumer_id, XACK key group message_id, XDEL key message_id, XINFO key, XLEN key, XTRIM key MAXLEN ~ count Event sourcing, message queues XADD: O(log(N)), XREAD: O(log(N)+M), XGROUP: O(1), XACK: O(1) Geospatial Indexes Stores and queries location data with latitude and longitude. GEOADD key longitude latitude member, GEODIST key member1 member2, GEORADIUS key longitude latitude radius m km, GEORADIUSBYMEMBER key member radius m km, GEOHASH key member Location-based services, geofencing GEOADD: O(log(N)), GEORADIUS: O(log(N)+M)"},{"location":"techdives/DistrubutedSystems/Redis/#4-persistence-and-durability","title":"4. Persistence and Durability","text":"

Redis operates primarily as an in-memory database, prioritizing speed and low-latency operations. However, it provides two main persistence mechanisms to ensure data durability:

RDB (snapshotting) has lower I/O overhead and a compact file size, but risks losing data written between snapshots if Redis crashes.

AOF (Append-Only File) offers better durability by logging every write operation for more frequent data persistence, at the cost of larger file sizes and higher I/O usage.

Choosing Between RDB and AOF: You can use either method, or both in combination, based on your application needs. For example, using both allows for rapid recovery (RDB) with high durability (AOF).
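
A minimal redis.conf sketch combining both mechanisms (the directive values are illustrative defaults, not tuned recommendations):

save 900 1              # RDB: snapshot if at least 1 key changed in 900 s\nsave 300 10             # RDB: snapshot if at least 10 keys changed in 300 s\nappendonly yes          # enable the AOF log\nappendfsync everysec    # fsync the AOF once per second (balanced durability)\n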

"},{"location":"techdives/DistrubutedSystems/Redis/#5-replication-and-high-availability","title":"5. Replication and High Availability","text":"

Redis supports replication to create replicas of the primary (master) instance, enabling multiple read replicas and providing redundancy.

Best for: - Applications requiring high availability with automatic failover. - Scenarios where read-heavy workloads benefit from scaling reads across multiple replicas.

"},{"location":"techdives/DistrubutedSystems/Redis/#6-clustering-and-scalability","title":"6. Clustering and Scalability","text":"

Redis supports sharding through Redis Cluster, which enables data partitioning across multiple nodes, allowing horizontal scalability and distributed storage. Redis Cluster uses hash slots to determine data distribution across nodes, ensuring no single node contains the entire dataset.

Considerations: - Redis Cluster supports most single-key commands, but multi-key operations are restricted unless all keys map to the same slot. - Clustering can increase complexity in handling data operations across nodes but is essential for large datasets needing horizontal scalability.
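
A practical consequence of the hash-slot model: multi-key commands succeed only when all keys hash to the same slot. Hash tags (only the substring inside {braces} is hashed) are the usual way to co-locate related keys; a small illustrative example:

SET {user:1001}:name \"Alice\"\nSET {user:1001}:city \"New York\"\nMSET {user:1001}:a 1 {user:1001}:b 2   // allowed: identical hash tag => same slot\nCLUSTER KEYSLOT {user:1001}:name       // returns the slot both keys map to\n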

"},{"location":"techdives/DistrubutedSystems/Redis/#7-security-considerations","title":"7. Security Considerations","text":"

While Redis is often deployed in secure, private networks, security remains essential, especially for production environments:

"},{"location":"techdives/DistrubutedSystems/Redis/#8-client-libraries-and-tools","title":"8. Client Libraries and Tools","text":"

Redis has a robust ecosystem with libraries for popular programming languages, making it easy to integrate Redis across platforms:

Additionally, Redis CLI and Redis Insight are commonly used tools for managing and monitoring Redis instances.

"},{"location":"techdives/DistrubutedSystems/Redis/#9-rediss-single-threaded-nature-and-atomic-operations","title":"9. Redis\u2019s Single-Threaded Nature and Atomic Operations","text":"

Redis uses a single-threaded event loop to handle client requests, which keeps its design simple and its command execution efficient and atomic. This single-threaded model has specific implications:

Impact on Use Cases: - In scenarios where atomicity is essential, such as counters or distributed locks, Redis's single-threaded nature provides strong consistency guarantees. - For CPU-bound workloads, Redis may be limited by its single-threaded design. However, since Redis is primarily I/O-bound, it scales well for read-heavy or network-intensive applications.

Benefits of Single-Threaded Execution: - Simplicity in design and implementation, eliminating race conditions. - Predictable performance with guaranteed atomicity.

Drawbacks: - Limited to single-core processing for request handling. For CPU-bound tasks, Redis's single-threading may become a bottleneck, but horizontal scaling (e.g., Redis Cluster) can help distribute the load.
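
For example, increments need no client-side locking at all; a minimal sketch using the redis-py client (an assumed client choice; any client behaves the same way):

import redis\n\nr = redis.Redis(host=\"localhost\", port=6379)\n\n# Safe under heavy concurrency: INCR executes atomically inside\n# Redis's single-threaded event loop, so no updates are lost.\nr.set(\"page:views\", 0)\nr.incr(\"page:views\")\nprint(r.get(\"page:views\"))  # b\"1\"\n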

"},{"location":"techdives/DistrubutedSystems/Redis/#10-approximate-requests-per-second-rps","title":"10. Approximate Requests Per Second (RPS)","text":"Operation Type Description Approximate RPS per Instance Notes Simple Reads (GET) Basic read operation for retrieving a single value 100,000 - 150,000 RPS Higher performance achievable on optimized hardware Simple Writes (SET) Basic write operation for setting a single key-value pair 100,000 - 150,000 RPS Slightly reduced if using AOF with always persistence Complex Reads (e.g., ZRANGE) Reads on complex data structures like sorted sets 50,000 - 80,000 RPS Lower due to additional computation and memory access Complex Writes (e.g., ZADD) Writes on complex data structures like sorted sets 50,000 - 80,000 RPS Additional processing to maintain sorted order impacts performance With AOF (Append-Only File) Writes with always mode persistence (AOF) 60,000 - 80,000 RPS Slightly reduced due to disk I/O overhead Snapshotting (RDB) Writes with periodic snapshots (RDB) 80,000 - 100,000 RPS Minimal impact on RPS except during snapshotting periods when CPU/I/O load is higher With Redis Cluster Distributed across multiple nodes Millions of RPS (scales with nodes) Redis Cluster allows horizontal scaling, increasing RPS proportionally with additional nodes"},{"location":"techdives/DistrubutedSystems/Redis/#notes","title":"Notes:","text":""},{"location":"techdives/DistrubutedSystems/Redis/#11-use-cases-we-can-use-redis-in","title":"11. Use Cases We Can Use Redis In","text":""},{"location":"techdives/DistrubutedSystems/Redis/#111-caching","title":"11.1. Caching","text":""},{"location":"techdives/DistrubutedSystems/Redis/#overview","title":"Overview","text":"

Redis is highly effective as a caching layer, providing extremely low-latency data retrieval that reduces the load on backend databases and improves application performance.

"},{"location":"techdives/DistrubutedSystems/Redis/#how-it-works","title":"How It Works","text":"
  1. Cache Common Data: Redis is commonly used to cache data that is expensive to compute or retrieve, such as API responses, frequently queried database results, and configuration settings (see the cache-aside sketch below).

  2. Expiration and Eviction: Redis supports configurable expiration for keys, which allows cached data to expire after a specific time. It also supports eviction policies (like LRU or LFU) to automatically remove older or less-used items when memory limits are reached.
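
A minimal cache-aside sketch using the redis-py client (the key naming, 1-hour TTL, and the fetch_from_database helper are illustrative assumptions, not a prescribed implementation):

import redis\n\nr = redis.Redis()\n\ndef get_user_profile(user_id):\n    key = f\"user:{user_id}:profile\"\n    cached = r.get(key)\n    if cached is not None:\n        return cached  # cache hit, no database work\n    data = fetch_from_database(user_id)  # hypothetical backend loader\n    r.setex(key, 3600, data)  # cache miss: populate with a 1-hour TTL\n    return data\n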

"},{"location":"techdives/DistrubutedSystems/Redis/#implementation-steps","title":"Implementation Steps","text":""},{"location":"techdives/DistrubutedSystems/Redis/#benefits","title":"Benefits","text":""},{"location":"techdives/DistrubutedSystems/Redis/#112-session-management","title":"11.2. Session Management","text":""},{"location":"techdives/DistrubutedSystems/Redis/#overview_1","title":"Overview","text":"

Redis is commonly used as a session store for web applications, especially in distributed environments where sharing session data across multiple servers is critical.

"},{"location":"techdives/DistrubutedSystems/Redis/#how-it-works_1","title":"How It Works","text":"
  1. Store Session Data: Redis stores session data, often as a hash, using a unique session identifier as the key.
  2. Session Expiry: Redis supports setting time-to-live (TTL) for session keys, allowing automatic expiration of inactive sessions.
  3. Distributed Access: Applications running on multiple servers can access the same session data via Redis, providing a centralized session store, as sketched below.
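
A minimal sketch of that flow with redis-py (the session ID format and 30-minute TTL are illustrative assumptions):

import redis\n\nr = redis.Redis()\n\nsession_id = \"sess:4f2a91\"  # generated by the web framework\nr.hset(session_id, mapping={\"user_id\": \"1001\", \"role\": \"admin\"})\nr.expire(session_id, 1800)  # 30-minute TTL; refresh on each request\n\nsession = r.hgetall(session_id)  # any app server can read the same session\n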
"},{"location":"techdives/DistrubutedSystems/Redis/#implementation-steps_1","title":"Implementation Steps","text":""},{"location":"techdives/DistrubutedSystems/Redis/#benefits_1","title":"Benefits","text":""},{"location":"techdives/DistrubutedSystems/Redis/#113-real-time-analytics","title":"11.3. Real-Time Analytics","text":""},{"location":"techdives/DistrubutedSystems/Redis/#overview_2","title":"Overview","text":"

Redis\u2019s support for data structures like HyperLogLogs, sorted sets, and streams enables it to handle real-time analytics, tracking metrics, counts, and trends without requiring a traditional database.

"},{"location":"techdives/DistrubutedSystems/Redis/#how-it-works_2","title":"How It Works","text":"
  1. HyperLogLog for Unique Counts: Track unique visitors, page views, and other metrics using HyperLogLog, which approximates the count of unique items.
  2. Sorted Sets for Ranking: Track and rank items based on scores, useful for leaderboards or tracking user activity levels.
  3. Streams for Event Data: Redis streams can capture continuous event data, making it possible to analyze data in real time or replay it later (see the sketch below).
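
A small sketch combining the three structures with redis-py (key names and values are illustrative):

import redis\n\nr = redis.Redis()\n\n# HyperLogLog: approximate unique-visitor count\nr.pfadd(\"visitors:2024-12-31\", \"user1\", \"user2\", \"user1\")\nunique = r.pfcount(\"visitors:2024-12-31\")  # ~2\n\n# Sorted set: leaderboard ranking\nr.zincrby(\"leaderboard\", 10, \"alice\")\ntop10 = r.zrevrange(\"leaderboard\", 0, 9, withscores=True)\n\n# Stream: append an event for real-time or replayed analysis\nr.xadd(\"events\", {\"user\": \"alice\", \"action\": \"login\"})\n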
"},{"location":"techdives/DistrubutedSystems/Redis/#implementation-steps_2","title":"Implementation Steps","text":""},{"location":"techdives/DistrubutedSystems/Redis/#benefits_2","title":"Benefits","text":""},{"location":"techdives/DistrubutedSystems/Redis/#114-message-brokering","title":"11.4. Message Brokering","text":""},{"location":"techdives/DistrubutedSystems/Redis/#overview_3","title":"Overview","text":"

Redis\u2019s publish/subscribe (pub/sub) feature allows it to act as a lightweight message broker, facilitating real-time communication between distributed applications.

"},{"location":"techdives/DistrubutedSystems/Redis/#how-it-works_3","title":"How It Works","text":"
  1. Publisher: A service or application publishes messages to a specific channel.
  2. Subscriber: Other services or applications subscribe to that channel to receive messages.
  3. Message Delivery: Messages are delivered to all active subscribers listening to the channel at the time of publication, as sketched below.
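
A minimal redis-py sketch of that flow (the channel name is illustrative; note that pub/sub is fire-and-forget, so only subscribers connected at publish time receive the message):

import redis\n\nr = redis.Redis()\n\n# Subscriber side\np = r.pubsub()\np.subscribe(\"orders\")\n\n# Publisher side (typically a different process)\nr.publish(\"orders\", \"order:42 created\")\n\nfor message in p.listen():  # blocks, yielding messages as they arrive\n    if message[\"type\"] == \"message\":\n        print(message[\"data\"])  # b\"order:42 created\"\n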
"},{"location":"techdives/DistrubutedSystems/Redis/#implementation-steps_3","title":"Implementation Steps","text":""},{"location":"techdives/DistrubutedSystems/Redis/#example-commands","title":"Example Commands","text":""},{"location":"techdives/DistrubutedSystems/Redis/#benefits_3","title":"Benefits","text":""},{"location":"techdives/DistrubutedSystems/Redis/#115-geospatial-applications","title":"11.5. Geospatial Applications","text":""},{"location":"techdives/DistrubutedSystems/Redis/#overview_4","title":"Overview","text":"

Redis provides geospatial commands that make it suitable for applications requiring location-based searches and geofencing, such as ride-sharing or delivery tracking.

Redis uses a geohashing-like approach to handle geospatial data, combined with sorted sets to enable efficient location-based queries: Redis converts latitude and longitude coordinates into a geohash-like value, which is then stored as the score in a sorted set. This encoding allows Redis to store location data compactly and enables efficient proximity queries.

"},{"location":"techdives/DistrubutedSystems/Redis/#how-it-works_4","title":"How It Works","text":"
  1. Store Location Data: Use GEOADD to add locations with latitude, longitude, and an associated member (e.g., user ID or landmark).
  2. Location-Based Queries: Redis allows querying locations within a specified radius and finding distances between locations.
"},{"location":"techdives/DistrubutedSystems/Redis/#implementation-steps_4","title":"Implementation Steps","text":""},{"location":"techdives/DistrubutedSystems/Redis/#example-commands_1","title":"Example Commands","text":""},{"location":"techdives/DistrubutedSystems/Redis/#benefits_4","title":"Benefits","text":""},{"location":"techdives/DistrubutedSystems/Redis/#116-summary-table","title":"11.6 Summary Table","text":"Use Case Description Key Commands Benefits Caching Store frequently accessed data for faster retrieval. SETEX, GET, DEL Reduces latency, lowers database load Session Management Store user sessions for distributed web applications. HSET, HGETALL, EXPIRE Fast, centralized session access across servers Real-Time Analytics Track metrics, counts, and trends in real time. PFADD, PFCOUNT, ZADD, XADD, XREAD Provides instant insights, reduces need for dedicated platforms Message Brokering Facilitate real-time communication between services. PUBLISH, SUBSCRIBE Real-time updates, lightweight message broker Geospatial Apps Perform location-based searches and calculations. GEOADD, GEORADIUS, GEORADIUSBYMEMBER Efficient geospatial operations for location-based services"},{"location":"techdives/DistrubutedSystems/Redis/#12-redis-issues","title":"12. Redis Issues","text":"

Let's dive deep into some key challenges, such as hot keys, cache avalanche, cache penetration, and cache stampede, along with their corresponding solutions.

"},{"location":"techdives/DistrubutedSystems/Redis/#121-hot-key-issue","title":"12.1. Hot Key Issue","text":""},{"location":"techdives/DistrubutedSystems/Redis/#description","title":"Description","text":"

A hot key issue occurs when a single key in Redis is accessed extremely frequently, causing uneven load distribution. This can happen in applications where certain data (e.g., a trending topic or popular product) is heavily requested. A hot key can overwhelm the Redis server or specific nodes in a Redis Cluster, leading to latency spikes and reduced performance.

"},{"location":"techdives/DistrubutedSystems/Redis/#causes","title":"Causes","text":""},{"location":"techdives/DistrubutedSystems/Redis/#solutions","title":"Solutions","text":"
  1. Replicate the Key: Store multiple copies of the hot key in Redis (e.g., hotkey_1, hotkey_2, hotkey_3), then use application logic to randomly pick a replica on each access. This distributes the load across multiple keys (see the sketch below).

  2. Use Redis Cluster: In a Redis Cluster, distribute the load by sharding hot keys across nodes. This may not completely eliminate the issue, but it helps mitigate the impact by spreading access across the cluster.

  3. Client-Side Caching: Implement a local cache on the client side or within the application servers to reduce the frequency of requests to Redis. This technique works well when the data is static or changes infrequently.

  4. Use a Load-Balancing Proxy: Use a Redis proxy (like Twemproxy or Codis) to balance requests for the hot key across multiple Redis instances.
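
A sketch of the key-replication idea from solution 1, using redis-py (the replica count and key names are illustrative assumptions):

import random\nimport redis\n\nr = redis.Redis()\n\nN_REPLICAS = 3\n\ndef write_hot_key(value):\n    # Keep every replica key in sync on writes.\n    for i in range(N_REPLICAS):\n        r.set(f\"hotkey_{i}\", value)\n\ndef read_hot_key():\n    # Spread reads across the replicas to distribute the load.\n    return r.get(f\"hotkey_{random.randrange(N_REPLICAS)}\")\n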
"},{"location":"techdives/DistrubutedSystems/Redis/#122-cache-avalanche","title":"12.2. Cache Avalanche","text":""},{"location":"techdives/DistrubutedSystems/Redis/#description_1","title":"Description","text":"

A cache avalanche occurs when many cache keys expire at once, leading to a sudden flood of requests to the backend database as the cache misses accumulate. This can overwhelm the database, causing latency spikes or even outages.

"},{"location":"techdives/DistrubutedSystems/Redis/#causes_1","title":"Causes","text":""},{"location":"techdives/DistrubutedSystems/Redis/#solutions_1","title":"Solutions","text":"
  1. Add Randomized Expiry Times: Set expiration times with a randomized offset (e.g., add a few seconds or minutes randomly) to avoid simultaneous expiry. For example:

    ttl = 3600 + random.randint(-300, 300)  # 3600 seconds +/- 5 minutes\n

  2. Cache Pre-Warming: Preload critical data into Redis before it expires. You can use background jobs to check key expiration and refresh data periodically.

  3. Lazy Loading with Synchronized Locking: Use a distributed locking mechanism so that only one thread refreshes the data in Redis while others wait. This prevents multiple processes from overloading the backend database.

  4. Graceful Degradation Fallback: Provide stale or default data temporarily if the database is overwhelmed. This approach buys time until the cache is repopulated.
"},{"location":"techdives/DistrubutedSystems/Redis/#123-cache-penetration","title":"12.3. Cache Penetration","text":""},{"location":"techdives/DistrubutedSystems/Redis/#description_2","title":"Description","text":"

Cache penetration happens when requests for non-existent keys repeatedly bypass the cache and go to the backend database. Since these keys don\u2019t exist, they are never cached, resulting in continuous database requests, increasing the load on the database.

"},{"location":"techdives/DistrubutedSystems/Redis/#causes_2","title":"Causes","text":""},{"location":"techdives/DistrubutedSystems/Redis/#solutions_2","title":"Solutions","text":"
  1. Cache Null Values: When a request results in a database miss, store a null marker in Redis with a short TTL (e.g., 5 minutes). Future requests for the same key will hit Redis instead of the database. Example (redis-py cannot store None directly, so a sentinel string is cached instead):

    if not redis.exists(\"non_existent_key\"):\n    data = fetch_from_database(\"non_existent_key\")\n    if data is None:\n        redis.setex(\"non_existent_key\", 300, \"__null__\")  # Cache a null marker for 5 minutes\n

  2. Input Validation: Filter out clearly invalid requests before querying Redis or the backend. For instance, if certain key patterns are obviously invalid, reject them early in the request flow.

  3. Bloom Filter: Maintain a Bloom filter at the cache layer to quickly determine whether a key can possibly exist in the database. This discards requests for non-existent keys without hitting Redis or the backend; see the sketch below.
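
A sketch of the Bloom-filter check (this assumes the RedisBloom module is loaded; the BF.* commands are not part of core Redis, and the capacity and error rate shown are illustrative):

BF.RESERVE known_keys 0.01 1000000   // 1% false-positive rate, capacity 1M\nBF.ADD known_keys \"user:1001\"        // add keys as they are created\nBF.EXISTS known_keys \"user:9999\"     // 0 => definitely absent, skip the database\n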
"},{"location":"techdives/DistrubutedSystems/Redis/#124-cache-stampede","title":"12.4. Cache Stampede","text":""},{"location":"techdives/DistrubutedSystems/Redis/#description_3","title":"Description","text":"

A cache stampede occurs when multiple threads or clients attempt to update an expired cache key simultaneously, causing a burst of requests to the backend database. This is similar to a cache avalanche but occurs at the key level rather than across all keys.

"},{"location":"techdives/DistrubutedSystems/Redis/#causes_3","title":"Causes","text":""},{"location":"techdives/DistrubutedSystems/Redis/#solutions_3","title":"Solutions","text":"
  1. Mutex Locking: Use a distributed lock (e.g., Redlock) so that only one client refreshes the cache while others wait, reducing the load on the database:

    # Pseudocode for acquiring a lock (with an expiry so a crashed client cannot hold it forever)\nif redis.set(\"lock:key\", 1, nx=True, ex=30):\n    try:\n        # Fetch and cache the data\n        data = fetch_from_database(\"key\")\n        redis.setex(\"key\", 3600, data)\n    finally:\n        redis.delete(\"lock:key\")  # Release the lock\n

  2. Early Re-Caching (Soft Expiration): Set a short expiration on frequently requested keys and refresh them asynchronously before they expire. This keeps the data fresh in Redis and avoids a stampede.

  3. Leverage Stale Data: Allow clients to use slightly stale data by extending the expiration time while a refresh is in progress. This minimizes the load on the backend.
"},{"location":"techdives/DistrubutedSystems/Redis/#125-memory-and-eviction-issues","title":"12.5. Memory and Eviction Issues","text":""},{"location":"techdives/DistrubutedSystems/Redis/#description_4","title":"Description","text":"

Redis operates in memory, so it has a limited capacity. When Redis reaches its memory limit, it must evict keys to free up space, potentially removing critical data. Improper eviction policies can lead to cache churn and data inconsistency.

"},{"location":"techdives/DistrubutedSystems/Redis/#causes_4","title":"Causes","text":""},{"location":"techdives/DistrubutedSystems/Redis/#solutions_4","title":"Solutions","text":"
  1. Choose an Appropriate Eviction Policy: Redis offers multiple eviction policies (noeviction, allkeys-lru, volatile-lru, allkeys-lfu, etc.). Choose one that matches your data access patterns, for instance:

    • LRU (Least Recently Used): Evicts the least recently accessed keys; suitable for caching.
    • LFU (Least Frequently Used): Evicts the keys that are accessed least frequently.

  2. Optimize Data Size: Reduce the memory footprint by optimizing data storage, such as using shorter key names or serializing data efficiently (e.g., storing integers directly rather than as strings).

  3. Monitor and Scale: Continuously monitor Redis memory usage with tools like the Redis CLI or Redis Insight. If memory usage grows, consider horizontal scaling with Redis Cluster.

  4. Use Redis as a Pure Cache: Set appropriate TTLs on keys and use an eviction policy that keeps the most valuable data, as in the example below.
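
For example, a cache-oriented instance might be configured like this (the memory limit is illustrative):

CONFIG SET maxmemory 2gb\nCONFIG SET maxmemory-policy allkeys-lru   // evict least recently used keys first\n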
"},{"location":"techdives/DistrubutedSystems/Redis/#126-slow-queries-and-latency-issues","title":"12.6. Slow Queries and Latency Issues","text":""},{"location":"techdives/DistrubutedSystems/Redis/#description_5","title":"Description","text":"

Redis is designed for fast access, but certain operations can cause high latency, especially when handling large datasets or complex commands like ZRANGE on large sorted sets.

"},{"location":"techdives/DistrubutedSystems/Redis/#causes_5","title":"Causes","text":""},{"location":"techdives/DistrubutedSystems/Redis/#solutions_5","title":"Solutions","text":"
  1. Optimize Commands: Avoid commands that block or are computationally expensive. For example, break large list processing into smaller ranges instead of processing the entire list in one call.

  2. Monitor Slow Queries: Use the Redis Slow Log to identify and optimize slow commands; it records commands that exceed a configured execution-time threshold (see the example below).

  3. Use Sharding or Clustering: Split large datasets across multiple nodes in a Redis Cluster to balance the load and reduce the impact of slow commands on any single node.
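
A quick look at the Slow Log workflow (the 10 ms threshold is illustrative):

CONFIG SET slowlog-log-slower-than 10000   // log commands slower than 10,000 microseconds (10 ms)\nSLOWLOG GET 10                             // inspect the 10 most recent slow commands\nSLOWLOG RESET                              // clear the log after investigating\n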
"},{"location":"techdives/DistrubutedSystems/Redis/#13-tuning-redis","title":"13. Tuning Redis","text":"Metric Description Tuning Recommendations Commands/Tools Memory Usage Measures the memory consumed by Redis, including all data stored in memory. - Monitor and limit data size per key. - Use appropriate eviction policies (allkeys-lru, allkeys-lfu, etc.). - Compress data (e.g., shorter key names). - Use Redis MEMORY USAGE to check memory footprint of specific keys. INFO memory, MEMORY USAGE CPU Utilization CPU load on the Redis server, indicating overall processing load. - Reduce CPU-intensive commands (ZRANGE on large sets, large LRANGE operations). - Offload tasks to background or batch processing if possible. - Use pipelining for batch operations. System tools (e.g., top), INFO cpu Cache Hit Ratio The ratio of cache hits to total cache requests (ideally close to 1). - Identify hot keys and cache them effectively. - Increase Redis memory if hit ratio is low due to evictions. - Ensure sufficient TTL to avoid frequent cache misses. INFO stats Evicted Keys Number of keys evicted due to memory limits. - Increase available memory if eviction is high. - Choose an appropriate eviction policy (allkeys-lru, volatile-ttl, etc.). - Adjust key TTLs to prevent frequent eviction of important data. INFO memory Connected Clients Number of clients connected to Redis at any given time. - Increase the maxclients configuration if reaching limits. - Use client-side caching to reduce load on Redis. INFO clients, CLIENT LIST Latency (Command Time) Measures the average response time per command in milliseconds. - Avoid using blocking or heavy commands on large data sets. - Distribute large data across a Redis Cluster. - Monitor slow log for commands that exceed expected time. SLOWLOG GET, INFO commandstats Command Rate Rate of commands per second, which affects overall performance. - Spread load across multiple Redis instances if command rate is high. - Use pipelining to reduce round-trips. - Optimize or reduce the frequency of unnecessary commands. INFO stats Key Expirations Number of keys that expire per second. - Add randomized TTLs to prevent cache avalanches. - Pre-warm critical keys to avoid sudden cache misses. - Monitor TTL settings to ensure balanced expiration. INFO stats Replication Lag Delay in data synchronization between master and replica nodes. - Tune repl-backlog-size for better sync reliability. - Monitor network latency and throughput between master and replica. - Use Redis Sentinel for reliable failover. INFO replication, REPLCONF Data Persistence Durability How frequently Redis saves data to disk (AOF/RDB). - Use RDB for infrequent snapshots; use AOF for higher durability. - Tune AOF rewrite frequency (auto-aof-rewrite-percentage). - Adjust RDB save intervals based on data criticality. CONFIG SET save, CONFIG SET appendonly Keyspace Misses Number of attempts to access non-existent keys. - Cache null values temporarily for non-existent keys to reduce misses. - Add input validation to filter invalid requests. - Use Bloom filters for non-existent keys in high-traffic systems. INFO stats, MEMORY USAGE Redis Slow Log Logs slow-running commands that exceed a threshold. - Use SLOWLOG to monitor commands that exceed time limits. - Adjust commands and optimize keys based on slow log findings. - Tune slowlog-log-slower-than to track performance bottlenecks. SLOWLOG GET, SLOWLOG RESET Network Bandwidth Measures bandwidth usage, impacting latency and speed. 
- Use Redis clustering to reduce network load on a single instance. - Enable pipelining and compression where possible. - Monitor and minimize network latency for high-frequency queries. System tools (e.g., ifconfig), INFO Eviction Policy Determines which keys Redis evicts first when memory limit is reached. - Choose policies based on use case (allkeys-lru, allkeys-lfu for caching, volatile-ttl for expiring keys first). - Regularly review and adjust TTLs for key eviction optimization. CONFIG SET maxmemory-policy, INFO memory Persistence Overhead Memory and CPU impact due to persistence settings (RDB or AOF). - Adjust save intervals or AOF rewriting to reduce persistence load. - Use a combination of AOF and RDB if the application requires high durability with performance. INFO persistence, CONFIG SET save Cluster Slot Utilization Measures how well data is balanced across Redis Cluster slots. - Rebalance slots if certain nodes handle disproportionate load. - Use Redis Cluster sharding to ensure balanced key distribution. - Regularly monitor slots and reshard as needed. CLUSTER INFO, CLUSTER NODES, CLUSTER REBALANCE"},{"location":"techdives/DistrubutedSystems/Redis/#14-best-practices","title":"14. Best Practices","text":"

To make the most of Redis:

"},{"location":"techdives/DistrubutedSystems/Redis/#15-questions","title":"15. Questions","text":"

Here\u2019s a structured Q&A-style deep dive into Redis to address all of these technical aspects.

"},{"location":"techdives/DistrubutedSystems/Redis/#1-sql-or-nosql-if-nosql-what-type-of-nosql","title":"1. SQL or NoSQL? If NoSQL, what type of NoSQL?","text":"

Q: Is Redis an SQL or NoSQL database?

A: Redis is a NoSQL database. Specifically, it is a key-value store that supports various data structures (e.g., strings, hashes, lists, sets, sorted sets, streams, bitmaps, and geospatial indexes).

"},{"location":"techdives/DistrubutedSystems/Redis/#2-type-of-db-supports-polymorphic","title":"2. Type of DB \u2026 Supports Polymorphic?","text":"

Q: What type of NoSQL database is Redis, and does it support polymorphism?

A: Redis is a key-value in-memory data store with support for a variety of data structures. Redis does not natively support polymorphic types in the way that document-based NoSQL databases do, but you can achieve some level of polymorphism by encoding data in a structured way (e.g., JSON or hash maps).

"},{"location":"techdives/DistrubutedSystems/Redis/#3-main-feature-db-built-for-and-who-built-it-and-on-what","title":"3. Main Feature, DB Built For, and Who Built It and on What","text":"

Q: What was Redis built for, who built it, and what are its main features?

A: Redis was initially created by Salvatore Sanfilippo as a high-performance in-memory database for use cases requiring low-latency, real-time data processing. Redis is written in C, and its main features include in-memory storage, data persistence, flexible data structures, and capabilities for caching, messaging, and real-time analytics.

"},{"location":"techdives/DistrubutedSystems/Redis/#4-olap-or-oltp-does-it-support-acid-or-base","title":"4. OLAP or OLTP? Does it support ACID or BASE?","text":"

Q: Is Redis OLAP or OLTP, and does it adhere to ACID or BASE properties?

A: Redis is generally used in OLTP (Online Transaction Processing) scenarios due to its low-latency and high-throughput design. Redis does not natively support full ACID properties but can achieve atomic operations within individual commands due to its single-threaded nature. It follows the BASE (Basically Available, Soft state, Eventual consistency) model.

"},{"location":"techdives/DistrubutedSystems/Redis/#5-cap-theorem-where-does-redis-fall","title":"5. CAP Theorem \u2013 Where does Redis fall?","text":"

Q: How does Redis align with the CAP theorem, and what does each part (Consistency, Availability, Partition Tolerance) mean?

A: Redis, especially in a clustered setup, adheres to the CP (Consistency and Partition Tolerance) model of the CAP theorem. In a non-clustered single-instance setup, Redis is highly consistent. However, in a clustered setup, it sacrifices some availability for consistency.

Stronger consistency in Redis can be achieved with strict persistence settings and synchronous replication (e.g., the WAIT command).

"},{"location":"techdives/DistrubutedSystems/Redis/#6-cluster-structure-from-cluster-to-records-the-whole-path","title":"6. Cluster Structure \u2013 From Cluster to Records, the Whole Path","text":"

Q: What is the structure of a Redis cluster from clusters down to individual records?

A: A Redis cluster is organized as follows: - Cluster: Composed of multiple nodes. - Nodes: Each node is responsible for a subset of the keyspace, organized into hash slots (16,384 in total). - Shards: Each primary node and its replicas form a shard of the data. - Keys/Records: Each key is hashed to a specific slot, determining the node responsible for storing it.

"},{"location":"techdives/DistrubutedSystems/Redis/#7-the-fundamentals-of-a-cluster-all-building-blocks-from-cluster-to-records","title":"7. The Fundamentals of a Cluster \u2013 All Building Blocks from Cluster to Records","text":"

Q: What are the core building blocks of a Redis cluster?

A: Core components include: - Nodes: Independent Redis instances in a cluster. - Hash Slots: Redis divides keys into 16,384 slots for distribution across nodes. - Replication: Each primary node can have replicas to ensure data redundancy. - Partitions (Shards): Each node holds a partition of data for horizontal scalability.

"},{"location":"techdives/DistrubutedSystems/Redis/#8-multi-master-support","title":"8. Multi-Master Support","text":"

Q: Does Redis support multi-master configurations?

A: Redis does not support multi-master configurations in its native setup. It uses a single-master architecture per shard to ensure consistency.

"},{"location":"techdives/DistrubutedSystems/Redis/#9-master-slave-relationship-in-data-nodes","title":"9. Master-Slave Relationship in Data Nodes","text":"

Q: Does Redis follow a master-slave structure between data nodes?

A: Yes, in a Redis cluster, each data shard has a single master with one or more replicas (slaves) for redundancy. The slaves serve as read-only replicas unless promoted during failover.

"},{"location":"techdives/DistrubutedSystems/Redis/#10-node-structures-in-cluster","title":"10. Node Structures in Cluster","text":"

Q: What are the structures of nodes in a Redis cluster?

A: In a Redis cluster, each node is responsible for a subset of hash slots, with a master node serving write requests and one or more replicas serving as failover or read-only instances.

"},{"location":"techdives/DistrubutedSystems/Redis/#11-cluster-scaling-horizontal-and-vertical","title":"11. Cluster Scaling \u2013 Horizontal and Vertical","text":"

Q: Does Redis support horizontal and vertical scaling, and which is preferred?

A: Redis supports horizontal scaling (adding more nodes) via sharding in a cluster, which is generally preferred. Vertical scaling (adding more memory/CPU) is also possible but limited by hardware.

"},{"location":"techdives/DistrubutedSystems/Redis/#12-high-availability-explanation","title":"12. High Availability \u2013 Explanation","text":"

Q: How does Redis provide high availability?

A: Redis achieves high availability through replication and Redis Sentinel for monitoring and automatic failover. Redis Cluster further enhances availability by automatically promoting replicas if a primary node fails.

"},{"location":"techdives/DistrubutedSystems/Redis/#13-fault-tolerance-explanation","title":"13. Fault Tolerance \u2013 Explanation","text":"

Q: What mechanisms does Redis have for fault tolerance?

A: Redis ensures fault tolerance through data replication across replicas, and Sentinel monitors the master nodes to trigger failover in case of node failure.

"},{"location":"techdives/DistrubutedSystems/Redis/#14-replication","title":"14. Replication","text":"

Q: How does replication work in Redis?

A: Redis replication is asynchronous by default, with each master node replicating data to one or more replicas. In the event of a master failure, a replica is promoted to master status.

"},{"location":"techdives/DistrubutedSystems/Redis/#15-partitioning-and-sharding","title":"15. Partitioning and Sharding","text":"

Q: How does Redis handle partitioning and sharding?

A: Redis uses hash-based partitioning with 16,384 hash slots to distribute data across nodes. Each key is assigned a slot via slot = CRC16(key) mod 16384, which maps it to a specific node.

"},{"location":"techdives/DistrubutedSystems/Redis/#16-caching-in-depth","title":"16. Caching in Depth","text":"

Q: How does Redis perform caching?

A: Redis is an in-memory cache, providing low-latency access with various caching strategies (e.g., TTL, eviction policies like LRU and LFU). It supports key expiration and eviction for memory management.

"},{"location":"techdives/DistrubutedSystems/Redis/#17-storage-type-trees-used-for-storage","title":"17. Storage Type \u2013 Trees Used for Storage","text":"

Q: What storage type and structures does Redis use?

A: Redis stores data in memory using simple data structures and does not use B-trees or similar structures. Data is kept in-memory and optionally persisted to disk (AOF/RDB).

"},{"location":"techdives/DistrubutedSystems/Redis/#18-segments-or-page-approach","title":"18. Segments or Page Approach?","text":"

Q: Does Redis use a segments approach, page approach, or something else?

A: Redis does not use segments or page-based storage. Data is stored in-memory and is managed directly by the Redis process.

"},{"location":"techdives/DistrubutedSystems/Redis/#19-indexing-how-does-it-work","title":"19. Indexing \u2013 How Does It Work?","text":"

Q: How does Redis handle indexing?

A: Redis does not use traditional secondary indexes. Keys are looked up directly in an in-memory hash table (and, in a cluster, routed by hash slot), providing O(1) access time to each key.

"},{"location":"techdives/DistrubutedSystems/Redis/#20-routing","title":"20. Routing","text":"

Q: How does Redis route requests to the correct node in a cluster?

A: Redis routes requests based on key hashing. The key is hashed to determine its slot, which maps it to a specific node.

"},{"location":"techdives/DistrubutedSystems/Redis/#21-latency-including-write-read-indexing-and-replication-latency","title":"21. Latency \u2013 Including Write, Read, Indexing, and Replication Latency","text":"

Q: What are Redis\u2019s latency characteristics?

A: Redis provides sub-millisecond read/write latency under normal conditions. Replication latency is generally low, though network overhead may add some delay.

"},{"location":"techdives/DistrubutedSystems/Redis/#22-versioning","title":"22. Versioning","text":"

Q: Does Redis support versioning?

A: Redis does not natively support versioning. Application logic may be required to manage version control if needed.

"},{"location":"techdives/DistrubutedSystems/Redis/#23-locking-and-concurrency","title":"23. Locking and Concurrency","text":"

Q: How does Redis handle locking and concurrency?

A: Redis supports distributed locking through the Redlock algorithm for ensuring safe concurrent access across clients.

"},{"location":"techdives/DistrubutedSystems/Redis/#24-write-ahead-logging-wal","title":"24. Write-Ahead Logging (WAL)","text":"

Q: Does Redis support WAL?

A: Redis does not use WAL directly. However, the Append-Only File (AOF) is similar, logging each write operation to ensure persistence.

"},{"location":"techdives/DistrubutedSystems/Redis/#25-change-data-capture-cdc-support","title":"25. Change Data Capture (CDC) Support","text":"

Q: Does Redis support CDC?

A: Redis does not natively support Change Data Capture. External tools may be needed for real-time data change tracking.

"},{"location":"techdives/DistrubutedSystems/Redis/#26-query-type-and-query-in-depth","title":"26. Query Type and Query in Depth","text":"

Q: What types of queries does Redis support?

A: Redis is key-based and supports simple read/write commands without complex query languages. Operations include GET, SET, HGET, ZADD, etc.

"},{"location":"techdives/DistrubutedSystems/Redis/#27-query-optimizers","title":"27. Query Optimizers","text":"

Q: Does Redis have query optimizers?

A: Redis does not have traditional query optimizers, as it operates in O(1) for most key-based lookups.

"},{"location":"techdives/DistrubutedSystems/Redis/#28-sql-support","title":"28. SQL Support","text":"

Q: Does Redis support SQL?

A: Redis does not natively support SQL. However, modules such as RedisJSON and RediSearch can provide richer, SQL-like querying capabilities.

"},{"location":"techdives/DistrubutedSystems/Redis/#29-circuit-breakers","title":"29. Circuit Breakers","text":"

Q: Does Redis have built-in circuit breaker support?

A: Redis itself does not implement circuit breakers. This is typically handled at the application or middleware layer.

"},{"location":"techdives/DistrubutedSystems/Redis/#30-data-retention-and-lifecycle-management","title":"30. Data Retention and Lifecycle Management","text":"

Q: How does Redis handle data lifecycle and retention?

A: Redis supports TTLs on keys, and eviction policies like Least Recently Used (LRU) enable retention management. Redis doesn\u2019t support multi-tier storage.

"},{"location":"techdives/DistrubutedSystems/Redis/#31-other-features","title":"31. Other Features","text":"

Q: What other features does Redis offer?

A: Redis supports data structures like streams for event logging, pub/sub for messaging, and geospatial indexing for location-based queries.

"},{"location":"techdives/DistrubutedSystems/Redis/#32-additional-modules","title":"32. Additional Modules","text":"

Q: What modules or libraries can be added to Redis?

A: Redis offers modules like RedisJSON (for JSON handling), RedisGraph (for graph data), and RedisBloom (for probabilistic data structures).

"},{"location":"techdives/DistrubutedSystems/Redis/#33-optimization-and-tuning-of-clusters","title":"33. Optimization and Tuning of Clusters","text":"

Q: How do you optimize and tune Redis clusters?

A: Key optimizations include appropriate partitioning, replication settings, eviction policies, and monitoring memory/CPU usage.

"},{"location":"techdives/DistrubutedSystems/Redis/#34-backup-and-recovery","title":"34. Backup and Recovery","text":"

Q: How does Redis handle backup and recovery?

A: Redis supports RDB snapshots and AOF for persistence. Backups are easily managed via AOF or manual RDB dumps.
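
A minimal sketch of both mechanisms from redis-cli:

BGSAVE                        # fork and write an RDB snapshot in the background\nCONFIG SET appendonly yes     # enable AOF persistence at runtime\n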

"},{"location":"techdives/DistrubutedSystems/Redis/#35-security","title":"35. Security","text":"

Q: What are Redis\u2019s security features?

A: Redis supports authentication (the AUTH command), SSL/TLS encryption, network-level restrictions such as IP allow-listing, and access control lists (ACLs) introduced in Redis 6.
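
For example (the password is a placeholder):

CONFIG SET requirepass \"s3cret\"   # set a server password at runtime\nAUTH \"s3cret\"                     # authenticate the client connection\n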

"},{"location":"techdives/DistrubutedSystems/Redis/#36-migration","title":"36. Migration","text":"

Q: Does Redis support migration tools?

A: Redis offers tools like redis-cli for basic migration, and Redis Enterprise provides more advanced migration capabilities.

"},{"location":"techdives/DistrubutedSystems/Redis/#37-recommended-cluster-setup","title":"37. Recommended Cluster Setup","text":"

Q: What\u2019s the recommended Redis cluster setup?

A: Typically, a Redis Cluster setup starts with 3 master nodes and 3 replicas (one per master) for high availability, totaling 6 nodes.

"},{"location":"techdives/DistrubutedSystems/Redis/#38-basic-cluster-setup-with-node-numbers-in-distributed-mode","title":"38. Basic Cluster Setup with Node Numbers in Distributed Mode","text":"

Q: How does a basic Redis cluster setup look in distributed mode?

A: A minimal Redis Cluster in distributed mode consists of 3 master nodes (each owning roughly 5,461 of the 16,384 hash slots) with 1 replica per master for redundancy.
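
Such a 6-node cluster can be bootstrapped with redis-cli (hosts and ports are placeholders):

redis-cli --cluster create 10.0.0.1:6379 10.0.0.2:6379 10.0.0.3:6379 10.0.0.4:6379 10.0.0.5:6379 10.0.0.6:6379 --cluster-replicas 1\n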

"},{"location":"techdives/DistrubutedSystems/Redis/#39-segments-approach-or-page-approach-or-others","title":"39. Segments Approach or Page Approach or others","text":"

Q: Does Redis use a segments approach, page approach, or another storage approach?

A: Neither. Redis is an in-memory database, so data is stored directly in memory with no fixed segment or page structure, allowing rapid access to keys. Redis is optimized for speed, relying on data structures like hash tables and direct in-memory allocation rather than the traditional on-disk segment or page layouts common in disk-based databases.

"},{"location":"techdives/DistrubutedSystems/S3/","title":"Amazon S3 (Simple Storage Service)","text":""},{"location":"techdives/DistrubutedSystems/S3/#1-introduction","title":"1. Introduction","text":"

Amazon S3 is a scalable object storage service offered by Amazon Web Services (AWS). It is designed to store and retrieve any amount of data from anywhere on the web, making it suitable for various use cases, including data backup, archiving, big data analytics, and hosting static websites.

"},{"location":"techdives/DistrubutedSystems/S3/#2-architecture-and-fundamentals","title":"2. Architecture and Fundamentals","text":""},{"location":"techdives/DistrubutedSystems/S3/#3-storage-classes","title":"3. Storage Classes","text":"

S3 offers a variety of storage classes optimized for different use cases, balancing cost and performance, including S3 Standard, S3 Intelligent-Tiering, S3 Standard-IA, S3 One Zone-IA, and the S3 Glacier archive tiers:
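
For instance, a storage class can be chosen per object at upload time with the AWS CLI (bucket and key are placeholders):

aws s3 cp report.csv s3://my-bucket/reports/report.csv --storage-class STANDARD_IA\n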

"},{"location":"techdives/DistrubutedSystems/S3/#data-retrieval-options","title":"Data Retrieval Options","text":""},{"location":"techdives/DistrubutedSystems/S3/#4-durability-availability-and-redundancy","title":"4. Durability, Availability and Redundancy","text":""},{"location":"techdives/DistrubutedSystems/S3/#5-security-features","title":"5. Security Features","text":""},{"location":"techdives/DistrubutedSystems/S3/#6-data-management-and-lifecycle-policies","title":"6. Data Management and Lifecycle Policies","text":""},{"location":"techdives/DistrubutedSystems/S3/#7-performance-and-optimization","title":"7. Performance and Optimization","text":""},{"location":"techdives/DistrubutedSystems/S3/#8-use-cases","title":"8. Use Cases","text":""},{"location":"techdives/DistrubutedSystems/S3/#9-best-practices","title":"9. Best Practices","text":""},{"location":"techdives/GeneralConcepts/git/","title":"Git","text":"

Version control is the cornerstone of modern software development, and Git stands as the most widely used version control system in the world. Whether you're a beginner or an experienced developer, understanding Git is crucial for collaborative and individual projects alike. In this article, we'll take a deep dive into Git, covering everything from its basics to its advanced features, finishing with a cheat sheet you can keep handy.

"},{"location":"techdives/GeneralConcepts/git/#what-is-git","title":"What is Git ?","text":"

Git is a distributed version control system that tracks changes in files, enabling multiple developers to collaborate on a project effectively. Created in 2005 by Linus Torvalds, Git was initially designed for managing the Linux kernel's development. Today, it powers everything from small personal projects to massive enterprise software systems.

Key features

"},{"location":"techdives/GeneralConcepts/git/#installing-git","title":"Installing Git","text":"

Getting started with Git begins with installing it on your system. Here's how you can set it up based on your operating system:

Mac: Use Homebrew to install Git
brew install git\n

Download Git for Windows from git-scm.com and follow the installer instructions.

Install Git using your distribution's package manager
sudo apt install git  # For Debian/Ubuntu\nsudo yum install git  # For CentOS/Red Hat\n
"},{"location":"techdives/GeneralConcepts/git/#verify-installation","title":"Verify Installation","text":"To confirm Git is installed correctly, run
git --version\n
"},{"location":"techdives/GeneralConcepts/git/#initial-configuration","title":"Initial Configuration","text":"After installation, configure Git with your name, email, and preferred editor
git config --global user.name \"Your Name\"\ngit config --global user.email \"your.email@example.com\"\ngit config --global core.editor \"code\"  # Use VSCode or any editor\n
"},{"location":"techdives/GeneralConcepts/git/#getting-started-with-git","title":"Getting Started with Git","text":""},{"location":"techdives/GeneralConcepts/git/#creating-a-new-repository","title":"Creating a New Repository","text":"

To start tracking changes in a project, initialize a repository

git init\n

"},{"location":"techdives/GeneralConcepts/git/#clone-an-existing-repository","title":"Clone an Existing Repository","text":"

To work on an existing project, clone its repository

git clone <repository-url>\n

"},{"location":"techdives/GeneralConcepts/git/#tracking-changes","title":"Tracking Changes","text":"

Stage Changes: Add files to the staging area

git add <file>\n

Commit Changes: Save changes to the repository

git commit -m \"<Write a proper commit message>\"\n

"},{"location":"techdives/GeneralConcepts/git/#checking-repository-status","title":"Checking Repository Status","text":"

View the status of your working directory and staged files

git status\n

"},{"location":"techdives/GeneralConcepts/git/#viewing-commit-history","title":"Viewing Commit History","text":"

Review the project's history with

git log\ngit log --oneline  # Concise view\n

"},{"location":"techdives/GeneralConcepts/git/#working-with-branches","title":"Working with Branches","text":"

Git's branching system is one of its most powerful features. Branches allow you to work on different features or bug fixes without affecting the main codebase.

"},{"location":"techdives/GeneralConcepts/git/#creating-switching-branches","title":"Creating Switching Branches","text":"

Create a new branch

git branch <branch-name>\n
Switch to the branch
git checkout <branch-name>\ngit switch <branch-name>  # New alternative\n
Creating and Switching to the branch
git checkout -b <branch-name>\n

"},{"location":"techdives/GeneralConcepts/git/#merging-branches","title":"Merging Branches","text":"

To integrate changes from one branch into another

git checkout main # replace main with custom branch\ngit merge <branch-name>\n

"},{"location":"techdives/GeneralConcepts/git/#handling-merge-conflicts","title":"Handling Merge Conflicts","text":"

If Git detects conflicting changes, resolve them manually by editing the affected files. Then

git add <file>\ngit commit\n

"},{"location":"techdives/GeneralConcepts/git/#remote-repositories","title":"Remote Repositories","text":"

Remote repositories allow teams to collaborate effectively.

"},{"location":"techdives/GeneralConcepts/git/#adding-a-remote","title":"Adding a Remote","text":"

Link your local repository to a remote

git remote add origin <repository-url>\n

"},{"location":"techdives/GeneralConcepts/git/#pushing-changes","title":"Pushing Changes","text":"

Send your commits to the remote repository

git push origin <branch-name>\n

"},{"location":"techdives/GeneralConcepts/git/#pulling-updates","title":"Pulling Updates","text":"

Fetch and integrate changes from the remote repository

git pull\n

"},{"location":"techdives/GeneralConcepts/git/#removing-a-remote","title":"Removing a Remote","text":"

If needed, you can remove a remote

git remote remove origin\n

"},{"location":"techdives/GeneralConcepts/git/#advanced-git","title":"Advanced Git","text":""},{"location":"techdives/GeneralConcepts/git/#stashing-changes","title":"Stashing Changes","text":"

Temporarily save changes without committing

git stash\n
Retrieve them later with
git stash apply\n

"},{"location":"techdives/GeneralConcepts/git/#cherry-picking","title":"Cherry-Picking","text":"

Apply a specific commit from another branch

git cherry-pick <commit-hash>\n

"},{"location":"techdives/GeneralConcepts/git/#rebasing","title":"Rebasing","text":"

Rebase your branch onto another

git rebase <branch-name>\n

"},{"location":"techdives/GeneralConcepts/git/#amending-commits","title":"Amending Commits","text":"

Fix the last commit message or contents

git commit --amend\n

"},{"location":"techdives/GeneralConcepts/git/#understanding-git-internals","title":"Understanding Git Internals","text":"

Git operates by storing snapshots of your project at each commit, not deltas (differences). The key components of Git's internal storage include objects (blobs for file contents, trees for directory layouts, commits, and annotated tags), references such as branches and tags, the HEAD pointer, and the index (staging area).

All of this data is stored in the .git directory.
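
You can inspect these objects directly with git cat-file; for example:

git cat-file -t HEAD              # show the type of the object HEAD points to (commit)\ngit cat-file -p HEAD              # print the commit: its tree, parent, author, and message\ngit cat-file -p 'HEAD^{tree}'     # print the tree: the blobs and subtrees it references\n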

"},{"location":"techdives/GeneralConcepts/git/#collaboration-workflows","title":"Collaboration Workflows","text":"

Teams often adopt workflows to streamline collaboration. Popular ones include Git Flow, GitHub Flow, trunk-based development, and the forking workflow.

"},{"location":"techdives/GeneralConcepts/git/#common-issues","title":"Common Issues","text":""},{"location":"techdives/GeneralConcepts/git/#best-practices","title":"Best Practices","text":""},{"location":"techdives/GeneralConcepts/git/#git-cheat-sheet","title":"Git Cheat Sheet","text":"

Below are all the essential Git commands.

Git Cheat Sheet

# Git Commands Cheat Sheet\n\n# Configuration\ngit config --global user.name \"Your Name\"       # Set user name\ngit config --global user.email \"your.email@uth.com\" # Set user email\ngit config --global core.editor \"code\"         # Set default editor\ngit config --list                              # View current configuration\n\n# Repository Management\ngit init                                       # Initialize a new repository\ngit clone <repository-url>                     # Clone an existing repository\ngit remote add origin <url>                    # Add remote repository\ngit remote -v                                  # View remote repositories\ngit remote remove <name>                       # Remove a remote\n\n# Staging and Committing\ngit add <file>                                 # Stage specific file\ngit add .                                      # Stage all files\ngit status                                     # Check status of repository\ngit commit -m \"Message\"                        # Commit with message\ngit commit --amend                             # Amend the last commit\n\n# Branching\ngit branch                                     # List branches\ngit branch <branch-name>                       # Create a new branch\ngit checkout <branch-name>                     # Switch to a branch\ngit checkout -b <branch-name>                  # Create and Switch to a branch\ngit switch <branch-name>                       # Modern way to switch branches\ngit branch -d <branch-name>                    # Delete a branch\ngit branch -D <branch-name>                    # Force delete a branch\n\n# Merging\ngit merge <branch-name>                        # Merge a branch into the current branch\n\n# Pulling and Pushing\ngit pull                                       # Fetch and merge from remote repository\ngit pull origin <branch-name>                  # Pull specific branch\ngit push origin <branch-name>                  # Push to remote repository\ngit push --all                                 # Push all branches\ngit push --tags                                # Push tags to remote\n\n# Logs and History\ngit log                                        # View commit history\ngit log --oneline                              # View concise commit history\ngit log --graph                                # View graphical commit history\n\n# Undo Changes\ngit reset HEAD <file>                          # Unstage a file\ngit checkout -- <file>                         # Discard changes in working directory\ngit revert <commit-hash>                       # Undo a specific commit (safe)\ngit reset <commit-hash>                        # Reset to a specific commit (dangerous)\n\n# Stashing\ngit stash                                      # Stash changes\ngit stash list                                 # List stashes\ngit stash apply                                # Apply the last stash\ngit stash drop                                 # Remove the last stash\ngit stash clear                                # Clear all stashes\n\n# Rebasing\ngit rebase <branch-name>                       # Rebase current branch onto another\ngit rebase -i <commit-hash>                    # Interactive rebase\n\n# Tags\ngit tag <tag-name>                             # Create a tag\ngit tag -a <tag-name> -m \"Message\"             # Create an annotated tag\ngit tag -d <tag-name>                          # Delete a tag locally\ngit push origin <tag-name>                     # Push a specific tag\ngit push --tags                                # Push all tags\n\n# Collaboration\ngit fetch                                      # Fetch updates from remote\ngit pull                                       # Fetch and merge updates\ngit pull origin <branch-name>                  # Pull specific branch\ngit push                                       # Push changes to remote\ngit push origin <branch-name>                  # Push specific branch\n\n# Ignoring Files\necho \"filename\" >> .gitignore                  # Add file to .gitignore\ngit rm --cached <file>                         # Stop tracking a file\n\n# Viewing Changes\ngit diff                                       # View unstaged changes\ngit diff --staged                              # View staged changes\ngit diff <commit-hash1> <commit-hash2>         # Compare two commits\n\n# Cherry-Picking\ngit cherry-pick <commit-hash>                  # Apply a specific commit to the current branch\n\n# Aliases\ngit config --global alias.co checkout          # Alias for checkout\ngit config --global alias.br branch            # Alias for branch\ngit config --global alias.cm commit            # Alias for commit\n
"}]} \ No newline at end of file +{"config":{"lang":["en"],"separator":"[\\s\\u200b\\-_,:!=\\[\\]()\"`/]+|\\.(?!\\d)|&[lg]t;|(?!\\b)(?=[A-Z][a-z])","pipeline":["stopWordFilter"]},"docs":[{"location":"","title":"Under the Hood","text":"

Welcome to Deep Dives by MG

"},{"location":"blog/","title":"Blog","text":""},{"location":"blog/#coming-soon","title":"Coming soon","text":""},{"location":"changelog/","title":"Changelog","text":""},{"location":"changelog/#under-the-hood-by-mrudhul-guda","title":"Under the Hood by Mrudhul Guda","text":""},{"location":"changelog/#0.4.0","title":"0.4.0 November 16, 2024","text":""},{"location":"changelog/#0.3.0","title":"0.3.0 November 12, 2024","text":""},{"location":"changelog/#0.2.0","title":"0.2.0 November 9, 2024","text":""},{"location":"changelog/#0.1.0","title":"0.1.0 November 5, 2024","text":""},{"location":"fundamentaldives/DesignPatterns/AbstractFactory/","title":"Abstract Factory","text":""},{"location":"fundamentaldives/DesignPatterns/AbstractFactory/#what","title":"What ?","text":"

The Abstract Factory Pattern is a creational design pattern that provides an interface for creating families of related or dependent objects without specifying their concrete classes. It promotes loose coupling between client code and the actual implementations, allowing the code to be more flexible and scalable.

The Abstract Factory pattern works as a super-factory that creates other factories. Each factory produced by the abstract factory is responsible for creating a family of related objects.

Key Characteristics

Class Diagram

AbstractFactory\n\u251c\u2500\u2500 createProductA()\n\u2514\u2500\u2500 createProductB()\n\nConcreteFactory1 \u2500\u2500\u2500\u2500> ProductA1, ProductB1\nConcreteFactory2 \u2500\u2500\u2500\u2500> ProductA2, ProductB2\n\nClient \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500> AbstractFactory, AbstractProduct\n
"},{"location":"fundamentaldives/DesignPatterns/AbstractFactory/#when-to-use","title":"When to Use ?","text":""},{"location":"fundamentaldives/DesignPatterns/AbstractFactory/#advantages","title":"Advantages","text":""},{"location":"fundamentaldives/DesignPatterns/AbstractFactory/#disadvantages","title":"Disadvantages","text":""},{"location":"fundamentaldives/DesignPatterns/AbstractFactory/#how-to-implement","title":"How to Implement ?","text":"Simple Example

Let\u2019s go with an example: imagine you are creating a UI component factory. Your application can switch between two themes, Dark Theme and Light Theme. Both themes provide the same types of components (buttons, text fields) but with different appearances.

Step-1: Define the Abstract Products
// Abstract Product: Button\npublic interface Button {\n    void render();\n}\n\n// Abstract Product: TextField\npublic interface TextField {\n    void render();\n}\n
Step-2: Create Concrete Products
// Concrete Product: Light Button\npublic class LightButton implements Button {\n    @Override\n    public void render() {\n        System.out.println(\"Rendering a Light Button\");\n    }\n}\n\n// Concrete Product: Dark Button\npublic class DarkButton implements Button {\n    @Override\n    public void render() {\n        System.out.println(\"Rendering a Dark Button\");\n    }\n}\n\n// Concrete Product: Light TextField\npublic class LightTextField implements TextField {\n    @Override\n    public void render() {\n        System.out.println(\"Rendering a Light Text Field\");\n    }\n}\n\n// Concrete Product: Dark TextField\npublic class DarkTextField implements TextField {\n    @Override\n    public void render() {\n        System.out.println(\"Rendering a Dark Text Field\");\n    }\n}\n
Step-3: Define the Abstract Factory Interface
public interface UIFactory {\n    Button createButton();\n    TextField createTextField();\n}\n
Step-4: Implement Concrete Factories
// Concrete Factory for Light Theme\npublic class LightUIFactory implements UIFactory {\n    @Override\n    public Button createButton() {\n        return new LightButton();\n    }\n\n    @Override\n    public TextField createTextField() {\n        return new LightTextField();\n    }\n}\n\n// Concrete Factory for Dark Theme\npublic class DarkUIFactory implements UIFactory {\n    @Override\n    public Button createButton() {\n        return new DarkButton();\n    }\n\n    @Override\n    public TextField createTextField() {\n        return new DarkTextField();\n    }\n}\n
Step-5: Using the Abstract Factory in a Client
public class Application {\n    private Button button;\n    private TextField textField;\n\n    public Application(UIFactory factory) {\n        this.button = factory.createButton();\n        this.textField = factory.createTextField();\n    }\n\n    public void renderUI() {\n        button.render();\n        textField.render();\n    }\n\n    public static void main(String[] args) {\n        // Client can choose between different factories.\n        UIFactory factory = new DarkUIFactory(); // Could be switched to LightUIFactory\n        Application app = new Application(factory);\n        app.renderUI();\n    }\n}\n

Output:

Rendering a Dark Button\nRendering a Dark Text Field\n

Spring Boot Example

In Spring Boot, the Abstract Factory pattern can complement dependency injection (DI) by delegating object creation logic to the factory. Here\u2019s how to implement it with Spring Boot.

Step-1: Define Factory Beans
@Configuration\npublic class UIFactoryConfig {\n\n    @Bean\n    public UIFactory uiFactory(@Value(\"${app.theme}\") String theme) {\n        if (\"dark\".equalsIgnoreCase(theme)) {\n            return new DarkUIFactory();\n        } else {\n            return new LightUIFactory();\n        }\n    }\n}\n
Step-2: Use the Factory in a Controller
@RestController\n@RequestMapping(\"/ui\")\npublic class UIController {\n\n    private final UIFactory uiFactory;\n\n    @Autowired\n    public UIController(UIFactory uiFactory) {\n        this.uiFactory = uiFactory;\n    }\n\n    @GetMapping(\"/render\")\n    public void renderUI() {\n        Button button = uiFactory.createButton();\n        TextField textField = uiFactory.createTextField();\n\n        button.render();\n        textField.render();\n    }\n}\n
Step-3: Configure Application Properties
# application.properties\napp.theme=dark\n

In this example, the theme is configured through the application.properties file, and the factory selection is handled by the Spring context.

"},{"location":"fundamentaldives/DesignPatterns/AbstractFactory/#factory-method-comparison","title":"Factory Method Comparison","text":"Aspect Factory Method Abstract Factory Purpose Create one type of product. Create families of related products. Complexity Less complex. More complex, involves multiple classes. Client Knowledge Client knows about individual products. Client works with factories, not specific products. Usage Simple use-cases. Complex, multi-product scenarios."},{"location":"fundamentaldives/DesignPatterns/AbstractFactory/#summary","title":"Summary","text":"

The Abstract Factory Pattern is a powerful tool when designing systems that need to create multiple families of related objects. While it adds complexity, the benefits include extensibility, maintainability, and loose coupling. In a Spring Boot application, it works well alongside dependency injection, especially when configurations like themes or environments vary.

"},{"location":"fundamentaldives/DesignPatterns/Adapter/","title":"Adapter Design Pattern","text":""},{"location":"fundamentaldives/DesignPatterns/Adapter/#what","title":"What ?","text":"

The Adapter Pattern is a structural design pattern in software development that allows objects with incompatible interfaces to work together. It acts as a bridge between two incompatible interfaces, providing a wrapper or a mediator to enable their interaction without changing their existing code.

This Pattern converts the interface of a class into another interface that a client expects. This helps integrate two systems with different interfaces so they can work together without altering their code. It is often used when a legacy system needs to be integrated with new components or when third-party APIs are integrated into an existing codebase.

Analogy

Think of a power plug adapter: you have an appliance with a US plug (two flat pins), but you need to connect it to a European socket (two round holes). The adapter lets the two incompatible interfaces (US plug and European socket) work together without modifying either.

"},{"location":"fundamentaldives/DesignPatterns/Adapter/#when-to-use","title":"When to Use ?","text":""},{"location":"fundamentaldives/DesignPatterns/Adapter/#when-not-to-use","title":"When Not to Use ?","text":""},{"location":"fundamentaldives/DesignPatterns/Adapter/#ways-to-implement","title":"Ways to Implement","text":"Simple Example

There are two common ways to implement the Adapter Pattern:

  1. Class Adapter (Inheritance-based)
  2. Object Adapter (Composition-based)
Class Adapter Pattern (via Inheritance)

In this approach, the adapter class extends the adaptee (the class that has the incompatible interface) and implements the interface that the client expects.

Class Adapter Java Example
// Target Interface - The desired interface that client expects\ninterface MediaPlayer {\n    void play(String audioType, String fileName);\n}\n\n// Adaptee - Incompatible interface that needs adaptation\nclass AdvancedMediaPlayer {\n    void playMp3(String fileName) {\n        System.out.println(\"Playing mp3 file: \" + fileName);\n    }\n\n    void playMp4(String fileName) {\n        System.out.println(\"Playing mp4 file: \" + fileName);\n    }\n}\n\n// Class Adapter - Adapts AdvancedMediaPlayer to MediaPlayer\nclass MediaAdapter extends AdvancedMediaPlayer implements MediaPlayer {\n    @Override\n    public void play(String audioType, String fileName) {\n        if (audioType.equalsIgnoreCase(\"mp3\")) {\n            playMp3(fileName);\n        } else if (audioType.equalsIgnoreCase(\"mp4\")) {\n            playMp4(fileName);\n        }\n    }\n}\n\n// Client Code\npublic class AudioPlayer {\n    public static void main(String[] args) {\n        MediaPlayer player = new MediaAdapter();\n        player.play(\"mp3\", \"song.mp3\");\n        player.play(\"mp4\", \"video.mp4\");\n    }\n}\n
Explanation

MediaAdapter extends AdvancedMediaPlayer (inheriting the original functionality) and implements the MediaPlayer interface (adapting it to what the client expects).

Object Adapter Pattern (via Composition)

In this approach, the adapter contains an instance of the adaptee class and delegates calls to the appropriate methods.

Object Adapter Java Example
// Target Interface\ninterface MediaPlayer {\n    void play(String audioType, String fileName);\n}\n\n// Adaptee\nclass AdvancedMediaPlayer {\n    void playMp3(String fileName) {\n        System.out.println(\"Playing mp3 file: \" + fileName);\n    }\n\n    void playMp4(String fileName) {\n        System.out.println(\"Playing mp4 file: \" + fileName);\n    }\n}\n\n// Object Adapter\nclass MediaAdapter implements MediaPlayer {\n    private AdvancedMediaPlayer advancedPlayer;\n\n    public MediaAdapter(AdvancedMediaPlayer advancedPlayer) {\n        this.advancedPlayer = advancedPlayer;\n    }\n\n    @Override\n    public void play(String audioType, String fileName) {\n        if (audioType.equalsIgnoreCase(\"mp3\")) {\n            advancedPlayer.playMp3(fileName);\n        } else if (audioType.equalsIgnoreCase(\"mp4\")) {\n            advancedPlayer.playMp4(fileName);\n        }\n    }\n}\n\n// Client Code\npublic class AudioPlayer {\n    public static void main(String[] args) {\n        AdvancedMediaPlayer advancedPlayer = new AdvancedMediaPlayer();\n        MediaPlayer adapter = new MediaAdapter(advancedPlayer);\n        adapter.play(\"mp3\", \"song.mp3\");\n        adapter.play(\"mp4\", \"video.mp4\");\n    }\n}\n
Explanation

In this version, MediaAdapter holds a reference to the AdvancedMediaPlayer instance and delegates method calls instead of extending the class.

Spring Boot Example

In a Spring Boot context, the Adapter Pattern can be used to integrate an external or legacy service with your application's service layer.

Integrating a Legacy Payment Service
// Legacy Payment Service - Adaptee\nclass LegacyPaymentService {\n    public void payWithCreditCard(String cardNumber) {\n        System.out.println(\"Payment made using Legacy Credit Card: \" + cardNumber);\n    }\n}\n\n// Target Interface\ninterface PaymentService {\n    void processPayment(String cardNumber);\n}\n\n// Adapter Implementation - Integrating LegacyPaymentService with PaymentService\n@Component\nclass PaymentServiceAdapter implements PaymentService {\n    private final LegacyPaymentService legacyService;\n\n    // Constructor injection\n    public PaymentServiceAdapter(LegacyPaymentService legacyService) {\n        this.legacyService = legacyService;\n    }\n\n    @Override\n    public void processPayment(String cardNumber) {\n        legacyService.payWithCreditCard(cardNumber);\n    }\n}\n\n// Spring Boot Controller\n@RestController\n@RequestMapping(\"/payments\")\npublic class PaymentController {\n\n    private final PaymentService paymentService;\n\n    @Autowired\n    public PaymentController(PaymentService paymentService) {\n        this.paymentService = paymentService;\n    }\n\n    @PostMapping\n    public String makePayment(@RequestParam String cardNumber) {\n        paymentService.processPayment(cardNumber);\n        return \"Payment Successful\";\n    }\n}\n
Explanation "},{"location":"fundamentaldives/DesignPatterns/Adapter/#summary","title":"Summary","text":"

The Adapter Pattern enhances flexibility by decoupling client code from specific implementations, promotes reusability by enabling compatibility between systems, improves maintainability by isolating legacy or third-party code, and simplifies testing through easy mock or stub usage.

"},{"location":"fundamentaldives/DesignPatterns/Bridge/","title":"Bridge","text":""},{"location":"fundamentaldives/DesignPatterns/Bridge/#what","title":"What ?","text":"

The Bridge Pattern is a structural design pattern that helps to decouple an abstraction from its implementation so that both can vary independently. This pattern is especially useful when you need to manage complex class hierarchies or have multiple dimensions of variations.

When both the abstraction (interface) and its implementation (how it works internally) need to evolve, the code becomes complex and hard to manage. Bridge helps to separate these concerns. The pattern separates the abstraction (interface) from the actual implementation and lets them evolve independently by delegating the concrete work to another interface.

"},{"location":"fundamentaldives/DesignPatterns/Bridge/#structure","title":"Structure ?","text":"

The structure involves two key parts:

The Abstraction contains a reference to the Implementor (interface or class). This lets the abstraction delegate the implementation details to the concrete implementations.

Class Diagram

Abstraction --> Implementor\n    |                |\nRefinedAbstraction   ConcreteImplementor\n
"},{"location":"fundamentaldives/DesignPatterns/Bridge/#when-to-use","title":"When to Use ?","text":""},{"location":"fundamentaldives/DesignPatterns/Bridge/#where-not-to-use","title":"Where Not to Use ?","text":""},{"location":"fundamentaldives/DesignPatterns/Bridge/#advantages","title":"Advantages","text":""},{"location":"fundamentaldives/DesignPatterns/Bridge/#disadvantages","title":"Disadvantages","text":""},{"location":"fundamentaldives/DesignPatterns/Bridge/#how-to-implement","title":"How to Implement ?","text":"Simple Example

Let\u2019s look at a real-world example rendering shapes on different platforms. The rendering logic could vary depending on the platform (Windows, Linux, etc.), but the shape (e.g., Circle, Rectangle) stays the same.

Spring Boot Example

In a Spring Boot application, you can use the Bridge pattern to switch between different implementations of a service dynamically, such as switching between multiple ways of sending notifications (e.g., Email, SMS). This is helpful when services have multiple implementations, and you need to inject them dynamically without changing the client code.

"},{"location":"fundamentaldives/DesignPatterns/Bridge/#rendering-shapes-on-different-platforms","title":"Rendering Shapes on Different Platforms","text":"Step-1: Define the Implementor interface
interface Renderer {\n    void render(String shape);\n}\n
Step-2: Create Concrete Implementor classes
class VectorRenderer implements Renderer {\n    @Override\n    public void render(String shape) {\n        System.out.println(\"Rendering \" + shape + \" as vectors.\");\n    }\n}\n\nclass RasterRenderer implements Renderer {\n    @Override\n    public void render(String shape) {\n        System.out.println(\"Rendering \" + shape + \" as pixels.\");\n    }\n}\n
Step-3: Define the Abstraction
abstract class Shape {\n    protected Renderer renderer;\n\n    public Shape(Renderer renderer) {\n        this.renderer = renderer;\n    }\n\n    public abstract void draw();\n}\n
Step-4: Create Refined Abstraction classes
class Circle extends Shape {\n    public Circle(Renderer renderer) {\n        super(renderer);\n    }\n\n    @Override\n    public void draw() {\n        renderer.render(\"Circle\");\n    }\n}\n\nclass Rectangle extends Shape {\n    public Rectangle(Renderer renderer) {\n        super(renderer);\n    }\n\n    @Override\n    public void draw() {\n        renderer.render(\"Rectangle\");\n    }\n}\n
Step-5: Client Code
public class BridgePatternDemo {\n    public static void main(String[] args) {\n        Shape circle = new Circle(new VectorRenderer());\n        circle.draw(); // Output: Rendering Circle as vectors.\n\n        Shape rectangle = new Rectangle(new RasterRenderer());\n        rectangle.draw(); // Output: Rendering Rectangle as pixels.\n    }\n}\n
"},{"location":"fundamentaldives/DesignPatterns/Bridge/#notification-system-in-spring-boot","title":"Notification System in Spring Boot","text":"Step-1: Create the Implementor Interface (NotificationSender)
public interface NotificationSender {\n    void send(String message);\n}\n
Step-2. Implement Concrete Implementors (Email and SMS)
@Component\npublic class EmailSender implements NotificationSender {\n    @Override\n    public void send(String message) {\n        System.out.println(\"Sending Email: \" + message);\n    }\n}\n\n@Component\npublic class SmsSender implements NotificationSender {\n    @Override\n    public void send(String message) {\n        System.out.println(\"Sending SMS: \" + message);\n    }\n}\n
Step-3. Create the Abstraction (Notification)
public abstract class Notification {\n    protected NotificationSender sender;\n\n    public Notification(NotificationSender sender) {\n        this.sender = sender;\n    }\n\n    public abstract void notify(String message);\n}\n
Step-4. Create Refined Abstraction (UrgentNotification)
@Component\npublic class UrgentNotification extends Notification {\n\n    @Autowired\n    public UrgentNotification(NotificationSender sender) {\n        super(sender);\n    }\n\n    @Override\n    public void notify(String message) {\n        System.out.println(\"Urgent Notification:\");\n        sender.send(message);\n    }\n}\n
Step-5. Use the Bridge Pattern in a Controller
@RestController\n@RequestMapping(\"/notifications\")\npublic class NotificationController {\n\n    private final UrgentNotification notification;\n\n    @Autowired\n    public NotificationController(UrgentNotification notification) {\n        this.notification = notification;\n    }\n\n    @PostMapping(\"/send\")\n    public String sendNotification(@RequestBody String message) {\n        notification.notify(message);\n        return \"Notification sent!\";\n    }\n}\n

In this example, the Bridge Pattern allows you to switch between different ways of sending notifications (email or SMS) without changing the client code (the NotificationController).

"},{"location":"fundamentaldives/DesignPatterns/Bridge/#summary","title":"Summary","text":"

The Bridge Pattern is an essential design pattern to consider when your class hierarchy is growing unmanageable due to multiple dimensions of variations. Use this pattern when you need to decouple abstraction from implementation and allow them to evolve independently, but avoid it when simpler solutions can suffice.

"},{"location":"fundamentaldives/DesignPatterns/Builder/","title":"Builder Design","text":""},{"location":"fundamentaldives/DesignPatterns/Builder/#what","title":"What ?","text":"

The Builder Pattern is a creational design pattern that allows the construction of complex objects step by step. It separates the construction process from the actual object, giving more control over the construction process.

This Pattern simplifies the creation of complex objects with many optional fields by enabling incremental construction through method chaining, avoiding constructors with numerous parameters. It's ideal for objects requiring various configurations or optional parameters.

"},{"location":"fundamentaldives/DesignPatterns/Builder/#when-to-use","title":"When to Use ?","text":""},{"location":"fundamentaldives/DesignPatterns/Builder/#why-use","title":"Why Use ?","text":""},{"location":"fundamentaldives/DesignPatterns/Builder/#where-not-to-use","title":"Where Not to Use ?","text":""},{"location":"fundamentaldives/DesignPatterns/Builder/#advantages","title":"Advantages","text":""},{"location":"fundamentaldives/DesignPatterns/Builder/#how-to-implement","title":"How to Implement ?","text":"Simple Example

Below is a basic Java example demonstrating the pattern. Assume we need to build a Car object with several optional fields.

Simple Builder Implementation
public class Car {\n    // Required fields\n    private final String make;\n    private final String model;\n\n    // Optional fields\n    private final String color;\n    private final int year;\n    private final boolean automatic;\n\n    // Private constructor accessible only through Builder\n    private Car(Builder builder) {\n        this.make = builder.make;\n        this.model = builder.model;\n        this.color = builder.color;\n        this.year = builder.year;\n        this.automatic = builder.automatic;\n    }\n\n    // Getters (optional, based on your needs)\n    public String getMake() { return make; }\n    public String getModel() { return model; }\n    public String getColor() { return color; }\n    public int getYear() { return year; }\n    public boolean isAutomatic() { return automatic; }\n\n    // Static inner Builder class\n    public static class Builder {\n        // Required fields\n        private final String make;\n        private final String model;\n\n        // Optional fields initialized to default values\n        private String color = \"White\";\n        private int year = 2020;\n        private boolean automatic = true;\n\n        // Builder constructor with required fields\n        public Builder(String make, String model) {\n            this.make = make;\n            this.model = model;\n        }\n\n        // Setter-like methods for optional fields, returning the builder object\n        public Builder color(String color) {\n            this.color = color;\n            return this;\n        }\n\n        public Builder year(int year) {\n            this.year = year;\n            return this;\n        }\n\n        public Builder automatic(boolean automatic) {\n            this.automatic = automatic;\n            return this;\n        }\n\n        // Build method to create the final Car object\n        public Car build() {\n            return new Car(this);\n        }\n    }\n\n    @Override\n    public String toString() {\n        return \"Car [make=\" + make + \", model=\" + model + \n            \", color=\" + color + \", year=\" + year + \n            \", automatic=\" + automatic + \"]\";\n    }\n}\n
Usage of the Builder Pattern
public class Main {\n    public static void main(String[] args) {\n        // Using the builder to create a Car object\n        Car car = new Car.Builder(\"Tesla\", \"Model S\")\n                            .color(\"Red\")\n                            .year(2023)\n                            .automatic(true)\n                            .build();\n\n        System.out.println(car);\n    }\n}\n

Output:

Car [make=Tesla, model=Model S, color=Red, year=2023, automatic=true]\n

Spring Boot Example

In Spring Boot, you often need to build objects like DTOs, configurations, or entities with complex structures. Using the Builder Pattern can make object construction more manageable, especially when working with REST APIs.

Using Builder Pattern for DTOs in Spring Boot
// Let's assume a UserDTO object for API responses.\npublic class UserDTO {\n    private final String username;\n    private final String email;\n    private final String role;\n\n    private UserDTO(Builder builder) {\n        this.username = builder.username;\n        this.email = builder.email;\n        this.role = builder.role;\n    }\n\n    public static class Builder {\n        private String username;\n        private String email;\n        private String role;\n\n        public Builder username(String username) {\n            this.username = username;\n            return this;\n        }\n\n        public Builder email(String email) {\n            this.email = email;\n            return this;\n        }\n\n        public Builder role(String role) {\n            this.role = role;\n            return this;\n        }\n\n        public UserDTO build() {\n            return new UserDTO(this);\n        }\n    }\n}\n
Controller Example with Builder Pattern in Spring Boot
@RestController\n@RequestMapping(\"/api/users\")\npublic class UserController {\n\n    @GetMapping(\"/{id}\")\n    public UserDTO getUserById(@PathVariable Long id) {\n        // Simulate fetching user details from a database\n        return new UserDTO.Builder()\n                .username(\"johndoe\")\n                .email(\"john.doe@example.com\")\n                .role(\"ADMIN\")\n                .build();\n    }\n}\n

This approach ensures that the object returned from the API is constructed cleanly with only the necessary fields set.

Alternative Ways

Telescoping Constructors: multiple overloaded constructors for different parameter combinations; not ideal for readability or maintainability.

public Car(String make, String model) { ... }\npublic Car(String make, String model, String color) { ... }\npublic Car(String make, String model, String color, int year) { ... }\n

Setter Methods: useful for mutable objects, but they do not guarantee immutability and become less readable when constructing objects with many attributes.

Car car = new Car();\ncar.setMake(\"Tesla\");\ncar.setModel(\"Model S\");\ncar.setColor(\"Red\");\n
"},{"location":"fundamentaldives/DesignPatterns/Builder/#summary","title":"Summary","text":"

The Builder Pattern is an elegant way to handle object creation, especially when dealing with many fields or optional parameters. It ensures code readability, immutability, and flexibility while avoiding the need for numerous constructors. However, it should be used only when necessary, as simple objects may not benefit from it.

Note

In Spring Boot, the Builder Pattern can be effectively used for creating DTOs and other complex objects, improving both code readability and maintenance. This pattern fits well when dealing with REST API responses or configuration settings, ensuring your objects are built in a clear, consistent manner.

"},{"location":"fundamentaldives/DesignPatterns/CircuitBreakers/","title":"Circuit Breakers","text":""},{"location":"fundamentaldives/DesignPatterns/CircuitBreakers/#what","title":"What ?","text":"

A circuit breaker is a design pattern used to prevent cascading failures and manage service availability. If a service call repeatedly fails, the circuit breaker \"trips\" and prevents further attempts, allowing the system to recover gracefully. This pattern mimics the behavior of electrical circuit breakers.

"},{"location":"fundamentaldives/DesignPatterns/CircuitBreakers/#why-is-it-needed","title":"Why is it Needed ?","text":"

Real-world scenario

If Service A depends on Service B but Service B becomes unavailable, Service A will receive failures continuously. A circuit breaker prevents Service A from overloading itself and Service B by failing fast.

"},{"location":"fundamentaldives/DesignPatterns/CircuitBreakers/#types-of-circuit-breakers","title":"Types of Circuit Breakers","text":"

There are multiple models of circuit breakers to choose from depending on use case:

Count-based Circuit Breaker - Trips once a predefined number of failures occurs, e.g., after 3 consecutive failed requests the breaker opens.

Time-based Circuit Breaker - Monitors failures within a window of time and trips if the failure threshold is met, e.g., if 5 out of 10 requests fail within 1 minute, it opens.

Sliding Window Circuit Breaker - A rolling window of recent requests determines whether the circuit trips; useful when failure patterns are sporadic.
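
As a sketch, Resilience4j (used again in the implementation section below) expresses the count-based and time-based models through its sliding-window settings:

import io.github.resilience4j.circuitbreaker.CircuitBreakerConfig;\n\nCircuitBreakerConfig countBased = CircuitBreakerConfig.custom()\n    .slidingWindowType(CircuitBreakerConfig.SlidingWindowType.COUNT_BASED)\n    .slidingWindowSize(10)             // evaluate the last 10 calls\n    .failureRateThreshold(50)          // open if 50% of them failed\n    .build();\n\nCircuitBreakerConfig timeBased = CircuitBreakerConfig.custom()\n    .slidingWindowType(CircuitBreakerConfig.SlidingWindowType.TIME_BASED)\n    .slidingWindowSize(60)             // evaluate calls from the last 60 seconds\n    .failureRateThreshold(50)\n    .build();\n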

"},{"location":"fundamentaldives/DesignPatterns/CircuitBreakers/#how-does-it-work","title":"How Does it Work?","text":"

The basic mechanics of a circuit breaker involve three states:

Closed State - Requests flow through normally while failures are counted; once the failure threshold is reached, the breaker trips to Open.

Open State - Calls fail fast without reaching the downstream service; after a configured timeout, the breaker moves to Half-Open.

Half-Open State - A limited number of trial requests are let through; if they succeed, the breaker closes, otherwise it reopens.

State Transition Flow

Closed -> (failure threshold reached) -> Open -> (timeout) -> Half-Open -> (success) -> Closed\n
"},{"location":"fundamentaldives/DesignPatterns/CircuitBreakers/#use-cases","title":"Use Cases","text":""},{"location":"fundamentaldives/DesignPatterns/CircuitBreakers/#implementation","title":"Implementation","text":"

Several popular libraries and frameworks make circuit breaker implementations simple. Below are code examples in Java and Python.

Java with Resilience4jPython with PyBreaker
// Resilience4j is a library providing circuit breaker implementations.\nimport io.github.resilience4j.circuitbreaker.CircuitBreaker;\nimport io.github.resilience4j.circuitbreaker.CircuitBreakerConfig;\nimport io.github.resilience4j.circuitbreaker.CircuitBreakerRegistry;\n\nimport java.time.Duration;\n\npublic class Example {\n    public static void main(String[] args) {\n        // Configuration\n        CircuitBreakerConfig config = CircuitBreakerConfig.custom()\n            .failureRateThreshold(50)  // Open if 50% of requests fail\n            .waitDurationInOpenState(Duration.ofSeconds(5))  // Wait 5 seconds before Half-Open\n            .build();\n\n        CircuitBreakerRegistry registry = CircuitBreakerRegistry.of(config);\n        CircuitBreaker circuitBreaker = registry.circuitBreaker(\"myService\");\n\n        // Wrap a call in the circuit breaker\n        String response = circuitBreaker.executeSupplier(() -> makeHttpRequest());\n\n        System.out.println(response);\n    }\n\n    private static String makeHttpRequest() {\n        // Simulate an HTTP request here\n        return \"Success!\";\n    }\n}\n
# PyBreaker is a library implementing the Circuit Breaker pattern for Python applications.\n\nfrom pybreaker import CircuitBreaker, CircuitBreakerError\nimport requests\n\n# Define a circuit breaker\nbreaker = CircuitBreaker(fail_max=3, reset_timeout=5)\n\n@breaker\ndef fetch_data(url):\n    response = requests.get(url)\n    if response.status_code != 200:\n        raise Exception(\"Service unavailable\")\n    return response.json()\n\ntry:\n    data = fetch_data('https://api.example.com/data')\n    print(data)\nexcept CircuitBreakerError:\n    print(\"Circuit is open. Service unavailable.\")\n
"},{"location":"fundamentaldives/DesignPatterns/CircuitBreakers/#advanced-topics","title":"Advanced Topics","text":""},{"location":"fundamentaldives/DesignPatterns/CircuitBreakers/#monitoring-and-metrics","title":"Monitoring and Metrics","text":"

Circuit breakers need to be monitored to ensure they perform correctly. You can integrate them with monitoring tools like Prometheus or Grafana. Many libraries offer hooks to capture metrics such as failure rates, state transitions, and call durations.
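
For example, Resilience4j exposes an event publisher whose callbacks can be bridged to a metrics system (a minimal sketch; circuitBreaker is the instance from the implementation section above):

circuitBreaker.getEventPublisher()\n    .onStateTransition(event ->\n        System.out.println(\"State transition: \" + event.getStateTransition()));\n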

"},{"location":"fundamentaldives/DesignPatterns/CircuitBreakers/#tuning-the-circuit-breaker","title":"Tuning the Circuit Breaker","text":""},{"location":"fundamentaldives/DesignPatterns/CircuitBreakers/#testing-circuit-breakers","title":"Testing Circuit Breakers","text":""},{"location":"fundamentaldives/DesignPatterns/CircuitBreakers/#summary","title":"Summary","text":"

Circuit breakers are crucial in modern, distributed systems, preventing unnecessary retries and protecting systems from cascading failures. As systems grow in complexity, using the circuit breaker pattern helps maintain high availability and resilience.

"},{"location":"fundamentaldives/DesignPatterns/Composite/","title":"Composite","text":""},{"location":"fundamentaldives/DesignPatterns/Composite/#what","title":"What ?","text":"

The Composite Pattern is a structural design pattern used in software design to represent part-whole hierarchies. It enables you to build complex object structures by treating both individual objects and compositions of objects uniformly.

This pattern allows you to treat a group of objects in the same way as a single object. This is especially useful when building tree structures (like directories or UI components).

Key Concepts

The idea is to define a common interface for all the objects, whether simple or complex, so they can be treated uniformly.

Basic Structure UML

Component (Interface or Abstract class)\n\u251c\u2500\u2500 Leaf (Concrete class)\n\u2514\u2500\u2500 Composite (Concrete class containing Components)\n
"},{"location":"fundamentaldives/DesignPatterns/Composite/#when","title":"When ?","text":""},{"location":"fundamentaldives/DesignPatterns/Composite/#where-not-to-use","title":"Where Not to Use ?","text":""},{"location":"fundamentaldives/DesignPatterns/Composite/#why-use","title":"Why Use ?","text":""},{"location":"fundamentaldives/DesignPatterns/Composite/#ways-to-create","title":"Ways to Create ?","text":""},{"location":"fundamentaldives/DesignPatterns/Composite/#how-to-use-effectively","title":"How to Use Effectively?","text":""},{"location":"fundamentaldives/DesignPatterns/Composite/#how-to-apply","title":"How to Apply ?","text":"Simple Example
// 1. Component Interface\ninterface Component {\n    void showDetails();  // Common operation for both Leaf and Composite\n}\n\n// 2. Leaf Class (Single object)\nclass Employee implements Component {\n    private String name;\n    private String position;\n\n    public Employee(String name, String position) {\n        this.name = name;\n        this.position = position;\n    }\n\n    @Override\n    public void showDetails() {\n        System.out.println(name + \" works as \" + position);\n    }\n}\n\n// 3. Composite Class (Composite of Components)\nclass Department implements Component {\n    private List<Component> employees = new ArrayList<>();\n\n    public void addEmployee(Component employee) {\n        employees.add(employee);\n    }\n\n    public void removeEmployee(Component employee) {\n        employees.remove(employee);\n    }\n\n    @Override\n    public void showDetails() {\n        for (Component employee : employees) {\n            employee.showDetails();\n        }\n    }\n}\n\n// 4. Client Code\npublic class CompositePatternDemo {\n    public static void main(String[] args) {\n        // Create individual employees\n        Component emp1 = new Employee(\"John\", \"Developer\");\n        Component emp2 = new Employee(\"Doe\", \"Tester\");\n\n        // Create a department and add employees to it\n        Department department = new Department();\n        department.addEmployee(emp1);\n        department.addEmployee(emp2);\n\n        // Show details\n        System.out.println(\"Department Details:\");\n        department.showDetails();\n    }\n}\n
Output
Department Details:\nJohn works as Developer\nDoe works as Tester\n
Spring Boot Example

In Spring Boot, the Composite pattern can fit into cases where you model tree-based structures in your business logic, such as:

  1. Entity Relationships in JPA: Modeling nested categories, departments, or menus.
  2. Business Service Layer: Creating a unified API to handle both individual and composite objects.
"},{"location":"fundamentaldives/DesignPatterns/Composite/#product-category-service-in-spring-boot","title":"Product Category Service in Spring Boot","text":"
// 1. Component Interface\npublic interface ProductCategory {\n    String getName();\n    void showCategoryDetails();\n}\n\n// 2. Leaf Class\npublic class Product implements ProductCategory {\n    private String name;\n\n    public Product(String name) {\n        this.name = name;\n    }\n\n    @Override\n    public String getName() {\n        return name;\n    }\n\n    @Override\n    public void showCategoryDetails() {\n        System.out.println(\"Product: \" + name);\n    }\n}\n\n// 3. Composite Class\npublic class Category implements ProductCategory {\n    private String name;\n    private List<ProductCategory> children = new ArrayList<>();\n\n    public Category(String name) {\n        this.name = name;\n    }\n\n    public void add(ProductCategory category) {\n        children.add(category);\n    }\n\n    public void remove(ProductCategory category) {\n        children.remove(category);\n    }\n\n    @Override\n    public String getName() {\n        return name;\n    }\n\n    @Override\n    public void showCategoryDetails() {\n        System.out.println(\"Category: \" + name);\n        for (ProductCategory child : children) {\n            child.showCategoryDetails();\n        }\n    }\n}\n\n// 4. Controller in Spring Boot\n@RestController\n@RequestMapping(\"/categories\")\npublic class CategoryController {\n\n    @GetMapping(\"/example\")\n    public void example() {\n        // Creating products\n        ProductCategory p1 = new Product(\"Laptop\");\n        ProductCategory p2 = new Product(\"Phone\");\n\n        // Creating a category and adding products\n        Category electronics = new Category(\"Electronics\");\n        electronics.add(p1);\n        electronics.add(p2);\n\n        // Display details\n        electronics.showCategoryDetails();\n    }\n}\n
Sample Output when calling /categories/example
Category: Electronics\nProduct: Laptop\nProduct: Phone\n
Spring Boot Considerations "},{"location":"fundamentaldives/DesignPatterns/Composite/#summary","title":"Summary","text":"

The Composite Pattern is a powerful structural pattern for managing hierarchical, tree-like structures. It allows uniform handling of individual and composite objects, making it ideal for UI elements, filesystems, or business domains with nested elements. When integrating with Spring Boot, it works well in controllers, services, or JPA entities for modeling hierarchical data. However, avoid using it when there\u2019s no hierarchy or when performance is critical (deep recursion). Use it wisely, and it can help you reduce complexity and simplify your code.

"},{"location":"fundamentaldives/DesignPatterns/Decorator/","title":"Decorator","text":""},{"location":"fundamentaldives/DesignPatterns/Decorator/#what","title":"What ?","text":"

The Decorator Pattern is a structural design pattern that allows behavior to be added to individual objects, either statically or dynamically, without affecting the behavior of other objects from the same class. This pattern is particularly useful when you need to add functionality to objects without subclassing and in scenarios where multiple combinations of behaviors are required.

This pattern is used to attach additional responsibilities or behaviors to an object dynamically. It wraps the original object, adding new behavior while keeping the object\u2019s interface intact. A decorator class has a reference to the original object and implements the same interface.

Key Concepts

"},{"location":"fundamentaldives/DesignPatterns/Decorator/#when-to-use","title":"When to Use ?","text":""},{"location":"fundamentaldives/DesignPatterns/Decorator/#where-not-to-use","title":"Where Not to Use ?","text":""},{"location":"fundamentaldives/DesignPatterns/Decorator/#advantages","title":"Advantages","text":""},{"location":"fundamentaldives/DesignPatterns/Decorator/#disadvantages","title":"Disadvantages","text":""},{"location":"fundamentaldives/DesignPatterns/Decorator/#how-to-apply","title":"How to Apply ?","text":"Simple Example

Let's consider an example where we are building a coffee shop. Different types of coffees can be enhanced with add-ons like milk, sugar, etc. Using the decorator pattern, we can apply these add-ons dynamically without subclassing.

// Step 1: Component Interface\npublic interface Coffee {\n    String getDescription();\n    double getCost();\n}\n\n// Step 2: ConcreteComponent (Basic Coffee)\npublic class BasicCoffee implements Coffee {\n    @Override\n    public String getDescription() {\n        return \"Basic Coffee\";\n    }\n\n    @Override\n    public double getCost() {\n        return 2.0;\n    }\n}\n\n// Step 3: Decorator (Abstract)\npublic abstract class CoffeeDecorator implements Coffee {\n    protected Coffee coffee; // The object being decorated\n\n    public CoffeeDecorator(Coffee coffee) {\n        this.coffee = coffee;\n    }\n\n    public String getDescription() {\n        return coffee.getDescription();\n    }\n\n    public double getCost() {\n        return coffee.getCost();\n    }\n}\n\n// Step 4: Concrete Decorators (e.g., Milk, Sugar)\npublic class MilkDecorator extends CoffeeDecorator {\n    public MilkDecorator(Coffee coffee) {\n        super(coffee);\n    }\n\n    @Override\n    public String getDescription() {\n        return coffee.getDescription() + \", Milk\";\n    }\n\n    @Override\n    public double getCost() {\n        return coffee.getCost() + 0.5;\n    }\n}\n\npublic class SugarDecorator extends CoffeeDecorator {\n    public SugarDecorator(Coffee coffee) {\n        super(coffee);\n    }\n\n    @Override\n    public String getDescription() {\n        return coffee.getDescription() + \", Sugar\";\n    }\n\n    @Override\n    public double getCost() {\n        return coffee.getCost() + 0.2;\n    }\n}\n\n// Step 5: Usage\npublic class CoffeeShop {\n    public static void main(String[] args) {\n        Coffee coffee = new BasicCoffee();\n        System.out.println(coffee.getDescription() + \" $\" + coffee.getCost());\n\n        coffee = new MilkDecorator(coffee);\n        System.out.println(coffee.getDescription() + \" $\" + coffee.getCost());\n\n        coffee = new SugarDecorator(coffee);\n        System.out.println(coffee.getDescription() + \" $\" + coffee.getCost());\n    }\n}\n
Output
Basic Coffee $2.0\nBasic Coffee, Milk $2.5\nBasic Coffee, Milk, Sugar $2.7\n
Spring Boot Example

In Spring Boot, the decorator pattern can be used in scenarios such as logging, monitoring, or security checks. You can implement a decorator pattern to enhance service classes without changing their core logic. Here's an example where we decorate a service class to add logging functionality.

Component Interface (Service Layer)
public interface UserService {\n    String getUserDetails(String userId);\n}\n
Concrete Component
@Service\npublic class UserServiceImpl implements UserService {\n    @Override\n    public String getUserDetails(String userId) {\n        return \"User details for \" + userId;\n    }\n}\n
Decorator
// Note: intentionally NOT annotated with @Service. The decorator is wired\n// explicitly in the configuration class below, so Spring does not see two\n// competing UserService beans during autowiring.\npublic class LoggingUserService implements UserService {\n\n    private final UserService userService;\n\n    public LoggingUserService(UserService userService) {\n        this.userService = userService;\n    }\n\n    @Override\n    public String getUserDetails(String userId) {\n        System.out.println(\"Fetching details for user: \" + userId);\n        return userService.getUserDetails(userId);\n    }\n}\n
Configuration to Use Decorator
@Configuration\npublic class ServiceConfig {\n\n    // Marked @Primary so that clients injecting UserService receive the\n    // logging decorator wrapped around the real implementation.\n    @Bean\n    @Primary\n    public UserService userService(UserServiceImpl userServiceImpl) {\n        return new LoggingUserService(userServiceImpl);\n    }\n}\n
How it Works in Spring Boot "},{"location":"fundamentaldives/DesignPatterns/Decorator/#summary","title":"Summary","text":"

The Decorator Pattern is a powerful and flexible way to enhance objects with additional behaviors dynamically without altering their structure. It shines in scenarios requiring combinations of behaviors and helps maintain clean, modular code.

In Spring Boot, it can be used for decorating services with additional features like logging, security, or metrics, allowing these aspects to remain separate from the core business logic.

This pattern should be used thoughtfully, since excessive use can introduce complexity and make debugging difficult. However, when applied correctly, it ensures that code remains extensible and adheres to the Single Responsibility Principle and the Open/Closed Principle.

"},{"location":"fundamentaldives/DesignPatterns/Facade/","title":"Facade","text":"

The Facade Pattern is a structural design pattern commonly used to provide a simple, unified interface to a complex subsystem of classes, libraries, or frameworks. This pattern makes a complex library or system easier to use by hiding the underlying complexities and exposing only the functionality that is relevant for the client.

"},{"location":"fundamentaldives/DesignPatterns/Facade/#what","title":"What ?","text":""},{"location":"fundamentaldives/DesignPatterns/Facade/#how-to-create","title":"How to Create ?","text":""},{"location":"fundamentaldives/DesignPatterns/Facade/#when-to-use","title":"When to Use ?","text":""},{"location":"fundamentaldives/DesignPatterns/Facade/#where-not-to-use","title":"Where Not to Use ?","text":""},{"location":"fundamentaldives/DesignPatterns/Facade/#advantages","title":"Advantages","text":""},{"location":"fundamentaldives/DesignPatterns/Facade/#disadvantages","title":"Disadvantages","text":""},{"location":"fundamentaldives/DesignPatterns/Facade/#how-to-apply","title":"How to Apply ?","text":"Simple Example

Let's go through a simple Java example demonstrating a Facade pattern for a Home Theater System:

// Subsystem classes\nclass Amplifier {\n    public void on() { System.out.println(\"Amplifier is ON.\"); }\n    public void off() { System.out.println(\"Amplifier is OFF.\"); }\n}\n\nclass DVDPlayer {\n    public void play() { System.out.println(\"Playing movie.\"); }\n    public void stop() { System.out.println(\"Stopping movie.\"); }\n}\n\nclass Projector {\n    public void on() { System.out.println(\"Projector is ON.\"); }\n    public void off() { System.out.println(\"Projector is OFF.\"); }\n}\n\n// Facade class\nclass HomeTheaterFacade {\n    private Amplifier amplifier;\n    private DVDPlayer dvdPlayer;\n    private Projector projector;\n\n    public HomeTheaterFacade(Amplifier amp, DVDPlayer dvd, Projector proj) {\n        this.amplifier = amp;\n        this.dvdPlayer = dvd;\n        this.projector = proj;\n    }\n\n    public void watchMovie() {\n        System.out.println(\"Setting up movie...\");\n        amplifier.on();\n        projector.on();\n        dvdPlayer.play();\n    }\n\n    public void endMovie() {\n        System.out.println(\"Shutting down movie...\");\n        dvdPlayer.stop();\n        projector.off();\n        amplifier.off();\n    }\n}\n\n// Client code\npublic class FacadePatternDemo {\n    public static void main(String[] args) {\n        Amplifier amp = new Amplifier();\n        DVDPlayer dvd = new DVDPlayer();\n        Projector proj = new Projector();\n\n        HomeTheaterFacade homeTheater = new HomeTheaterFacade(amp, dvd, proj);\n\n        homeTheater.watchMovie();\n        homeTheater.endMovie();\n    }\n}\n
Output
Setting up movie...\nAmplifier is ON.\nProjector is ON.\nPlaying movie.\nShutting down movie...\nStopping movie.\nProjector is OFF.\nAmplifier is OFF.\n
Explanation: Spring Boot Example

In Spring Boot, the Facade pattern can be applied to services or controllers to hide the complexity of business logic or external systems. For example, a facade class can wrap multiple service calls or integrate external APIs to provide a simplified interface to the client (like a REST controller).

Let's go through an example of how to apply the Facade pattern in a Spring Boot application.

Example Scenario: A Payment System interacts with several services (like PaymentGatewayService, NotificationService, and OrderService). We create a PaymentFacade to simplify the interaction.

Step-1: Subsystem Services
@Service\npublic class PaymentGatewayService {\n    public void processPayment(String orderId) {\n        System.out.println(\"Processing payment for order: \" + orderId);\n    }\n}\n\n@Service\npublic class NotificationService {\n    public void sendNotification(String message) {\n        System.out.println(\"Sending notification: \" + message);\n    }\n}\n\n@Service\npublic class OrderService {\n    public void completeOrder(String orderId) {\n        System.out.println(\"Completing order: \" + orderId);\n    }\n}\n
Step-2: Facade Class
@Service\npublic class PaymentFacade {\n\n    private final PaymentGatewayService paymentGatewayService;\n    private final NotificationService notificationService;\n    private final OrderService orderService;\n\n    @Autowired\n    public PaymentFacade(PaymentGatewayService paymentGatewayService,\n                        NotificationService notificationService,\n                        OrderService orderService) {\n        this.paymentGatewayService = paymentGatewayService;\n        this.notificationService = notificationService;\n        this.orderService = orderService;\n    }\n\n    public void makePayment(String orderId) {\n        System.out.println(\"Initiating payment process...\");\n        paymentGatewayService.processPayment(orderId);\n        orderService.completeOrder(orderId);\n        notificationService.sendNotification(\"Payment completed for order: \" + orderId);\n    }\n}\n
Step-3: Controller
@RestController\n@RequestMapping(\"/api/payment\")\npublic class PaymentController {\n\n    private final PaymentFacade paymentFacade;\n\n    @Autowired\n    public PaymentController(PaymentFacade paymentFacade) {\n        this.paymentFacade = paymentFacade;\n    }\n\n    @PostMapping(\"/pay/{orderId}\")\n    public ResponseEntity<String> pay(@PathVariable String orderId) {\n        paymentFacade.makePayment(orderId);\n        return ResponseEntity.ok(\"Payment successful for order: \" + orderId);\n    }\n}\n
Explanation "},{"location":"fundamentaldives/DesignPatterns/Facade/#summary","title":"Summary","text":"

The Facade Pattern is a powerful tool for simplifying interactions with complex systems. It is especially useful when working with large subsystems or external APIs, as it encapsulates the internal workings and provides a simple interface to clients. In a Spring Boot application, you can use it to manage complex business logic or interactions with multiple services within a single, cohesive facade. However, it should be used judiciously to avoid over-abstraction or unnecessary complexity.

Note

This makes the Facade pattern a valuable asset in both object-oriented design and modern frameworks like Spring Boot.

"},{"location":"fundamentaldives/DesignPatterns/Facade/#this-pattern-is-best-used-when","title":"This pattern is best used when:","text":"
- You need to simplify client interactions.\n- You want to decouple the client from a complex subsystem.\n- You aim to improve maintainability and reduce dependencies.\n
"},{"location":"fundamentaldives/DesignPatterns/Facade/#avoid-using-it-when","title":"Avoid using it when:","text":"
- The system is already simple.\n- Performance is a key concern.\n
"},{"location":"fundamentaldives/DesignPatterns/FactoryMethod/","title":"Factory Method","text":""},{"location":"fundamentaldives/DesignPatterns/FactoryMethod/#what","title":"What ?","text":"

Factory Method is a creational design pattern that provides an interface for creating objects in a superclass, but allows subclasses to alter the type of objects that will be created (that is, to decide which class to instantiate). This pattern delegates the responsibility of object creation to subclasses rather than using a direct constructor call. It is one of the most widely used creational design patterns, and it enables the creation of objects without specifying the exact class of the object that will be created.

You provide a \"factory\" method that the client code calls to get the object, but the actual object that gets created is determined at runtime (based on some logic).

"},{"location":"fundamentaldives/DesignPatterns/FactoryMethod/#when-to-use","title":"When to Use ?","text":""},{"location":"fundamentaldives/DesignPatterns/FactoryMethod/#when-not-to-use","title":"When Not to Use ?","text":""},{"location":"fundamentaldives/DesignPatterns/FactoryMethod/#where-it-shines","title":"Where it Shines ?","text":""},{"location":"fundamentaldives/DesignPatterns/FactoryMethod/#advantages","title":"Advantages","text":""},{"location":"fundamentaldives/DesignPatterns/FactoryMethod/#structure","title":"Structure","text":"
  1. Product Interface: Defines the interface for the object being created.
  2. Concrete Product: Implements the product interface.
  3. Creator: Declares the factory method which returns an object of type Product.
  4. Concrete Creator: Overrides the factory method to return a specific product instance.
"},{"location":"fundamentaldives/DesignPatterns/FactoryMethod/#how-to-implement","title":"How To Implement ?","text":"Structured Example Step-1: Define a Product Interface
public interface Notification {\n    void notifyUser();\n}\n
Step-2: Create Concrete Implementations of the Product
public class SMSNotification implements Notification {\n    @Override\n    public void notifyUser() {\n        System.out.println(\"Sending an SMS notification.\");\n    }\n}\n\npublic class EmailNotification implements Notification {\n    @Override\n    public void notifyUser() {\n        System.out.println(\"Sending an Email notification.\");\n    }\n}\n
Step-3: Create an Abstract Factory Class
public abstract class NotificationFactory {\n    public abstract Notification createNotification();\n}\n
Step-4: Implement Concrete Factory Classes
public class SMSNotificationFactory extends NotificationFactory {\n    @Override\n    public Notification createNotification() {\n        return new SMSNotification();\n    }\n}\n\npublic class EmailNotificationFactory extends NotificationFactory {\n    @Override\n    public Notification createNotification() {\n        return new EmailNotification();\n    }\n}\n
Step-5: Usage in Client Code
public class Client {\n    public static void main(String[] args) {\n        NotificationFactory factory = new SMSNotificationFactory();\n        Notification notification = factory.createNotification();\n        notification.notifyUser();\n\n        factory = new EmailNotificationFactory();\n        notification = factory.createNotification();\n        notification.notifyUser();\n    }\n}\n
Spring Boot Example

Spring Boot relies heavily on dependency injection (DI) and Inversion of Control (IoC), which means Spring beans can act as factory classes to produce the desired objects.

"},{"location":"fundamentaldives/DesignPatterns/FactoryMethod/#example-notification-factory-with-spring-boot","title":"Example: Notification Factory with Spring Boot","text":"Define the Product Interface and Implementations (Same as Before)
public interface Notification {\n    void notifyUser();\n}\n\npublic class SMSNotification implements Notification {\n    @Override\n    public void notifyUser() {\n        System.out.println(\"Sending an SMS notification.\");\n    }\n}\n\npublic class EmailNotification implements Notification {\n    @Override\n    public void notifyUser() {\n        System.out.println(\"Sending an Email notification.\");\n    }\n}\n
Create a Spring Factory Class
import org.springframework.stereotype.Service;\n\n// This class acts as the Factory. Declaring it as a Spring @Service\n// (or @Component) bean lets Spring manage its lifecycle.\n@Service\npublic class NotificationFactory {\n    public Notification createNotification(String type) {\n        if (type.equalsIgnoreCase(\"SMS\")) {\n            return new SMSNotification();\n        } else if (type.equalsIgnoreCase(\"Email\")) {\n            return new EmailNotification();\n        }\n        throw new IllegalArgumentException(\"Unknown notification type: \" + type);\n    }\n}\n
Use the Factory Class in a Spring Controller
import org.springframework.beans.factory.annotation.Autowired;\nimport org.springframework.web.bind.annotation.GetMapping;\nimport org.springframework.web.bind.annotation.PathVariable;\nimport org.springframework.web.bind.annotation.RestController;\n\n@RestController\npublic class NotificationController {\n\n    @Autowired\n    private NotificationFactory notificationFactory;\n\n    @GetMapping(\"/notify/{type}\")\n    public String sendNotification(@PathVariable String type) {\n        Notification notification = notificationFactory.createNotification(type);\n        notification.notifyUser();\n        return \"Notification sent: \" + type;\n    }\n}\n

After starting the Spring Boot application, accessing /notify/SMS or /notify/Email dynamically creates and sends the corresponding notification.

"},{"location":"fundamentaldives/DesignPatterns/FactoryMethod/#summary","title":"Summary","text":"

The Factory Pattern enables dynamic object creation without specifying exact classes, reducing coupling and improving maintainability. It uses a factory method to determine which class to instantiate. In Spring Boot, this pattern integrates seamlessly through factory beans and dependency injection, providing flexible and condition-based object creation.

Note

Factory Method is especially helpful in modular systems where new functionalities might be added frequently, and we want to minimize the impact of changes to existing code.

"},{"location":"fundamentaldives/DesignPatterns/Iterator/","title":"Iterator","text":"

The Iterator pattern is a behavioral design pattern that allows sequential access to elements of a collection without exposing its underlying structure. The goal is to provide a way to access the elements of an aggregate object (such as an array, list, or set) one by one, without needing to understand how the collection is implemented.

"},{"location":"fundamentaldives/DesignPatterns/Iterator/#what","title":"What ?","text":"

Iterator is a behavioral design pattern that lets you traverse elements of a collection without exposing its underlying representation (list, stack, tree, etc.).

Key Components

"},{"location":"fundamentaldives/DesignPatterns/Iterator/#when-to-use","title":"When to Use ?","text":""},{"location":"fundamentaldives/DesignPatterns/Iterator/#when-not-to-use","title":"When Not to Use ?","text":""},{"location":"fundamentaldives/DesignPatterns/Iterator/#advantages","title":"Advantages","text":""},{"location":"fundamentaldives/DesignPatterns/Iterator/#disadvantages","title":"DisAdvantages","text":""},{"location":"fundamentaldives/DesignPatterns/Iterator/#how-to-implement","title":"How to Implement ?","text":"Sample Example

Let's go through a simple Java example of a custom iterator for a list of strings.

Defining the Iterator Interface
interface Iterator<T> {\n    boolean hasNext();\n    T next();\n}\n
Creating a Concrete Iterator
class NameIterator implements Iterator<String> {\n    private String[] names;\n    private int index;\n\n    public NameIterator(String[] names) {\n        this.names = names;\n    }\n\n    @Override\n    public boolean hasNext() {\n        return index < names.length;\n    }\n\n    @Override\n    public String next() {\n        if (this.hasNext()) {\n            return names[index++];\n        }\n        return null;\n    }\n}\n
Defining the Aggregate (Collection) Interface
interface NameCollection {\n    Iterator<String> getIterator();\n}\n
Implementing the Concrete Aggregate (Collection)
class NameRepository implements NameCollection {\n    private String[] names = {\"John\", \"Alice\", \"Robert\", \"Michael\"};\n\n    @Override\n    public Iterator<String> getIterator() {\n        return new NameIterator(names);\n    }\n}\n
Usage Example in Java
public class IteratorPatternDemo {\n    public static void main(String[] args) {\n        NameRepository namesRepository = new NameRepository();\n        Iterator<String> iterator = namesRepository.getIterator();\n\n        while (iterator.hasNext()) {\n            String name = iterator.next();\n            System.out.println(\"Name: \" + name);\n        }\n    }\n}\n
Using Java\u2019s Built-in Iterators

Java already provides built-in iterators for its collection framework (Iterator, ListIterator, and Spliterator).

Example with Java\u2019s Built-in Iterator
import java.util.ArrayList;\nimport java.util.Iterator;\nimport java.util.List;\n\npublic class BuiltInIteratorExample {\n    public static void main(String[] args) {\n        List<String> names = new ArrayList<>();\n        names.add(\"John\");\n        names.add(\"Alice\");\n        names.add(\"Robert\");\n\n        Iterator<String> iterator = names.iterator();\n        while (iterator.hasNext()) {\n            System.out.println(\"Name: \" + iterator.next());\n        }\n    }\n}\n
Spring Boot Example

Spring Boot doesn\u2019t explicitly use the Iterator pattern as part of its core framework, but it does utilize Iterators internally (e.g., when dealing with ApplicationContext beans or data access layers). You can integrate the Iterator pattern within Spring Boot to handle collections of objects such as configurations, database entities, or APIs.

Create a Model Class
public class Book {\n    private String title;\n\n    public Book(String title) {\n        this.title = title;\n    }\n\n    public String getTitle() {\n        return title;\n    }\n}\n
Create a Repository to Hold Books
import java.util.ArrayList;\nimport java.util.Iterator;\nimport java.util.List;\n\npublic class BookRepository {\n    private List<Book> books = new ArrayList<>();\n\n    public BookRepository() {\n        books.add(new Book(\"Spring in Action\"));\n        books.add(new Book(\"Java 8 in Action\"));\n        books.add(new Book(\"Microservices Patterns\"));\n    }\n\n    public Iterator<Book> getIterator() {\n        return books.iterator();\n    }\n}\n
Service to Fetch Books Using Iterator
import org.springframework.stereotype.Service;\nimport java.util.Iterator;\n\n@Service\npublic class BookService {\n    private final BookRepository bookRepository = new BookRepository();\n\n    public void printBooks() {\n        Iterator<Book> iterator = bookRepository.getIterator();\n        while (iterator.hasNext()) {\n            Book book = iterator.next();\n            System.out.println(\"Book: \" + book.getTitle());\n        }\n    }\n}\n
Controller to Trigger the Book Listing
import org.springframework.beans.factory.annotation.Autowired;\nimport org.springframework.web.bind.annotation.GetMapping;\nimport org.springframework.web.bind.annotation.RequestMapping;\nimport org.springframework.web.bind.annotation.RestController;\n\n@RestController\n@RequestMapping(\"/books\")\npublic class BookController {\n\n    @Autowired\n    private BookService bookService;\n\n    @GetMapping\n    public void listBooks() {\n        bookService.printBooks();\n    }\n}\n

After running the application, accessing http://localhost:8080/books prints the list of books using the iterator.

"},{"location":"fundamentaldives/DesignPatterns/Iterator/#summary","title":"Summary","text":"

The Iterator pattern is a powerful way to decouple iteration logic from collections, ensuring a clean separation of concerns. It\u2019s useful in Java projects where complex or multiple ways of traversal are required. When integrated into Spring Boot, the pattern can be applied to iterate over configurations, data models, or APIs efficiently.

"},{"location":"fundamentaldives/DesignPatterns/Prototype/","title":"Prototype","text":""},{"location":"fundamentaldives/DesignPatterns/Prototype/#what","title":"What ?","text":"

The Prototype Pattern is a creational design pattern used when the cost of creating a new object is expensive or complicated. Instead of creating new instances from scratch, this pattern suggests cloning existing objects to produce new ones.

The pattern allows cloning or copying existing instances to create new ones, ensuring that new objects are created without going through the expensive or complex instantiation process repeatedly.

Key Characteristics

"},{"location":"fundamentaldives/DesignPatterns/Prototype/#when-to-use","title":"When to Use ?","text":""},{"location":"fundamentaldives/DesignPatterns/Prototype/#where-not-to-use","title":"Where Not to Use ?","text":""},{"location":"fundamentaldives/DesignPatterns/Prototype/#how-to-implement","title":"How to Implement ?","text":"Simple Example

In Java, the Prototype Pattern is implemented by making the class implement the Cloneable interface and overriding the clone() method from the Object class.

Example of Prototype in Java
class Address {\n    String street;\n    String city;\n\n    public Address(String street, String city) {\n        this.street = street;\n        this.city = city;\n    }\n\n    // Deep Copy\n    public Address(Address address) {\n        this.street = address.street;\n        this.city = address.city;\n    }\n\n    @Override\n    public String toString() {\n        return street + \", \" + city;\n    }\n}\n\nclass Employee implements Cloneable {\n    String name;\n    Address address;\n\n    public Employee(String name, Address address) {\n        this.name = name;\n        this.address = address;\n    }\n\n    // Shallow Copy\n    @Override\n    protected Object clone() throws CloneNotSupportedException {\n        return super.clone();\n    }\n\n    // Deep Copy\n    public Employee deepClone() {\n        return new Employee(this.name, new Address(this.address));\n    }\n\n    @Override\n    public String toString() {\n        return \"Employee: \" + name + \", Address: \" + address;\n    }\n}\n\npublic class PrototypeExample {\n    public static void main(String[] args) throws CloneNotSupportedException {\n        Employee emp1 = new Employee(\"Alice\", new Address(\"123 Street\", \"New York\"));\n\n        // Shallow Clone\n        Employee emp2 = (Employee) emp1.clone();\n\n        // Deep Clone\n        Employee emp3 = emp1.deepClone();\n\n        System.out.println(\"Original: \" + emp1);\n        System.out.println(\"Shallow Copy: \" + emp2);\n        System.out.println(\"Deep Copy: \" + emp3);\n\n        // Modify the original object to see the effect on shallow vs deep copy\n        emp1.address.street = \"456 Avenue\";\n\n        System.out.println(\"After modifying the original object:\");\n        System.out.println(\"Original: \" + emp1);\n        System.out.println(\"Shallow Copy: \" + emp2);\n        System.out.println(\"Deep Copy: \" + emp3);\n    }\n}\n
Output
Original: Employee: Alice, Address: 123 Street, New York\nShallow Copy: Employee: Alice, Address: 123 Street, New York\nDeep Copy: Employee: Alice, Address: 123 Street, New York\n\nAfter modifying the original object:\nOriginal: Employee: Alice, Address: 456 Avenue, New York\nShallow Copy: Employee: Alice, Address: 456 Avenue, New York\nDeep Copy: Employee: Alice, Address: 123 Street, New York\n
Explanation Spring Boot Example

Spring Framework allows defining prototype-scoped beans. Each time you request a bean with the prototype scope, Spring returns a new instance, effectively following the Prototype Pattern.

"},{"location":"fundamentaldives/DesignPatterns/Prototype/#how-to-use-prototype-scope-in-spring-boot","title":"How to Use Prototype Scope in Spring Boot","text":"
  1. Add @Scope annotation to the bean definition.
  2. Use prototype scope to ensure each request gets a new object.
"},{"location":"fundamentaldives/DesignPatterns/Prototype/#simple-prototype-scope-example-in-spring-boot","title":"Simple Prototype Scope Example in Spring Boot","text":"Bean Definition
import org.springframework.context.annotation.Scope;\nimport org.springframework.stereotype.Component;\n\n@Component\n@Scope(\"prototype\")\npublic class Employee {\n    public Employee() {\n        System.out.println(\"New Employee instance created.\");\n    }\n}\n
Controller
import org.springframework.beans.factory.annotation.Autowired;\nimport org.springframework.web.bind.annotation.GetMapping;\nimport org.springframework.web.bind.annotation.RestController;\n\n@RestController\npublic class EmployeeController {\n\n    @Autowired\n    private Employee employee1;\n\n    @Autowired\n    private Employee employee2;\n\n    @GetMapping(\"/employees\")\n    public String getEmployees() {\n        return \"Employee 1: \" + employee1 + \" | Employee 2: \" + employee2;\n    }\n}\n
Output

When you run this application, both prototype injection points are resolved during context startup, so you will see:

New Employee instance created.\nNew Employee instance created.\n

This shows that a new instance is created each time a prototype-scoped bean is injected.

"},{"location":"fundamentaldives/DesignPatterns/Prototype/#summary","title":"Summary","text":"

The Prototype Pattern offers an elegant way to clone existing objects, saving the overhead of complex object creation. It fits well when objects are expensive to create or share the same initial configuration. With Java's cloning mechanisms and Spring Boot's prototype scope, it is easy to implement. However, care must be taken when handling deep versus shallow copies, and the pattern should be avoided when objects are inexpensive to create.

"},{"location":"fundamentaldives/DesignPatterns/Singleton/","title":"Singleton","text":""},{"location":"fundamentaldives/DesignPatterns/Singleton/#what","title":"What ?","text":"

The Singleton Pattern is a creational design pattern that ensures a class has only one instance and provides a global access point to that instance.

Singleton is useful when exactly one instance of a class is needed across the system, like for logging, configuration, database connection pools, etc.

"},{"location":"fundamentaldives/DesignPatterns/Singleton/#when-to-use","title":"When to Use ?","text":""},{"location":"fundamentaldives/DesignPatterns/Singleton/#when-not-to-use","title":"When Not to Use ?","text":""},{"location":"fundamentaldives/DesignPatterns/Singleton/#why-use","title":"Why Use ?","text":""},{"location":"fundamentaldives/DesignPatterns/Singleton/#how-to-implement","title":"How to Implement ?","text":""},{"location":"fundamentaldives/DesignPatterns/Singleton/#eager-initialization","title":"Eager Initialization","text":"

The instance is created when the class is loaded. This is the simplest way, but it doesn\u2019t support lazy loading.

Eager Initialization Implementation
public class EagerSingleton {\n    private static final EagerSingleton INSTANCE = new EagerSingleton();\n\n    // Private constructor to prevent instantiation\n    private EagerSingleton() {}\n\n    public static EagerSingleton getInstance() {\n        return INSTANCE;\n    }\n}\n

When to Use Eager

When the instance is required throughout the application, and we are okay with it being created at startup.

"},{"location":"fundamentaldives/DesignPatterns/Singleton/#lazy-initialization","title":"Lazy Initialization","text":"

The instance is created only when needed (on first access). But this version is not thread-safe.

Lazy Initialization Implementation
public class LazySingleton {\n    private static LazySingleton instance;\n\n    private LazySingleton() {}\n\n    public static LazySingleton getInstance() {\n        if (instance == null) {\n            instance = new LazySingleton();\n        }\n        return instance;\n    }\n}\n

Issue with Lazy

Lazy initialization as written is not thread-safe: two threads can both observe instance == null at the same time and create two separate instances, so it is not suitable for multithreaded environments.

"},{"location":"fundamentaldives/DesignPatterns/Singleton/#using-synchronized","title":"Using Synchronized","text":"

This solves the issue of thread safety by synchronizing the access method.

Synchronized Implementation
public class ThreadSafeSingleton {\n    private static ThreadSafeSingleton instance;\n\n    private ThreadSafeSingleton() {}\n\n    public static synchronized ThreadSafeSingleton getInstance() {\n        if (instance == null) {\n            instance = new ThreadSafeSingleton();\n        }\n        return instance;\n    }\n}\n

Issue with Synchronized

Performance overhead due to synchronization.

"},{"location":"fundamentaldives/DesignPatterns/Singleton/#double-checked-locking","title":"Double-Checked Locking","text":"

This improves performance by entering the synchronized block only on first access. The volatile keyword is essential here: it prevents a thread from observing a partially constructed instance due to instruction reordering.

Double-Checked Locking Implementation
public class DoubleCheckedLockingSingleton {\n    private static volatile DoubleCheckedLockingSingleton instance;\n\n    private DoubleCheckedLockingSingleton() {}\n\n    public static DoubleCheckedLockingSingleton getInstance() {\n        if (instance == null) {\n            synchronized (DoubleCheckedLockingSingleton.class) {\n                if (instance == null) {\n                    instance = new DoubleCheckedLockingSingleton();\n                }\n            }\n        }\n        return instance;\n    }\n}\n
"},{"location":"fundamentaldives/DesignPatterns/Singleton/#bill-pugh-singleton","title":"Bill Pugh Singleton","text":"

This approach leverages static inner classes, which ensures thread safety and lazy loading without synchronization overhead.

Bill Pugh Singleton Implementation
public class BillPughSingleton {\n    private BillPughSingleton() {}\n\n    // Static inner class responsible for holding the instance\n    private static class SingletonHelper {\n        private static final BillPughSingleton INSTANCE = new BillPughSingleton();\n    }\n\n    public static BillPughSingleton getInstance() {\n        return SingletonHelper.INSTANCE;\n    }\n}\n

Best Practice

"},{"location":"fundamentaldives/DesignPatterns/Singleton/#enum-singleton","title":"Enum Singleton","text":"

This approach is the most concise and prevents issues with serialization and reflection attacks.

Enum Singleton Implementation
public enum EnumSingleton {\n    INSTANCE;\n\n    public void someMethod() {\n        System.out.println(\"Enum Singleton Instance\");\n    }\n}\n
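
Usage is simply a reference to the single enum constant, for example:

public class App {\n    public static void main(String[] args) {\n        EnumSingleton.INSTANCE.someMethod(); // always the same instance\n    }\n}\n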

Recommended

"},{"location":"fundamentaldives/DesignPatterns/Singleton/#in-spring-boot","title":"In Spring Boot","text":"

In Spring Boot, Spring\u2019s IoC (Inversion of Control) container creates beans as singletons by default, so each bean in Spring is, by default, a singleton. You don\u2019t need to explicitly implement the Singleton pattern; instead, annotate the class with @Component or @Service, and Spring ensures that only one instance is created and managed.

Spring Boot Example How to init in a Spring Boot application
import org.springframework.stereotype.Component;\n\n@Component\npublic class MySingletonService {\n    public void doSomething() {\n        System.out.println(\"Singleton service is working\");\n    }\n}\n
How to use in a Spring Boot application
import org.springframework.beans.factory.annotation.Autowired;\nimport org.springframework.web.bind.annotation.GetMapping;\nimport org.springframework.web.bind.annotation.RestController;\n\n@RestController\npublic class MyController {\n\n    private final MySingletonService singletonService;\n\n    @Autowired\n    public MyController(MySingletonService singletonService) {\n        this.singletonService = singletonService;\n    }\n\n    @GetMapping(\"/test\")\n    public String test() {\n        singletonService.doSomething();\n        return \"Check logs for Singleton Service\";\n    }\n}\n

Note

Spring manages lifecycle and thread safety for you, ensuring it behaves like a Singleton without extra code.

"},{"location":"fundamentaldives/DesignPatterns/Singleton/#comparison","title":"Comparison","text":"Implementation Thread Safety Lazy Initialization Serialization Safe Ease of Implementation Eager Initialization Yes No No Easy Lazy Initialization No Yes No Easy Thread-safe Singleton (Synchronized) Yes Yes No Moderate Double-Checked Locking Singleton Yes Yes No Moderate Bill Pugh Singleton Yes Yes No Best Practice Enum Singleton Yes Yes Yes Recommended"},{"location":"fundamentaldives/DesignPatterns/Singleton/#potential-issues","title":"Potential Issues","text":""},{"location":"fundamentaldives/DesignPatterns/Singleton/#summary","title":"Summary","text":"

The Singleton Pattern is a powerful tool when used appropriately. However, misuse can lead to tightly coupled code, concurrency issues, and testing difficulties.

Note

If you are working with Spring Boot, rely on Spring\u2019s built-in singleton beans instead of implementing your own singleton logic. Where thread safety, serialization, or distributed behavior is required, choose the appropriate Singleton implementation like Enum Singleton or Bill Pugh Singleton.

By default, a single instance of the bean is created and shared across the entire application (singleton scope). If two or more components use the same bean, they will refer to the same instance. However, if you need a new instance every time a bean is requested, you can change the scope to prototype. But be mindful Spring\u2019s singleton scope simplifies things like caching and state consistency, while prototype beans may introduce complexity.

"},{"location":"fundamentaldives/DesignPatterns/Strategy/","title":"Strategy","text":""},{"location":"fundamentaldives/DesignPatterns/Strategy/#what","title":"What ?","text":"

The Strategy Pattern is a behavioral design pattern that allows you to define a family of algorithms, encapsulate each one, and make them interchangeable. It lets the algorithm vary independently from clients that use it, promoting flexibility, scalability, and separation of concerns.

In simpler terms, It enables selecting a specific algorithm at runtime based on the context, without modifying the client code.

"},{"location":"fundamentaldives/DesignPatterns/Strategy/#when-to-use","title":"When to Use?","text":""},{"location":"fundamentaldives/DesignPatterns/Strategy/#why-to-use","title":"Why to Use ?","text":""},{"location":"fundamentaldives/DesignPatterns/Strategy/#when-not-to-use","title":"When Not to Use?","text":""},{"location":"fundamentaldives/DesignPatterns/Strategy/#how-to-implement","title":"How to Implement ?","text":"Simple Example

Let\u2019s implement a payment system that allows different payment methods using the Strategy Pattern. We will define multiple payment strategies (like Credit Card and PayPal) and switch between them dynamically.

Step-1: Create the Strategy Interface
// PaymentStrategy.java\npublic interface PaymentStrategy {\n    void pay(int amount);\n}\n
Step-2: Implement Concrete Strategies
// CreditCardStrategy.java\npublic class CreditCardStrategy implements PaymentStrategy {\n    private String cardNumber;\n    private String name;\n\n    public CreditCardStrategy(String cardNumber, String name) {\n        this.cardNumber = cardNumber;\n        this.name = name;\n    }\n\n    @Override\n    public void pay(int amount) {\n        System.out.println(amount + \" paid with credit card.\");\n    }\n}\n\n// PayPalStrategy.java\npublic class PayPalStrategy implements PaymentStrategy {\n    private String email;\n\n    public PayPalStrategy(String email) {\n        this.email = email;\n    }\n\n    @Override\n    public void pay(int amount) {\n        System.out.println(amount + \" paid using PayPal.\");\n    }\n}\n
Step-3: Create a Context Class
// PaymentContext.java\npublic class PaymentContext {\n    private PaymentStrategy strategy;\n\n    public PaymentContext(PaymentStrategy strategy) {\n        this.strategy = strategy;\n    }\n\n    public void setPaymentStrategy(PaymentStrategy strategy) {\n        this.strategy = strategy;\n    }\n\n    public void pay(int amount) {\n        strategy.pay(amount);\n    }\n}\n
Step-4: Test the Strategy Pattern
public class StrategyPatternDemo {\n    public static void main(String[] args) {\n        PaymentContext context = new PaymentContext(new CreditCardStrategy(\"1234-5678-9012\", \"John Doe\"));\n        context.pay(100);\n\n        // Switch strategy at runtime\n        context.setPaymentStrategy(new PayPalStrategy(\"john.doe@example.com\"));\n        context.pay(200);\n    }\n}\n
Spring Boot Example

In a Spring Boot application, the Strategy Pattern can be applied by injecting different strategy implementations using Spring\u2019s dependency injection.

Let's build a simple notification service where the user can choose between sending notifications via Email or SMS.

Create the Strategy Interface
// NotificationStrategy.java\npublic interface NotificationStrategy {\n    void sendNotification(String message);\n}\n
Implement the Concrete Strategies
// EmailNotification.java\nimport org.springframework.stereotype.Service;\n\n@Service(\"email\")\npublic class EmailNotification implements NotificationStrategy {\n    @Override\n    public void sendNotification(String message) {\n        System.out.println(\"Sending Email: \" + message);\n    }\n}\n\n// SMSNotification.java\nimport org.springframework.stereotype.Service;\n\n@Service(\"sms\")\npublic class SMSNotification implements NotificationStrategy {\n    @Override\n    public void sendNotification(String message) {\n        System.out.println(\"Sending SMS: \" + message);\n    }\n}\n
Create a Context Class to Use the Strategy
// NotificationContext.java\nimport org.springframework.stereotype.Component;\n\nimport java.util.Map;\n\n@Component\npublic class NotificationContext {\n\n    private final Map<String, NotificationStrategy> strategies;\n\n    public NotificationContext(Map<String, NotificationStrategy> strategies) {\n        this.strategies = strategies;\n    }\n\n    public void send(String type, String message) {\n        NotificationStrategy strategy = strategies.get(type);\n        if (strategy == null) {\n            throw new IllegalArgumentException(\"No such notification type\");\n        }\n        strategy.sendNotification(message);\n    }\n}\n
Create the Controller to Use the Notification Service
// NotificationController.java\nimport org.springframework.web.bind.annotation.*;\n\n@RestController\n@RequestMapping(\"/notify\")\npublic class NotificationController {\n\n    private final NotificationContext context;\n\n    public NotificationController(NotificationContext context) {\n        this.context = context;\n    }\n\n    @PostMapping(\"/{type}\")\n    public void sendNotification(@PathVariable String type, @RequestBody String message) {\n        context.send(type, message);\n    }\n}\n
Application Configuration and Running
// Application.java\nimport org.springframework.boot.SpringApplication;\nimport org.springframework.boot.autoconfigure.SpringBootApplication;\n\n@SpringBootApplication\npublic class Application {\n    public static void main(String[] args) {\n        SpringApplication.run(Application.class, args);\n    }\n}\n
How It Works Alternative Ways

Using Enum-based Strategies: If the algorithms are simple and limited, you can use an enum with methods for strategy logic.

public enum Operation {\n    ADD {\n        @Override\n        public int execute(int a, int b) {\n            return a + b;\n        }\n    },\n    SUBTRACT {\n        @Override\n        public int execute(int a, int b) {\n            return a - b;\n        }\n    };\n\n    public abstract int execute(int a, int b);\n}\n

Using Java 8 Lambdas: Since Java 8, you can use lambdas to avoid creating multiple strategy classes.

import java.util.function.Consumer;\n\npublic class LambdaStrategyDemo {\n    public static void main(String[] args) {\n        Consumer<String> emailStrategy = message -> System.out.println(\"Email: \" + message);\n        Consumer<String> smsStrategy = message -> System.out.println(\"SMS: \" + message);\n\n        emailStrategy.accept(\"Hello via Email!\");\n        smsStrategy.accept(\"Hello via SMS!\");\n    }\n}\n
"},{"location":"fundamentaldives/DesignPatterns/Strategy/#summary","title":"Summary","text":"

The Strategy Pattern is a powerful way to manage dynamic behavior selection in a clean and decoupled way. In a Spring Boot application, you can easily integrate it by using dependency injection. However, it\u2019s essential to use the pattern wisely to avoid unnecessary complexity or overhead. Use it when multiple behaviors or algorithms need to vary independently without modifying client code. Avoid using it if the added complexity is not justified.

"},{"location":"fundamentaldives/FundamentalConcepts/ConcurrencyParallelism/","title":"Concurrency and Parallelism","text":"

Both concurrency and parallelism refer to ways a computer performs multiple tasks, but they differ in how the tasks are executed. Let's go through both in this article, along with related concepts like threads, processes, and programs.

"},{"location":"fundamentaldives/FundamentalConcepts/ConcurrencyParallelism/#what-is-process","title":"What is Process ?","text":"

A process is an instance of a running program (executable code). Each process runs in isolation and gets its own memory space. For example, opening a browser or a text editor creates a process.

Characteristics

"},{"location":"fundamentaldives/FundamentalConcepts/ConcurrencyParallelism/#what-is-thread","title":"What is Thread ?","text":"

A thread is the smallest unit of execution within a process. Threads run within a process and share the same memory space. For example, a web browser might use multiple threads: one for rendering pages, one for handling user input, and another for downloading files.

Characteristics

"},{"location":"fundamentaldives/FundamentalConcepts/ConcurrencyParallelism/#threads-vs-processes","title":"Threads vs Processes","text":"Aspect Threads Processes Memory Space Shared within the same process Separate for each process Overhead Low (lightweight) High (needs its own resources) Communication Easy (shared memory) Complex (requires IPC) Execution Within a single process Each process runs independently Parallelism Can run on multiple cores Can run on multiple cores"},{"location":"fundamentaldives/FundamentalConcepts/ConcurrencyParallelism/#what-is-concurrency","title":"What is Concurrency ?","text":"

Concurrency is when multiple tasks make progress within the same time frame, but not necessarily at the same exact moment. Tasks switch back and forth, sharing resources like CPU time.

Analogy

It\u2019s like a chef preparing multiple dishes, working on one dish for a few minutes, switching to another, and then returning to the previous dish.

"},{"location":"fundamentaldives/FundamentalConcepts/ConcurrencyParallelism/#concurrency-with-threads","title":"Concurrency with Threads","text":""},{"location":"fundamentaldives/FundamentalConcepts/ConcurrencyParallelism/#concurrency-with-processes","title":"Concurrency with Processes","text":"

Note

Concurrency focuses on dealing with multiple tasks by time-sharing resources.
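
As a minimal sketch of this idea (an illustrative addition, not from the original article), the example below submits four tasks to a two-thread pool; the tasks make progress by sharing the two pool threads rather than all running at the same moment.

import java.util.concurrent.ExecutorService;\nimport java.util.concurrent.Executors;\n\npublic class ConcurrencyDemo {\n    public static void main(String[] args) {\n        // Four tasks share two threads, so they progress by interleaving\n        ExecutorService pool = Executors.newFixedThreadPool(2);\n        for (int task = 1; task <= 4; task++) {\n            final int id = task;\n            pool.submit(() -> System.out.println(\"Task \" + id + \" on \" + Thread.currentThread().getName()));\n        }\n        pool.shutdown();\n    }\n}\n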

"},{"location":"fundamentaldives/FundamentalConcepts/ConcurrencyParallelism/#issues-in-concurrency","title":"Issues in Concurrency","text":"

Below are key issues associated with concurrency.

Race Conditions

When two or more threads/processes try to access and modify shared data simultaneously, the final result may depend on the sequence of execution, leading to unpredictable outcomes.

Example: Two bank transactions updating the same account balance at the same time might result in lost updates.

How to Mitigate

Use synchronization mechanisms like locks or mutexes to control access to shared resources.
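
For illustration, here is a minimal hypothetical sketch contrasting a racy increment with two safe alternatives, an intrinsic lock and an atomic variable:

import java.util.concurrent.atomic.AtomicInteger;\n\nclass Counter {\n    private int count = 0;\n    private final AtomicInteger atomicCount = new AtomicInteger();\n\n    void unsafeIncrement() { count++; }                       // race: read-modify-write is not atomic\n    synchronized void lockedIncrement() { count++; }          // intrinsic lock serializes access\n    void atomicIncrement() { atomicCount.incrementAndGet(); } // lock-free atomic update\n}\n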

Deadlocks

Occurs when two or more threads/processes block each other by holding resources and waiting for resources held by the other. Example: Process A holds Resource 1 and waits for Resource 2, which is held by Process B, and vice versa.

How to Mitigate

Use techniques like resource ordering, deadlock detection, or timeouts to avoid deadlocks.
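
A common form of resource ordering is to always acquire locks in one fixed global order. The hypothetical Account sketch below orders lock acquisition by account id, so two concurrent transfers can never hold the locks in opposite orders:

class Account {\n    private final long id;\n    private double balance;\n\n    Account(long id, double balance) { this.id = id; this.balance = balance; }\n\n    // Both locks are always taken in ascending id order (assuming distinct ids),\n    // so transfer(a, b) and transfer(b, a) cannot deadlock each other.\n    static void transfer(Account from, Account to, double amount) {\n        Account first = from.id < to.id ? from : to;\n        Account second = from.id < to.id ? to : from;\n        synchronized (first) {\n            synchronized (second) {\n                from.balance -= amount;\n                to.balance += amount;\n            }\n        }\n    }\n}\n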

Livelock

In livelock, threads are constantly changing states to respond to each other but never make actual progress. It\u2019s similar to two people trying to step aside but always stepping in each other\u2019s way. Example: Two threads repeatedly yield control to avoid conflict, but neither progresses.

How to Mitigate

Add randomness or back-off mechanisms to break the cycle.

Starvation

A thread or process may be blocked indefinitely because other higher-priority tasks consume all the resources. Example: A low-priority thread never gets CPU time because high-priority threads always take precedence.

How to Mitigate

Use fair scheduling algorithms to ensure all tasks eventually get a chance to execute.
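
In Java, one concrete fair-scheduling tool is a fair ReentrantLock, which hands the lock to the longest-waiting thread so no thread is starved indefinitely. A minimal sketch:

import java.util.concurrent.locks.ReentrantLock;\n\nclass FairResource {\n    // true enables fairness: waiting threads acquire the lock in FIFO order\n    private final ReentrantLock lock = new ReentrantLock(true);\n\n    void use() {\n        lock.lock();\n        try {\n            // critical section\n        } finally {\n            lock.unlock();\n        }\n    }\n}\n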

Context Switching Overhead

Switching between multiple threads or processes incurs a cost, as the CPU saves and restores the state of each thread. Excessive context switching can reduce performance. Example: An overloaded web server with too many threads may spend more time switching contexts than doing actual work.

How to Mitigate

Minimize the number of threads and optimize the task scheduling.

"},{"location":"fundamentaldives/FundamentalConcepts/ConcurrencyParallelism/#what-is-parallelism","title":"What is Parallelism ?","text":"

Parallelism is when multiple tasks are executed simultaneously, usually on different processors or cores.

Analogy

It\u2019s like having multiple chefs, each cooking one dish at the same time.

"},{"location":"fundamentaldives/FundamentalConcepts/ConcurrencyParallelism/#parallelism-with-threads","title":"Parallelism with Threads","text":""},{"location":"fundamentaldives/FundamentalConcepts/ConcurrencyParallelism/#parallelism-with-processes","title":"Parallelism with Processes","text":"

Note

Parallelism requires multiple CPUs or cores for real simultaneous execution.
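
As a minimal sketch (illustrative, not from the original article), a Java parallel stream splits work across cores via the common ForkJoinPool, so the partial sums below are computed simultaneously:

import java.util.stream.LongStream;\n\npublic class ParallelismDemo {\n    public static void main(String[] args) {\n        // The range is split across available cores and summed in parallel\n        long sum = LongStream.rangeClosed(1, 1_000_000).parallel().sum();\n        System.out.println(\"Sum: \" + sum);\n    }\n}\n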

"},{"location":"fundamentaldives/FundamentalConcepts/ConcurrencyParallelism/#issues-in-parallelism","title":"Issues in Parallelism","text":"

Below are key issues associated with parallelism.

Load Imbalance

If the workload is not evenly distributed across threads or processes, some cores might remain underutilized while others are overloaded. Example: In a matrix multiplication task, if one thread processes a large chunk and another a small chunk, the first thread might take longer, slowing down the whole task.

How to Mitigate

Use dynamic load balancing or work stealing techniques to distribute the workload effectively.
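
Java's ForkJoinPool is a ready-made example of work stealing: idle worker threads steal queued subtasks from busy ones, smoothing out load imbalance automatically. Below is a minimal hypothetical sketch that splits an array sum into subtasks:

import java.util.concurrent.ForkJoinPool;\nimport java.util.concurrent.RecursiveTask;\n\n// Usage sketch: long total = new ForkJoinPool().invoke(new SumTask(data, 0, data.length));\nclass SumTask extends RecursiveTask<Long> {\n    private final long[] data;\n    private final int from, to;\n\n    SumTask(long[] data, int from, int to) { this.data = data; this.from = from; this.to = to; }\n\n    @Override\n    protected Long compute() {\n        if (to - from <= 1_000) { // small chunk: compute directly\n            long sum = 0;\n            for (int i = from; i < to; i++) sum += data[i];\n            return sum;\n        }\n        int mid = (from + to) / 2;\n        SumTask left = new SumTask(data, from, mid);\n        left.fork(); // queued, and may be stolen by an idle worker\n        long rightResult = new SumTask(data, mid, to).compute();\n        return rightResult + left.join();\n    }\n}\n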

Scalability Bottlenecks

As more threads or processes are added, the overhead of synchronization and communication increases, limiting performance improvements. Example: A program may scale well with 4 threads but show diminishing returns with 16 threads due to synchronization overhead.

How to Mitigate

Optimize algorithms for scalability and minimize shared resources to reduce synchronization costs.
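
A standard way to quantify this limit (well known, though not stated in the original) is Amdahl's law: if a fraction p of the work can be parallelized, the maximum speedup on n cores is 1 / ((1 - p) + p / n). With p = 0.9, for example, 4 cores give at most about a 3.1x speedup but 16 cores only about 6.4x, which is exactly this pattern of diminishing returns.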

False Sharing

Occurs when multiple threads on different cores modify variables that are close in memory, leading to unnecessary cache invalidations and reduced performance. Example: Two threads updating variables in the same cache line can cause frequent cache synchronization, slowing execution.

How to Mitigate

Align data properly in memory to avoid false sharing.
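
As a rough hypothetical sketch of such alignment (field layout is ultimately up to the JVM, which also offers the internal @Contended annotation for this purpose), padding a hot field can push neighboring data onto a different cache line:

// Assumes ~64-byte cache lines; the padding keeps the hot counter from\n// sharing a line with other frequently written fields. Note that the\n// JVM may reorder fields, so manual padding like this is fragile.\nclass PaddedCounter {\n    volatile long value;\n    long p1, p2, p3, p4, p5, p6, p7; // padding\n}\n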

Communication Overhead

In parallel systems, threads or processes may need to communicate with each other, which adds overhead. Example: In distributed computing, passing messages between nodes can slow down the computation.

How to Mitigate

Reduce communication frequency or use message batching techniques to minimize overhead.

Debugging and Testing Complexity

Debugging concurrent or parallel programs is harder because issues like race conditions or deadlocks may only appear under specific conditions, making them difficult to reproduce. Example: A race condition might only occur when threads execute in a specific order, which is hard to detect in testing.

How to Mitigate

Use debugging tools like thread analyzers and log events to trace execution paths.

"},{"location":"fundamentaldives/FundamentalConcepts/ConcurrencyParallelism/#example-scenarios","title":"Example Scenarios","text":"

Concurrent Programming (e.g., in Java, Python)

Parallel Programming (e.g., using Python's multiprocessing or CUDA for GPU computation)

"},{"location":"fundamentaldives/FundamentalConcepts/ConcurrencyParallelism/#when-to-use","title":"When to Use ?","text":""},{"location":"fundamentaldives/FundamentalConcepts/ConcurrencyParallelism/#common-issues","title":"Common Issues","text":"Data Consistency and Synchronization

Ensuring that shared data remains consistent when accessed by multiple threads or processes is challenging. Example: If multiple threads increment the same counter, the final result may be incorrect without proper synchronization.

How to Mitigate

Use locks, semaphores, or atomic operations to ensure data consistency.

Performance Trade-offs

Parallel or concurrent execution does not always lead to better performance. In some cases, overhead from synchronization, communication, and context switching can negate performance gains. Example: A parallel algorithm may run slower on a small dataset due to the overhead of managing multiple threads.

How to Mitigate

Assess whether the overhead is justified and use profiling tools to analyze performance.

Non-Deterministic Behavior

In concurrent and parallel systems, the order of execution is not guaranteed, leading to non-deterministic results. Example: Running the same multi-threaded program twice may produce different outcomes, making testing and debugging difficult.

How to Mitigate

Use locks and barriers carefully, and design programs to tolerate or avoid non-determinism where possible.

Resource Contention

Threads and processes compete for shared resources, such as memory, I/O, and network bandwidth, leading to bottlenecks. Example: Multiple processes writing to the same disk simultaneously may degrade performance.

How to Mitigate

Optimize resource usage and avoid unnecessary contention by reducing shared resources.

"},{"location":"fundamentaldives/FundamentalConcepts/ConcurrencyParallelism/#summary","title":"Summary","text":"

Concurrency deals with multiple tasks making progress within the same period (may or may not be simultaneous) whereas Parallelism deals with tasks running simultaneously on different cores or processors.

Processes and threads are core to both concurrency and parallelism, with threads sharing memory within a process and processes running independently with isolated memory.

While concurrency and parallelism offer significant benefits, they also come with substantial challenges. Managing issues such as race conditions, deadlocks, false sharing, and debugging complexity requires thoughtful design and appropriate use of synchronization techniques. Additionally, scalability bottlenecks and communication overhead can limit the effectiveness of parallel systems.

To mitigate these issues, the key fixes discussed above are: synchronization primitives for shared state, fair scheduling to prevent starvation, load balancing to keep cores evenly utilized, and profiling to confirm that the overhead of concurrency or parallelism is actually justified.

"},{"location":"fundamentaldives/FundamentalPrinciples/DRY/","title":"DRY Principle","text":""},{"location":"fundamentaldives/FundamentalPrinciples/DRY/#what","title":"What ?","text":"

The DRY Principle stands for Don\u2019t Repeat Yourself. It is a fundamental software development principle aimed at reducing repetition of code and logic. The main idea is that duplication introduces potential risks: if you need to update logic in multiple places, you might forget some, leading to bugs and inconsistencies. When applied well, it improves maintainability, scalability, and clarity, something that a lot of codebases miss.

\"Every piece of knowledge must have a single, unambiguous, authoritative representation within a system.\"

In other words, the DRY principle encourages developers to write modular, reusable code and avoid duplicating the same functionality in multiple places. It encourages us to minimize redundancy and write code that does one thing well, making our lives (and the lives of those who maintain our code) much easier.

"},{"location":"fundamentaldives/FundamentalPrinciples/DRY/#when-to-use","title":"When to Use ?","text":""},{"location":"fundamentaldives/FundamentalPrinciples/DRY/#why-to-use","title":"Why to Use ?","text":""},{"location":"fundamentaldives/FundamentalPrinciples/DRY/#when-not-to-use","title":"When Not to Use ?","text":""},{"location":"fundamentaldives/FundamentalPrinciples/DRY/#how-to-use-example","title":"How to Use Example ?","text":"

Let's cover an example where we apply the DRY principle to refactor duplicated logic into reusable methods.

Without DRY (Code Duplication) Example
public class OrderService {\n\n    public void placeOrder(int productId, int quantity) {\n        if (quantity <= 0) {\n            throw new IllegalArgumentException(\"Quantity must be greater than zero.\");\n        }\n        // Logic to place order\n        System.out.println(\"Order placed for product: \" + productId);\n    }\n\n    public void cancelOrder(int orderId) {\n        if (orderId <= 0) {\n            throw new IllegalArgumentException(\"Order ID must be greater than zero.\");\n        }\n        // Logic to cancel order\n        System.out.println(\"Order cancelled: \" + orderId);\n    }\n\n    public void updateOrder(int orderId, int quantity) {\n        if (orderId <= 0) {\n            throw new IllegalArgumentException(\"Order ID must be greater than zero.\");\n        }\n        if (quantity <= 0) {\n            throw new IllegalArgumentException(\"Quantity must be greater than zero.\");\n        }\n        // Logic to update order\n        System.out.println(\"Order updated with new quantity: \" + quantity);\n    }\n}\n
Explanation

We can extract the common logic into a reusable private method to apply the DRY principle.

With DRY (Refactored Code) Example
public class OrderService {\n\n    public void placeOrder(int productId, int quantity) {\n        validateQuantity(quantity);\n        // Logic to place order\n        System.out.println(\"Order placed for product: \" + productId);\n    }\n\n    public void cancelOrder(int orderId) {\n        validateOrderId(orderId);\n        // Logic to cancel order\n        System.out.println(\"Order cancelled: \" + orderId);\n    }\n\n    public void updateOrder(int orderId, int quantity) {\n        validateOrderId(orderId);\n        validateQuantity(quantity);\n        // Logic to update order\n        System.out.println(\"Order updated with new quantity: \" + quantity);\n    }\n\n    // Reusable validation methods\n    private void validateOrderId(int orderId) {\n        if (orderId <= 0) {\n            throw new IllegalArgumentException(\"Order ID must be greater than zero.\");\n        }\n    }\n\n    private void validateQuantity(int quantity) {\n        if (quantity <= 0) {\n            throw new IllegalArgumentException(\"Quantity must be greater than zero.\");\n        }\n    }\n}\n
Explanation "},{"location":"fundamentaldives/FundamentalPrinciples/DRY/#summary","title":"Summary","text":"

The DRY principle ensures that code duplication is minimized for easier maintenance and improved consistency. In our example, we extracted common validation logic to private methods, adhering to DRY and making the code more maintainable. However, always be careful to avoid over-abstraction, as not all code repetition is bad. The goal is to achieve a balance between simplicity and reusability.

"},{"location":"fundamentaldives/FundamentalPrinciples/KISS/","title":"KISS Principle","text":""},{"location":"fundamentaldives/FundamentalPrinciples/KISS/#what","title":"What ?","text":"

The KISS Principle stands for \"Keep It Simple, Stupid\". It\u2019s a design principle that emphasizes simplicity, stating that systems and code work best if they are kept simple rather than made unnecessarily complex. The main idea is to avoid over-engineering and unnecessary complications, which can introduce more bugs, make the code harder to maintain, and increase development time.

KISS encourages developers to create code or solutions that are easy to understand, maintain, and modify. The idea is not to use complicated approaches or unnecessary abstractions when a simpler, more straightforward approach will do.

\u201cSimple\u201d here doesn\u2019t mean incomplete or simplistic; it means clear, focused, and straightforward.

"},{"location":"fundamentaldives/FundamentalPrinciples/KISS/#when-to-use","title":"When to Use ?","text":""},{"location":"fundamentaldives/FundamentalPrinciples/KISS/#why-to-use","title":"Why to Use ?","text":""},{"location":"fundamentaldives/FundamentalPrinciples/KISS/#where-not-to-use","title":"Where Not to Use ?","text":"

While simplicity is valuable, there are situations where the KISS principle might not apply fully. Over-simplifying can sometimes lead to problems.

"},{"location":"fundamentaldives/FundamentalPrinciples/KISS/#how-to-use-in-practice","title":"How to Use in Practice ?","text":""},{"location":"fundamentaldives/FundamentalPrinciples/KISS/#how-to-use-example","title":"How to Use Example ?","text":"

Let's consider an example where we want to check whether a number is even or odd.

Non-KISS Complex Code
public class NumberUtils {\n    public static boolean isEven(int number) {\n        return isDivisibleByTwo(number);\n    }\n\n    private static boolean isDivisibleByTwo(int number) {\n        if (number % 2 == 0) {\n            return true;\n        } else {\n            return false;\n        }\n    }\n\n    public static void main(String[] args) {\n        System.out.println(\"Is 4 even? \" + isEven(4));\n    }\n}\n
Explanation KISS Simpler Code
public class NumberUtils {\n    public static boolean isEven(int number) {\n        return number % 2 == 0;\n    }\n\n    public static void main(String[] args) {\n        System.out.println(\"Is 4 even? \" + isEven(4));\n    }\n}\n
Explanation "},{"location":"fundamentaldives/FundamentalPrinciples/KISS/#summary","title":"Summary","text":"

The KISS principle encourages developers to create simple, maintainable, and readable code by avoiding unnecessary complexity. However, KISS must be balanced: some projects or scenarios require complexity to meet performance, modularity, or security needs. In the end, applying KISS is about striking the right balance, making the solution as simple as possible, but not simpler than necessary.

"},{"location":"fundamentaldives/FundamentalPrinciples/SOLID/","title":"SOLID Principles","text":""},{"location":"fundamentaldives/FundamentalPrinciples/SOLID/#single-responsibility-principle","title":"Single Responsibility Principle","text":"

A class should have only one reason to change. This means that each class should focus on a single responsibility or feature.

Violation Example
// Violates SRP: User class has multiple responsibilities.\npublic class User {\n    private String name;\n    private String email;\n\n    public void sendEmail(String message) {\n        // Sending email logic here...\n        System.out.println(\"Email sent to \" + email);\n    }\n\n    public void saveUser() {\n        // Save user to the database\n        System.out.println(\"User saved to DB\");\n    }\n}\n
Fixed Example
// Separate responsibilities into different classes.\npublic class User {\n    private String name;\n    private String email;\n\n    // Getters and setters...\n}\n\npublic class UserRepository {\n    public void save(User user) {\n        System.out.println(\"User saved to DB\");\n    }\n}\n\npublic class EmailService {\n    public void sendEmail(User user, String message) {\n        System.out.println(\"Email sent to \" + user.getEmail());\n    }\n}\n
"},{"location":"fundamentaldives/FundamentalPrinciples/SOLID/#open-closed-principle","title":"Open Closed Principle","text":"

Software components (classes, functions, etc.) should be open for extension but closed for modification. You shouldn\u2019t modify existing code to add new behavior; instead, extend it.

Violation Example
// Violates OCP: PaymentProcessor needs to be modified for new payment types.\npublic class PaymentProcessor {\n    public void pay(String type) {\n        if (type.equals(\"credit\")) {\n            System.out.println(\"Processing credit card payment...\");\n        } else if (type.equals(\"paypal\")) {\n            System.out.println(\"Processing PayPal payment...\");\n        }\n    }\n}\n
Fixed Example
// Use an interface for extensibility.\ninterface PaymentMethod {\n    void pay();\n}\n\npublic class CreditCardPayment implements PaymentMethod {\n    public void pay() {\n        System.out.println(\"Processing credit card payment...\");\n    }\n}\n\npublic class PayPalPayment implements PaymentMethod {\n    public void pay() {\n        System.out.println(\"Processing PayPal payment...\");\n    }\n}\n\npublic class PaymentProcessor {\n    public void processPayment(PaymentMethod paymentMethod) {\n        paymentMethod.pay();\n    }\n}\n
"},{"location":"fundamentaldives/FundamentalPrinciples/SOLID/#liskov-substitution-principle","title":"Liskov Substitution Principle","text":"

Subclasses should be substitutable for their base class without altering the correctness of the program.

Violation Example
// Violates LSP: Square changes the behavior of Rectangle.\nclass Rectangle {\n    protected int width, height;\n\n    public void setWidth(int width) {\n        this.width = width;\n    }\n\n    public void setHeight(int height) {\n        this.height = height;\n    }\n\n    public int getArea() {\n        return width * height;\n    }\n}\n\nclass Square extends Rectangle {\n    @Override\n    public void setWidth(int width) {\n        this.width = width;\n        this.height = width;  // Violates LSP: Unexpected behavior.\n    }\n\n    @Override\n    public void setHeight(int height) {\n        this.width = height;\n        this.height = height;\n    }\n}\n
Fixed Example
// Use separate classes to maintain correct behavior.\nclass Shape {\n    public int getArea() {\n        return 0;\n    }\n}\n\nclass Rectangle extends Shape {\n    protected int width, height;\n\n    public Rectangle(int width, int height) {\n        this.width = width;\n        this.height = height;\n    }\n\n    @Override\n    public int getArea() {\n        return width * height;\n    }\n}\n\nclass Square extends Shape {\n    private int side;\n\n    public Square(int side) {\n        this.side = side;\n    }\n\n    @Override\n    public int getArea() {\n        return side * side;\n    }\n}\n
"},{"location":"fundamentaldives/FundamentalPrinciples/SOLID/#interface-segregation-principle","title":"Interface Segregation Principle","text":"

A client should not be forced to implement interfaces that it does not use. Instead, smaller, more specific interfaces should be preferred.

Violation Example
// Violates ISP: Car needs to implement unnecessary methods.\ninterface Vehicle {\n    void drive();\n    void fly();\n}\n\nclass Car implements Vehicle {\n    @Override\n    public void drive() {\n        System.out.println(\"Car is driving...\");\n    }\n\n    @Override\n    public void fly() {\n        // Car can't fly! This method is unnecessary.\n        throw new UnsupportedOperationException(\"Car can't fly\");\n    }\n}\n
Fixed Example
// Use separate interfaces for each capability.\ninterface Drivable {\n    void drive();\n}\n\ninterface Flyable {\n    void fly();\n}\n\nclass Car implements Drivable {\n    @Override\n    public void drive() {\n        System.out.println(\"Car is driving...\");\n    }\n}\n\nclass Plane implements Drivable, Flyable {\n    @Override\n    public void drive() {\n        System.out.println(\"Plane is taxiing...\");\n    }\n\n    @Override\n    public void fly() {\n        System.out.println(\"Plane is flying...\");\n    }\n}\n
"},{"location":"fundamentaldives/FundamentalPrinciples/SOLID/#dependency-inversion-principle","title":"Dependency Inversion Principle","text":"

High-level modules should not depend on low-level modules. Both should depend on abstractions.

Violation Example
// Violates DIP: High-level class depends on a specific implementation.\nclass SQLDatabase {\n    public void connect() {\n        System.out.println(\"Connected to SQL Database\");\n    }\n}\n\nclass Application {\n    private SQLDatabase database;\n\n    public Application() {\n        database = new SQLDatabase();  // Tight coupling to SQLDatabase.\n    }\n\n    public void start() {\n        database.connect();\n    }\n}\n
Fixed Example
// Depend on an abstraction instead of a specific implementation.\ninterface Database {\n    void connect();\n}\n\nclass SQLDatabase implements Database {\n    public void connect() {\n        System.out.println(\"Connected to SQL Database\");\n    }\n}\n\nclass NoSQLDatabase implements Database {\n    public void connect() {\n        System.out.println(\"Connected to NoSQL Database\");\n    }\n}\n\nclass Application {\n    private Database database;\n\n    public Application(Database database) {\n        this.database = database;\n    }\n\n    public void start() {\n        database.connect();\n    }\n}\n
"},{"location":"fundamentaldives/FundamentalPrinciples/SOLID/#summary","title":"Summary","text":"Principle Definition Violation Example Fixed Example Single Responsibility A class should have only one reason to change. User class manages both data and emails. Separate User, EmailService, UserRepository. Open Closed Open for extension, closed for modification. Modify PaymentProcessor for new methods. Use PaymentMethod interface and extend classes. Liskov Substitution Subtypes should behave like their base type. Square modifies behavior of Rectangle. Separate Square and Rectangle classes. Interface Segregation Use small, specific interfaces. Car implements unnecessary fly() method. Split into Drivable and Flyable interfaces. Dependency Inversion Depend on abstractions, not implementations. App depends on SQLDatabase directly. Use Database interface for loose coupling."},{"location":"fundamentaldives/FundamentalPrinciples/YAGNI/","title":"YAGNI Principle","text":""},{"location":"fundamentaldives/FundamentalPrinciples/YAGNI/#what","title":"What ?","text":"

YAGNI stands for \"You Aren\u2019t Gonna Need It.\" It is one of the core principles of Extreme Programming (XP) and Agile development. The principle advises developers not to add any functionality or code until it is truly needed. Essentially, YAGNI promotes simplicity and avoids speculative development.

Always implement things when you actually need them, never when you just foresee you might need them.

"},{"location":"fundamentaldives/FundamentalPrinciples/YAGNI/#why-use","title":"Why Use ?","text":""},{"location":"fundamentaldives/FundamentalPrinciples/YAGNI/#when-to-use","title":"When to Use ?","text":""},{"location":"fundamentaldives/FundamentalPrinciples/YAGNI/#when-not-to-use","title":"When Not to Use ?","text":""},{"location":"fundamentaldives/FundamentalPrinciples/YAGNI/#how-to-apply","title":"How to Apply ?","text":""},{"location":"fundamentaldives/FundamentalPrinciples/YAGNI/#advantages","title":"Advantages","text":""},{"location":"fundamentaldives/FundamentalPrinciples/YAGNI/#how-to-use-example","title":"How to Use Example ?","text":"

Let's go through a simple example to illustrate YAGNI in practice.

Without YAGNI Example Adding Unnecessary Functionality
public class UserService {\n\n    // Current functionality: Retrieve a user by ID\n    public User getUserById(int id) {\n        // Logic to retrieve user\n        return new User(id, \"John Doe\");\n    }\n\n    // Unnecessary feature: Speculating that we may need a \"deleteUser\" method in the future\n    public void deleteUser(int id) {\n        // Logic to delete user (unimplemented)\n        System.out.println(\"User deleted: \" + id);\n    }\n\n    // Another unnecessary feature: Thinking we might need email notifications\n    public void sendEmailNotification(User user) {\n        // Logic to send email (unimplemented)\n        System.out.println(\"Email sent to: \" + user.getEmail());\n    }\n}\n\nclass User {\n    private int id;\n    private String name;\n\n    public User(int id, String name) {\n        this.id = id;\n        this.name = name;\n    }\n\n    public String getEmail() {\n        return name.toLowerCase() + \"@example.com\";\n    }\n}\n
Explanation Applying YAGNI Only Implement What is Needed Now
public class UserService {\n\n    // Only add the necessary method for now\n    public User getUserById(int id) {\n        // Logic to retrieve user\n        return new User(id, \"John Doe\");\n    }\n}\n\nclass User {\n    private int id;\n    private String name;\n\n    public User(int id, String name) {\n        this.id = id;\n        this.name = name;\n    }\n}\n
Explanation "},{"location":"fundamentaldives/FundamentalPrinciples/YAGNI/#summary","title":"Summary","text":"

The YAGNI principle encourages developers to focus on delivering only the required features at a given point in time, avoiding speculative development that may never be used. This approach fosters simplicity, maintainability, and efficiency in the codebase. However, it should be applied carefully: there are scenarios (like architecture or security) where anticipating needs is necessary. When used properly, YAGNI helps teams build better software, faster, and with fewer headaches down the line.

"},{"location":"langdives/Java/4Pillars/","title":"4 Pillars - What, How, and Why ?","text":""},{"location":"langdives/Java/4Pillars/#encapsulation","title":"Encapsulation","text":"

What: Hiding the internal details of an object and only exposing necessary parts through public methods.

Why: It helps in data hiding and ensures controlled access to variables.

How: Use private variables to restrict direct access and provide getters and setters to access and modify the data.

Encapsulation Example
public class Person {\n    private String name; // Encapsulated field\n\n    // Getter\n    public String getName() {\n        return name;\n    }\n\n    // Setter\n    public void setName(String name) {\n        this.name = name;\n    }\n}\n
"},{"location":"langdives/Java/4Pillars/#inheritance","title":"Inheritance","text":"

What: Allows a class (child/subclass) to acquire the properties and behaviors of another class (parent/superclass).

Why: Promotes code reusability and establishes a parent-child relationship.

How: Use the extends keyword.

Inheritance Example
class Animal {\n    public void sound() {\n        System.out.println(\"Animals make sound\");\n    }\n}\n\nclass Dog extends Animal {\n    @Override\n    public void sound() {\n        System.out.println(\"Dog barks\");\n    }\n}\n
"},{"location":"langdives/Java/4Pillars/#polymorphism","title":"Polymorphism","text":"

What: Ability to process objects differently based on their data type or class.

Why: Increases flexibility and supports dynamic method invocation.

Polymorphism Example Method Overloading
class Calculator {\n    public int add(int a, int b) {\n        return a + b;\n    }\n\n    public double add(double a, double b) {\n        return a + b;\n    }\n}\n
Method Overriding
class Animal {\n    public void sound() {\n        System.out.println(\"Animal makes sound\");\n    }\n}\n\nclass Cat extends Animal {\n    @Override\n    public void sound() {\n        System.out.println(\"Cat meows\");\n    }\n}\n
"},{"location":"langdives/Java/4Pillars/#abstraction","title":"Abstraction","text":"

What: Hiding the complex implementation details and only exposing the essential features.

Why: Helps in achieving modularity and loose coupling between components.

How: Use abstract classes and interfaces.

Abstraction Example Abstract Class Example
abstract class Vehicle {\n    abstract void start();\n}\n\nclass Car extends Vehicle {\n    @Override\n    void start() {\n        System.out.println(\"Car starts with a key\");\n    }\n}\n
Interface Example
interface Animal {\n    void eat();\n}\n\nclass Dog implements Animal {\n    @Override\n    public void eat() {\n        System.out.println(\"Dog eats bones\");\n    }\n}\n
"},{"location":"langdives/Java/4Pillars/#summary","title":"Summary","text":"Aspect Encapsulation Inheritance Polymorphism Abstraction Definition Bundling data and methods together and restricting access to data. Mechanism for a subclass to acquire properties of a parent class. Allowing methods to take different forms (overloading/overriding). Hiding implementation details while showing only essential features. Focus Protecting data and providing controlled access. Code reuse and establishing a parent-child hierarchy. Dynamic behavior based on object type. Simplifying complex systems by exposing only key details. Achieved Through Using private fields, and public getters/setters. Using the extends keyword to derive subclasses. Overloading (compile-time) and overriding (runtime). Using abstract classes or interfaces. Key Benefit Data hiding and modular code. Reduces redundancy and promotes code reuse. Flexibility and extensibility of behavior. Promotes loose coupling and modularity. Access Modifiers Requires private, protected, or public. Involves all inheritance-accessible modifiers. Leverages method visibility across class hierarchies. Abstract methods can be protected or public (not private). Real-World Analogy A capsule with medicine inside it hides the internal components. A child inheriting traits from their parents. A shape object behaving differently as circle/square. A remote control exposing buttons without showing internal circuits. Code Dependency Independent within the class. Dependent on parent-child relationship. Involves multiple forms of a single method/class. Can work with unrelated classes sharing common behavior."},{"location":"langdives/Java/AccessModifPPPPP/","title":"Access modifiers","text":""},{"location":"langdives/Java/AccessModifPPPPP/#public","title":"Public","text":"

Keyword: public Access: Accessible from anywhere (inside/outside the class, package, or project). Usage: Typically used for classes, methods, and variables that need global access.

Public Example
public class MyClass {\n    public int value = 10;\n    public void display() {\n        System.out.println(\"Public method\");\n    }\n}\n
"},{"location":"langdives/Java/AccessModifPPPPP/#private","title":"Private","text":"

Keyword: private Access: Accessible only within the same class. Usage: Used to hide class fields or methods, following the principle of encapsulation.

Private Example
public class MyClass {\n    private int value = 10; // Not accessible outside this class\n\n    private void display() {\n        System.out.println(\"Private method\");\n    }\n}\n
"},{"location":"langdives/Java/AccessModifPPPPP/#protected","title":"Protected","text":"

Keyword: protected Access: Accessible within the same package and by subclasses (even if outside the package). Usage: Useful when extending classes across packages.

Protected Example
public class MyClass {\n    protected int value = 10;\n\n    protected void display() {\n        System.out.println(\"Protected method\");\n    }\n}\n

Note

If accessed by a subclass in a different package, it must be through inheritance (not directly via an instance).
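A minimal sketch of this rule, assuming two illustrative packages base and app (package and class names here are hypothetical):
// File: base/Parent.java\npackage base;\n\npublic class Parent {\n    protected int value = 10;\n}\n\n// File: app/Child.java\npackage app;\n\nimport base.Parent;\n\npublic class Child extends Parent {\n    public void show() {\n        System.out.println(value); // OK: accessed via inheritance\n        // new Parent().value;     // Compile error: not accessible via an instance\n    }\n}\n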

"},{"location":"langdives/Java/AccessModifPPPPP/#package-private","title":"Package-Private","text":"

Keyword: No keyword (Default Access) Access: Accessible only within the same package. Usage: Used for classes and members that don\u2019t need to be accessed outside their package.

Package-Private Example
class MyClass { // No access modifier, so it's package-private\n    int value = 10;\n\n    void display() {\n        System.out.println(\"Package-private method\");\n    }\n}\n

Note

Package-private is the default if no modifier is specified.

"},{"location":"langdives/Java/AccessModifPPPPP/#access-summary","title":"Access Summary","text":"Modifier Same Class Same Package Subclass (Different Package) Other Packages public \u2705 \u2705 \u2705 \u2705 protected \u2705 \u2705 \u2705 (via inheritance) \u274c (default) \u2705 \u2705 \u274c \u274c private \u2705 \u274c \u274c \u274c"},{"location":"langdives/Java/Collections-JCF/","title":"Java Collections Framework","text":""},{"location":"langdives/Java/Collections-JCF/#categories-of-collections","title":"Categories of Collections","text":""},{"location":"langdives/Java/Collections-JCF/#list","title":"List","text":""},{"location":"langdives/Java/Collections-JCF/#arraylist","title":"ArrayList","text":"

A resizable array with fast random access. It's backed by an array; use it when random access is needed and insertions are rare.

Operations & Complexities

Thread Safety: Not synchronized, use Collections.synchronizedList() for thread safety.

Example
List<String> list = new ArrayList<>();\nlist.add(\"Apple\");\nlist.get(0);  // Fast access\n
"},{"location":"langdives/Java/Collections-JCF/#linkedlist","title":"LinkedList","text":"

Backed by a doubly linked list, better for frequent insertions and deletions; use it when insertion/deletion in the middle is frequent.

Operations & Complexities

Thread Safety: Not synchronized.

Example
List<String> list = new LinkedList<>();\nlist.add(\"Banana\");\nlist.addFirst(\"Apple\");  // O(1) insertion at head\n
"},{"location":"langdives/Java/Collections-JCF/#set","title":"Set","text":""},{"location":"langdives/Java/Collections-JCF/#hashset","title":"HashSet","text":"

Unordered, no duplicates, backed by a hash table. Use it when you need fast lookups and no duplicates.

Operations & Complexities

Thread Safety: Not synchronized, use Collections.synchronizedSet().

Example
Set<String> set = new HashSet<>();\nset.add(\"Cat\");\nset.add(\"Dog\");\n
"},{"location":"langdives/Java/Collections-JCF/#linkedhashset","title":"LinkedHashSet","text":"

Maintains insertion order, backed by a hash table plus a linked list. Use it when you need order-preserving behavior.

Operations & Complexities: Same as HashSet (O(1) operations) but with slightly higher overhead due to linked list maintenance.

Example
Set<String> set = new LinkedHashSet<>();\nset.add(\"Apple\");\nset.add(\"Banana\");\n
"},{"location":"langdives/Java/Collections-JCF/#treeset","title":"TreeSet","text":"

A sorted set backed by a Red-Black tree. Use it when you need sorted data.

Operations & Complexities

Thread Safety: Not synchronized.

Example
Set<Integer> set = new TreeSet<>();\nset.add(5);\nset.add(1);  // Sorted automatically\n
"},{"location":"langdives/Java/Collections-JCF/#queuedeque","title":"Queue/Deque","text":""},{"location":"langdives/Java/Collections-JCF/#priorityqueue","title":"PriorityQueue","text":"

Elements are ordered by their natural order or a custom comparator. It's backed by a binary heap; use it when priority-based retrieval is needed.

Operations & Complexities

Example
Queue<Integer> queue = new PriorityQueue<>();\nqueue.add(10);\nqueue.add(5);  // 5 will be at the top\n
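Since ordering can also come from a custom comparator, a small sketch of a max-heap variant (Comparator.reverseOrder() is from java.util):
Queue<Integer> maxHeap = new PriorityQueue<>(Comparator.reverseOrder());\nmaxHeap.add(10);\nmaxHeap.add(5);  // 10 will be at the head\n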
"},{"location":"langdives/Java/Collections-JCF/#arraydeque","title":"ArrayDeque","text":"

A resizable-array-backed deque that allows adding/removing from both ends. Use it when you need both stack and queue operations.

Operations & Complexities

Example
Deque<String> deque = new ArrayDeque<>();\ndeque.addFirst(\"First\");\ndeque.addLast(\"Last\");\n
"},{"location":"langdives/Java/Collections-JCF/#map","title":"Map","text":""},{"location":"langdives/Java/Collections-JCF/#hashmap","title":"HashMap","text":"

Stores key-value pairs, backed by a hash table. Use it for fast key-value lookups.

Operations & Complexities

Thread Safety: Not synchronized, use ConcurrentHashMap for thread-safe operations.

Example
Map<String, Integer> map = new HashMap<>();\nmap.put(\"Apple\", 1);\nmap.put(\"Banana\", 2);\n
"},{"location":"langdives/Java/Collections-JCF/#linkedhashmap","title":"LinkedHashMap","text":"

Maintains insertion order, backed by a hash table plus a linked list. Use it when the ordering of entries matters.

Example
Map<String, Integer> map = new LinkedHashMap<>();\nmap.put(\"Apple\", 1);\nmap.put(\"Banana\", 2);  // Maintains insertion order\n
"},{"location":"langdives/Java/Collections-JCF/#treemap","title":"TreeMap","text":"

A sorted map backed by a Red-Black tree. Use it when you need a sorted key-value store.

Operations & Complexities: Get/Put/Remove: O(log n)

Example
Map<Integer, String> map = new TreeMap<>();\nmap.put(3, \"Three\");\nmap.put(1, \"One\");  // Sorted by key\n
"},{"location":"langdives/Java/Collections-JCF/#synchronized-collections","title":"Synchronized Collections","text":"

Synchronized Wrappers: Use Collections.synchronizedList() or Collections.synchronizedSet() to make collections thread-safe.

Example
List<String> list = Collections.synchronizedList(new ArrayList<>());\n

Concurrent Collections: Use ConcurrentHashMap, CopyOnWriteArrayList, or BlockingQueue for better thread-safe alternatives.
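A brief sketch of both approaches, assuming the usual java.util and java.util.concurrent imports:
Set<String> syncSet = Collections.synchronizedSet(new HashSet<>());\n\nMap<String, Integer> concurrentMap = new ConcurrentHashMap<>();\nconcurrentMap.put(\"Apple\", 1);      // Thread-safe without external locking\n\nList<String> cowList = new CopyOnWriteArrayList<>();\ncowList.add(\"Banana\");              // Copies the underlying array on each write\n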

"},{"location":"langdives/Java/Collections-JCF/#summary","title":"Summary","text":"Collection Type Backed By Access Time Insertion Time Deletion Time Thread Safety Use Case ArrayList List Resizable Array O(1) O(1) (amortized) O(n) No Fast random access LinkedList List Doubly Linked List O(n) O(1) O(1) No Frequent insertions/deletions HashSet Set Hash Table - O(1) O(1) No Unique elements LinkedHashSet Set Hash Table + Linked List - O(1) O(1) No Unique elements with insertion order TreeSet Set Red-Black Tree - O(log n) O(log n) No Sorted elements PriorityQueue Queue Binary Heap - O(log n) O(log n) No Priority-based retrieval ArrayDeque Deque Resizable Array - O(1) O(1) No Both stack and queue operations HashMap Map Hash Table - O(1) O(1) No Fast key-value lookups LinkedHashMap Map Hash Table + Linked List - O(1) O(1) No Key-value lookups with insertion order TreeMap Map Red-Black Tree - O(log n) O(log n) No Sorted key-value pairs ConcurrentHashMap Concurrent Map Segmented Hash Table O(1) O(1) O(1) Yes Thread-safe map CopyOnWriteArrayList Concurrent List Array O(n) O(1) O(1) Yes Thread-safe list BlockingQueue Concurrent Queue Queue/Linked Nodes Depends on impl. O(1) O(1) Yes Thread-safe queue"},{"location":"langdives/Java/GarbageCollection/","title":"Garbage Collection","text":"

Garbage collection (GC) in Java is essential to automatic memory management, ensuring that objects no longer needed by an application are reclaimed, and the memory they occupied is freed. This allows Java developers to avoid memory leaks and other resource-management issues.

"},{"location":"langdives/Java/GarbageCollection/#basics","title":"Basics","text":"

Heap Memory is divided into several areas, mainly

How GC Works ? Simply put, GC works by going through phases

"},{"location":"langdives/Java/GarbageCollection/#phases","title":"Phases","text":""},{"location":"langdives/Java/GarbageCollection/#types-of-collectors","title":"Types of Collectors","text":""},{"location":"langdives/Java/GarbageCollection/#serial-garbage-collector","title":"Serial Garbage Collector","text":""},{"location":"langdives/Java/GarbageCollection/#parallel-garbage-collector","title":"Parallel Garbage Collector","text":""},{"location":"langdives/Java/GarbageCollection/#cms-collector","title":"CMS Collector","text":""},{"location":"langdives/Java/GarbageCollection/#g1-garbage-collector","title":"G1 Garbage Collector","text":""},{"location":"langdives/Java/GarbageCollection/#z-garbage-collector-zgc","title":"Z Garbage Collector (ZGC)","text":""},{"location":"langdives/Java/GarbageCollection/#shenandoah-gc","title":"Shenandoah GC","text":""},{"location":"langdives/Java/GarbageCollection/#comparing-collectors","title":"Comparing Collectors","text":"Collector Use Case Pause Time Heap Size Parallelism Serial Small apps, single-threaded High Small (<1 GB) Single-threaded Parallel Throughput-heavy apps Moderate Medium to large Multi-threaded CMS Low-latency (deprecated) Low Medium to large Concurrent G1 Balanced throughput & latency Predictable Large Mixed ZGC Ultra-low latency, huge heaps Sub-millisecond Multi-TB Highly concurrent Shenandoah Latency-sensitive, large heaps Sub-millisecond Multi-TB Highly concurrent

Note

ZGC and Shenandoah use advanced algorithms that perform incremental marking, remapping, and concurrent sweeping. They avoid long pauses by offloading most GC work to concurrent threads.

"},{"location":"langdives/Java/GarbageCollection/#garbage-collection-concepts","title":"Garbage Collection Concepts","text":""},{"location":"langdives/Java/GarbageCollection/#generational-collection","title":"Generational Collection","text":"

Java heap is divided into:

Garbage collection types:

"},{"location":"langdives/Java/GarbageCollection/#advanced-memory-layout","title":"Advanced Memory Layout","text":"

Dynamic Region Management:

"},{"location":"langdives/Java/GarbageCollection/#safepoints","title":"Safepoints","text":"

Java threads stop only at specific safepoints for GC (or other JVM activities). A safepoint is a checkpoint that all threads must reach before GC can start, e.g., when executing bytecode that triggers an allocation failure, method calls, or back branches in loops.

The JVM injects safepoint polls into running code to ensure threads hit these safepoints regularly. Too many safepoint pauses indicate GC tuning issues or excessive thread blocking.

The JVM may delay triggering GC while waiting for all threads to reach a safepoint, which introduces unpredictable latency. This is critical in low-latency systems like trading applications.
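Safepoint pauses can be observed through JVM logging, for example with unified logging (the -Xlog:safepoint flag applies to JDK 9 and later):
java -Xlog:safepoint -jar my-application.jar\n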

"},{"location":"langdives/Java/GarbageCollection/#stop-the-world-stw","title":"Stop the World (STW)","text":"

A Stop-The-World (STW) event occurs when the garbage collector (GC) halts all application threads to perform critical tasks like marking live objects, reclaiming memory, and compacting the heap. These pauses, necessary to prevent heap inconsistencies, impact application performance, especially in latency-sensitive environments.

The duration of STW events depends on heap size, the number of live objects, and the GC algorithm. Traditional collectors like Serial GC and Parallel GC have longer STW pauses, while CMS reduces them with concurrent marking but still requires short pauses for initial and final marking. Modern GCs like G1 GC, ZGC, and Shenandoah GC minimize pauses by performing most work concurrently with application threads, achieving millisecond-range STW durations.

Optimizations include using low-latency collectors, tuning GC settings, reducing allocation pressure, and monitoring GC behavior with tools like JFR or VisualVM. For latency-critical applications, advanced collectors and careful memory management are essential to mitigate the impact of STW events.

"},{"location":"langdives/Java/GarbageCollection/#barriers-tables-fences","title":"Barriers, Tables & Fences","text":""},{"location":"langdives/Java/GarbageCollection/#write-barriers","title":"Write Barriers","text":""},{"location":"langdives/Java/GarbageCollection/#card-tables","title":"Card Tables","text":""},{"location":"langdives/Java/GarbageCollection/#memory-fences","title":"Memory Fences","text":""},{"location":"langdives/Java/GarbageCollection/#hierarchy","title":"Hierarchy","text":""},{"location":"langdives/Java/GarbageCollection/#allocation","title":"Allocation","text":"

New Object Creation

Minor GC (Young Generation Collection)

"},{"location":"langdives/Java/GarbageCollection/#thresholds-promotions","title":"Thresholds & Promotions","text":"

Max Tenuring Threshold
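The threshold can be tuned with a JVM flag (the value below is illustrative; the JVM may also adjust it adaptively):
java -XX:MaxTenuringThreshold=10 -jar my-application.jar\n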

Promotion Failures

"},{"location":"langdives/Java/GarbageCollection/#major-gc-old-generation","title":"Major GC & Old Generation","text":"

When Old Generation Fills Up

Concurrent Collectors (CMS, G1, ZGC, Shenandoah)

"},{"location":"langdives/Java/GarbageCollection/#full-gc-stop-the-world-event","title":"Full GC (Stop-the-World Event)","text":"

What Causes Full GC?

What Happens During Full GC

"},{"location":"langdives/Java/GarbageCollection/#safepoints-write-barriers","title":"Safepoints & Write Barriers","text":"

Safepoints

Write Barriers

"},{"location":"langdives/Java/GarbageCollection/#finalization-reference-types","title":"Finalization & Reference Types","text":"

Soft, Weak, and Phantom References

"},{"location":"langdives/Java/GarbageCollection/#gc-flow-structure","title":"GC Flow Structure","text":"

GC Flow

  1. Object Creation

    • Allocated in Eden Space (Young Gen).
  2. Eden Full \u2192 Trigger Minor GC

    • Mark live objects and move them to Survivor Spaces.
    • Objects surviving multiple cycles move to the Old Generation.
  3. Old Gen Full \u2192 Trigger Major GC

    • Mark and sweep objects in the Old Gen.
    • If heap is fragmented, trigger Full GC.
  4. Concurrent Collections (G1, ZGC)

    • Perform marking and sweeping concurrently without stopping the world.
  5. Full GC (Stop-the-World)

    • When all else fails, Full GC freezes all threads, marks, sweeps, and compacts memory.
"},{"location":"langdives/Java/GarbageCollection/#fragmentation","title":"Fragmentation","text":"

Fragmentation refers to the inefficient use of memory that occurs when free memory is split into small, non-contiguous blocks, making it difficult to allocate larger contiguous blocks even if the total free memory is sufficient. In Java, fragmentation can occur in both the young and old generations of the heap.

"},{"location":"langdives/Java/GarbageCollection/#types","title":"Types","text":"

Internal Fragmentation: Occurs when a block of memory is larger than what is actually needed. For example, if an object requires 10 bytes but is allocated a 16-byte block, the remaining 6 bytes are wasted.

External Fragmentation: Happens when free memory is scattered in small chunks across the heap. This can lead to a situation where there isn\u2019t enough contiguous space available to fulfill a large allocation request, even if the total free memory is sufficient.

"},{"location":"langdives/Java/GarbageCollection/#causes","title":"Causes","text":"

Object Lifetimes: Short-lived objects are frequently allocated and deallocated, especially in the young generation. This can create gaps in memory as these objects are collected, leading to external fragmentation.

Promotion of Objects: When objects in the young generation are promoted to the old generation and the old generation is already fragmented, it may become difficult to allocate new objects.

Full GCs: In collectors like CMS (Concurrent Mark-Sweep), memory is reclaimed but not compacted, leaving fragmented free spaces.

"},{"location":"langdives/Java/GarbageCollection/#effects","title":"Effects","text":"

OutOfMemoryError: Fragmentation can cause allocation failures, leading to OutOfMemoryError if there isn\u2019t enough contiguous memory available for new object allocations.

Increased GC Overhead: The JVM may spend more time during GC cycles trying to find suitable spaces for object allocation, which can degrade performance.

Heap Fragmentation: Some collectors (like CMS) suffer from heap fragmentation since they don\u2019t compact memory after reclaiming space.

Pinned Objects: Sometimes, objects cannot be moved during GC (e.g., JNI references or thread-local objects). This can lead to fragmentation.

"},{"location":"langdives/Java/GarbageCollection/#mitigating","title":"Mitigating","text":"

Using G1 GC or ZGC: These collectors are designed to handle fragmentation better than older collectors. They manage memory in regions and perform compaction as part of their regular operations.

Heap Size Adjustments: Increasing the size of the old generation can help reduce the frequency of fragmentation issues.

Monitoring and Tuning: Regularly monitor memory usage and GC logs to identify fragmentation patterns. Tuning the JVM parameters can help alleviate fragmentation issues.

Object Pooling: Reusing objects instead of frequently allocating and deallocating them can help reduce fragmentation.
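A minimal sketch of object pooling, assuming a hypothetical pool of reusable byte buffers (illustrative only, not a production-grade or thread-safe pool):
import java.util.ArrayDeque;\nimport java.util.Deque;\n\nclass BufferPool {\n    private final Deque<byte[]> pool = new ArrayDeque<>();\n\n    // Reuse a pooled buffer if available, otherwise allocate a new one\n    byte[] acquire() {\n        byte[] buf = pool.pollFirst();\n        return (buf != null) ? buf : new byte[4096];\n    }\n\n    // Return the buffer to the pool instead of letting it become garbage\n    void release(byte[] buf) {\n        pool.offerFirst(buf);\n    }\n}\n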

"},{"location":"langdives/Java/GarbageCollection/#configuring-garbage-collection","title":"Configuring garbage collection","text":"

Configuring garbage collection and its parameters in Java is primarily done through JVM (Java Virtual Machine) options when starting your application.

"},{"location":"langdives/Java/GarbageCollection/#how-to-configure-params","title":"How to Configure Params","text":"

Command-Line Options: You can specify GC options when you start your Java application using the java command.

Example
java -Xms512m -Xmx4g -XX:+UseG1GC -XX:MaxGCPauseMillis=100 -jar my-application.jar\n

Environment Variables: For containerized applications (like those running in Docker or Kubernetes), you can set JVM options through environment variables or directly in the configuration file.

Example in Docker
ENV JAVA_OPTS=\"-Xms512m -Xmx4g -XX:+UseG1GC\"\nCMD java $JAVA_OPTS -jar your-application.jar\n

Configuration Files: Some applications allow you to specify JVM options in a configuration file, which can be helpful for managing multiple parameters in one place.

"},{"location":"langdives/Java/GarbageCollection/#common-gc-options","title":"Common GC Options","text":"

Basic Heap Size Configuration

Choosing a Garbage Collector

G1 GC
-XX:+UseG1GC\n
Parallel GC
-XX:+UseParallelGC\n
CMS (Concurrent Mark-Sweep)
-XX:+UseConcMarkSweepGC\n
ZGC
-XX:+UseZGC\n
Shenandoah
-XX:+UseShenandoahGC\n

Tuning G1 GC

Tuning Parallel GC

Monitoring and Logging

Controlling Object Promotion

Metaspace Configuration (for class metadata)

"},{"location":"langdives/Java/GarbageCollection/#example-configuration","title":"Example Configuration","text":"

Here\u2019s an example of a command to start a Java application with G1 GC and some tuning parameters

Example
java -Xms1g -Xmx8g -XX:+UseG1GC \\\n   -XX:MaxGCPauseMillis=200 \\\n   -XX:G1HeapRegionSize=16m \\\n   -XX:InitiatingHeapOccupancyPercent=30 \\\n   -XX:ConcGCThreads=2 \\\n   -Xlog:gc*:file=gc.log \\\n   -jar my-application.jar\n
"},{"location":"langdives/Java/GarbageCollection/#deep-tuning-techniques","title":"Deep Tuning Techniques","text":"

Heap Size and GC Frequency

GC Latency and Response Times

Application Throughput vs Latency

Survivor Space Tuning

Tuning G1 GC

Young GC Tuning

Tuning Java GC for High Performance

Handling Multi-Terabyte Heaps

GC in Cloud and Microservices

Latency Monitoring

"},{"location":"langdives/Java/GarbageCollection/#tools-for-analysis","title":"Tools for Analysis","text":"

GC Logs: Capture GC details by adding -Xlog:gc* or -XX:+PrintGCDetails JVM options.

Sample GC Log
[GC (Allocation Failure) [PSYoungGen: 2048K->512K(2560K)] 4096K->2048K(8192K), 0.0103451 secs]\n

VisualVM: A monitoring tool bundled with the JDK for real-time JVM performance monitoring.

Java Flight Recorder (JFR): An advanced profiling tool that collects detailed JVM metrics, including GC data.

JConsole: Visualize JVM statistics and monitor heap usage.

"},{"location":"langdives/Java/GarbageCollection/#diagnosing-troubleshooting","title":"Diagnosing & Troubleshooting","text":"

OutOfMemoryError (OOM): Common causes

Heap Dump Analysis:
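A heap dump can typically be captured with the JDK's jmap tool (the PID is a placeholder):
jmap -dump:live,format=b,file=heap.hprof <pid>\n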

Detecting Leaks: Look for large, unreachable objects with static references or growing collections (e.g., large HashMap or ArrayList).

Java Flight Recorder (JFR): JFR provides detailed memory profiling without heavy overhead. Collect a recording and analyze it for object lifetimes, GC events, and thread behavior.
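For example, a recording can be started at application launch (the flag is available in recent JDKs; duration and filename here are illustrative):
java -XX:StartFlightRecording=duration=60s,filename=recording.jfr -jar my-application.jar\n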

"},{"location":"langdives/Java/GarbageCollection/#summary","title":"Summary","text":"

Choose the Right GC Collector

Monitor and Analyze

Avoid Full GC

Pre-tuning Advice

"},{"location":"langdives/Java/Gradle/","title":"Gradle","text":""},{"location":"langdives/Java/Gradle/#what-is-gradle","title":"What is Gradle ?","text":"

Gradle is a modern, powerful build automation tool used for building, testing, and deploying applications. It is particularly popular in Java and Android projects due to its flexibility and performance. Gradle uses a Groovy/Kotlin-based DSL to configure and manage builds, allowing for easy customization.

"},{"location":"langdives/Java/Gradle/#how-gradle-works","title":"How Gradle Works ?","text":"

Gradle organizes builds using a Directed Acyclic Graph (DAG) of tasks, ensuring that tasks are executed only when necessary.

The build process has three phases

Initialization Phase

Configuration Phase

Execution Phase

"},{"location":"langdives/Java/Gradle/#gradle-build","title":"Gradle Build","text":"

Gradle build configuration is done in the build.gradle file. This file uses Groovy or Kotlin DSL to describe:

build.gradle Example

plugins {\n    id 'java'        // Apply Java plugin\n    id 'application' // Allow running the app from CLI\n}\n\nrepositories {\n    mavenCentral() // Use Maven Central for dependencies\n}\n\ndependencies {\n    implementation 'org.apache.commons:commons-lang3:3.12.0'  // Runtime dependency\n    testImplementation 'junit:junit:4.13.2'  // Test dependency\n}\n\napplication {\n    mainClass = 'com.example.UnderTheHood'  // Entry point of the app\n}\n
"},{"location":"langdives/Java/Gradle/#understanding-buildgradle","title":"Understanding build.gradle","text":"

Plugins Section

plugins {\n    id 'java'\n    id 'application'\n}\n

Repositories Section

repositories {\n    mavenCentral()\n}\n

Dependencies Section

dependencies {\n    implementation 'org.apache.commons:commons-lang3:3.12.0'\n    testImplementation 'junit:junit:4.13.2'\n}\n

Application Configuration

application {\n    mainClass = 'com.example.UnderTheHood'\n}\n
"},{"location":"langdives/Java/Gradle/#dependency-management","title":"Dependency Management","text":"

Gradle allows automatic dependency management. Dependencies (like libraries and frameworks) are fetched from repositories such as:

Gradle resolves dependencies in the following order:

Offline Mode
# Forces Gradle to use only the local cache \n# and does not try to access remote repositories.\ngradle build --offline\n
"},{"location":"langdives/Java/Gradle/#custom-tasks","title":"Custom Tasks","text":"

Gradle allows developers to create custom tasks to automate specific workflows.

Custom Tasks Example Create Custom Task
task uth {\n    doLast {\n        println 'Hello, UnderTheHood ;)'\n    }\n}\n
Run the Custom task
gradle uth\n
Output
Hello, UnderTheHood ;)\n

Custom tasks can be chained and made dependent on other tasks:

task compileCode {\n    dependsOn clean\n    doLast {\n        println 'Compiling code...'\n    }\n}\n
"},{"location":"langdives/Java/Gradle/#publishing-artifacts","title":"Publishing Artifacts","text":"

You can publish your project\u2019s artifacts (e.g., JARs) to Maven Local or Remote repositories using the maven-publish plugin.

Local Maven Publish Example

Apply Maven Publish Plugin
plugins {\n    id 'maven-publish'\n}\n
Configure Publishing in build.gradle
publishing {\n    publications {\n        mavenJava(MavenPublication) {\n            from components.java\n        }\n    }\n    repositories {\n        mavenLocal()  // Publish to local Maven repository (~/.m2/repository)\n    }\n}\n
Publish the Artifact
# This will install the JAR into your local Maven repository.\ngradle publishToMavenLocal\n
"},{"location":"langdives/Java/Gradle/#gradle-project-structure","title":"Gradle Project Structure","text":"Gradle recommended standard directory structure
/my-project\n\u2502\n\u251c\u2500\u2500 build.gradle          # Build configuration file\n\u251c\u2500\u2500 settings.gradle       # Project settings file\n\u251c\u2500\u2500 src\n\u2502   \u2514\u2500\u2500 main\n\u2502       \u2514\u2500\u2500 java          # Source code\n\u2502   \u2514\u2500\u2500 test\n\u2502       \u2514\u2500\u2500 java          # Unit tests\n\u2514\u2500\u2500 build                 # Output directory (JAR, WAR)\n

Gradle supports multi-module projects where different modules are part of the same build.

Example Multi-Project Structure
/root-project\n\u2502\n\u251c\u2500\u2500 build.gradle            # Root project configuration\n\u251c\u2500\u2500 settings.gradle         # Lists sub-projects\n\u251c\u2500\u2500 module-1/\n\u2502   \u2514\u2500\u2500 build.gradle        # Configuration for module 1\n\u2514\u2500\u2500 module-2/\n    \u2514\u2500\u2500 build.gradle        # Configuration for module 2\n
settings.gradle
rootProject.name = 'multi-project-example'\ninclude 'module-1', 'module-2'\n
Running the build
# This will build all modules in the correct order.\ngradle build\n
"},{"location":"langdives/Java/Gradle/#gradle-wrapper","title":"Gradle Wrapper","text":"

The Gradle Wrapper is a feature that allows a project to include a specific Gradle version along with scripts to execute builds. This ensures that anyone working on the project uses the same Gradle version without requiring a manual installation.

The Gradle Wrapper consists of:
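The wrapper files can be generated with Gradle's built-in wrapper task (the version below is illustrative):
# Generates gradlew, gradlew.bat and the gradle/wrapper files\ngradle wrapper --gradle-version 8.5\n\n# Build using the wrapper instead of a locally installed Gradle\n./gradlew build\n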

"},{"location":"langdives/Java/Gradle/#why-use-wrapper","title":"Why Use Wrapper ?","text":""},{"location":"langdives/Java/Gradle/#gradle-commands","title":"Gradle Commands","text":"

Here are some essential Gradle commands for working with projects:

Command Description gradle init Initializes a new Gradle project. gradle build Compiles, tests, and packages the project. gradle run Runs the application (if using the Application plugin). gradle clean Removes the build/ directory for a fresh build. gradle tasks Lists all available tasks. gradle test Runs all tests in the project. gradle publish Publishes artifacts to Maven repositories."},{"location":"langdives/Java/Gradle/#performance-benefits","title":"Performance Benefits","text":"

Gradle is designed for speed and efficiency

"},{"location":"langdives/Java/Gradle/#summary","title":"Summary","text":"

Gradle provides several advantages for modern projects

"},{"location":"langdives/Java/JDK-JRE-JVM/","title":"Java","text":"

Java is a high-level, object-oriented programming language designed for portability, security, and ease of use. It is known for its \"write once, run anywhere\" capability, allowing developers to create software that can run on any device with a Java Virtual Machine (JVM).

"},{"location":"langdives/Java/JDK-JRE-JVM/#architecture","title":"Architecture","text":"

The Java architecture is composed of three main components:

"},{"location":"langdives/Java/JDK-JRE-JVM/#jdk","title":"JDK","text":"

The JDK (Java Development Kit) is a comprehensive development environment for building Java applications. It provides all the tools necessary for Java developers to create, compile, and package Java applications.

Components

"},{"location":"langdives/Java/JDK-JRE-JVM/#jre","title":"JRE","text":"

The JRE (Java Runtime Environment) provides the libraries, Java Virtual Machine (JVM), and other components necessary for running Java applications. It does not include development tools, making it ideal for end-users who only need to run Java applications.

Components

"},{"location":"langdives/Java/JDK-JRE-JVM/#jvm","title":"JVM","text":"

The JVM (Java Virtual Machine) is an abstract computing machine that enables a computer to run Java programs. It is responsible for interpreting and executing the bytecode generated by the Java compiler.

Functions

"},{"location":"langdives/Java/JDK-JRE-JVM/#hierarchical-structure","title":"Hierarchical Structure","text":"

Hierarchical Structure

JDK (includes javac, JRE, Tools)\n\u2514\u2500\u2500 JRE (includes JVM and libraries)\n    \u2514\u2500\u2500 JVM (executes bytecode)\n
"},{"location":"langdives/Java/JDK-JRE-JVM/#how-java-executes","title":"How Java Executes ?","text":"

The execution of Java code involves several steps, transitioning through the JDK, JRE, and JVM before reaching the machine code that the computer's CPU executes.

Code Writing: Java developers write source code in plain text files using the .java extension. This code defines classes and methods that make up the Java application.

Code Compilation: The developer uses the Java compiler (javac), which is part of the JDK, to compile the .java file. This process translates the human-readable Java code into an intermediate form known as bytecode. The output of this step is a .class file containing the bytecode.

Running the Application: To run the Java application, the developer executes a command using the Java runtime environment (e.g., java ClassName), which triggers the JRE. The JRE includes the JVM, which performs the following steps:

Machine Code Execution: The machine code generated by the JVM is executed by the host operating system's CPU. This process allows Java applications to be platform-independent, as the same bytecode can run on any system that has a compatible JVM.
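A minimal sketch of this compile-and-run cycle from the command line (the class name is illustrative):
# Compile .java source into bytecode (.class)\njavac UnderTheHood.java\n\n# Launch the JVM, which loads and executes the bytecode\njava UnderTheHood\n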

"},{"location":"langdives/Java/Java8vs11vs17vs21/","title":"Java 8 vs 11 vs 17 vs 21","text":"

A detailed comparison of Java 8, Java 11, Java 17, and Java 21, summarizing the key differences, improvements, and deprecations introduced across these versions:

"},{"location":"langdives/Java/Java8vs11vs17vs21/#java-8-released-march-2014","title":"Java 8 (Released March 2014)","text":"

Java 8 is a long-term support (LTS) release, bringing significant new features:

"},{"location":"langdives/Java/Java8vs11vs17vs21/#major-features","title":"Major Features","text":""},{"location":"langdives/Java/Java8vs11vs17vs21/#performance-security","title":"Performance & Security","text":""},{"location":"langdives/Java/Java8vs11vs17vs21/#drawbacks","title":"Drawbacks","text":""},{"location":"langdives/Java/Java8vs11vs17vs21/#java-11-released-sept-2018","title":"Java 11 (Released Sept 2018)","text":"

Java 11 is also LTS and a significant milestone since it removed many outdated APIs and modularized the runtime.

"},{"location":"langdives/Java/Java8vs11vs17vs21/#major-features_1","title":"Major Features","text":""},{"location":"langdives/Java/Java8vs11vs17vs21/#removals-deprecations","title":"Removals & Deprecations","text":""},{"location":"langdives/Java/Java8vs11vs17vs21/#performance-security_1","title":"Performance & Security","text":""},{"location":"langdives/Java/Java8vs11vs17vs21/#java-17-released-sept-2021","title":"Java 17 (Released Sept 2021)","text":"

Java 17 is an LTS release, refining many features introduced in Java 9-16 and stabilizing the platform.

"},{"location":"langdives/Java/Java8vs11vs17vs21/#major-features_2","title":"Major Features","text":""},{"location":"langdives/Java/Java8vs11vs17vs21/#removals-deprecations_1","title":"Removals & Deprecations","text":""},{"location":"langdives/Java/Java8vs11vs17vs21/#garbage-improvements","title":"Garbage Improvements","text":""},{"location":"langdives/Java/Java8vs11vs17vs21/#performance-security_2","title":"Performance & Security","text":""},{"location":"langdives/Java/Java8vs11vs17vs21/#java-21-released-sept-2023","title":"Java 21 (Released Sept 2023)","text":"

Java 21 is an LTS release. It introduces many experimental and innovative features.

"},{"location":"langdives/Java/Java8vs11vs17vs21/#major-features_3","title":"Major Features","text":""},{"location":"langdives/Java/Java8vs11vs17vs21/#removals-deprecations_2","title":"Removals & Deprecations:","text":""},{"location":"langdives/Java/Java8vs11vs17vs21/#performance-security_3","title":"Performance & Security:","text":""},{"location":"langdives/Java/Java8vs11vs17vs21/#key-versions-differences","title":"Key Versions Differences","text":"Feature / Change Java 8 Java 11 Java 17 Java 21 LTS Release Yes Yes Yes No Lambda Expressions Yes Yes Yes Yes HTTP Client No Yes (HTTP/2) Yes Yes Modular System (JPMS) No Yes Yes Yes Records No No Yes Yes Sealed Classes No No Yes Yes Text Blocks No No Yes Yes Pattern Matching (instanceof) No No Yes Yes Virtual Threads (Loom) No No No Yes Garbage Collectors G1 GC ZGC ZGC, Shenandoah Improved ZGC, Shenandoah String Enhancements Basic strip(), repeat() Text Blocks String Templates TLS Version 1.2 1.3 1.3 1.3 Security Manager Available Deprecated Deprecated Removed Nashorn JavaScript Engine Yes Deprecated Removed Removed"},{"location":"langdives/Java/Java8vs11vs17vs21/#summary","title":"Summary","text":"

For production systems, upgrading to Java 17 is generally recommended unless your project needs experimental features from Java 21.

"},{"location":"langdives/Java/JavaPassBy/","title":"Is Java Pass By Value or By Reference ?","text":"

Java is strictly pass by value, but let's go in depth.

"},{"location":"langdives/Java/JavaPassBy/#reference-vs-primitive","title":"Reference vs Primitive","text":"

Primitive Types: These are the basic data types in Java (e.g., int, char, boolean). When you pass a primitive type to a method, a copy of the value is made.

Reference Types: These include objects, arrays, and instances of classes. When you pass a reference type to a method, you pass a reference (or pointer) to the actual object in memory, not the object itself.

"},{"location":"langdives/Java/JavaPassBy/#pass-by-value","title":"Pass by Value","text":"

Java uses a mechanism called pass by value for method arguments, but it\u2019s important to clarify how this applies to primitive and reference types.

"},{"location":"langdives/Java/JavaPassBy/#primitive-types","title":"Primitive Types","text":"

When you pass a primitive type to a method, the method receives a copy of the variable's value. Any changes made to this copy do not affect the original variable.

Example
public class PassByValueExample {\n    public static void main(String[] args) {\n        int num = 10;\n        modifyValue(num); // Passing primitive\n        System.out.println(num); // Output: 10\n    }\n\n    public static void modifyValue(int value) {\n        value = 20; // Only modifies the copy\n    }\n}\n
"},{"location":"langdives/Java/JavaPassBy/#reference-types","title":"Reference Types","text":"

When you pass a reference type to a method, the reference itself is passed by value. This means the method receives a copy of the reference to the object. While you can change the object's properties, you cannot change the reference to point to a different object.

Example
class MyClass {\n    int value;\n\n    MyClass(int value) {\n        this.value = value;\n    }\n}\n\npublic class PassByReferenceExample {\n    public static void main(String[] args) {\n        MyClass obj = new MyClass(10);\n        modifyObject(obj); // Passing reference\n        System.out.println(obj.value); // Output: 20\n    }\n\n    public static void modifyObject(MyClass myObject) {\n        myObject.value = 20; // Modifies the object's property\n        // myObject = new MyClass(30); // This would not affect the original reference; it only changes the local myObject.\n    }\n}\n
"},{"location":"langdives/Java/JavaPassBy/#why","title":"Why?","text":"

When a method is called, a new stack frame is created, and local variables (including method parameters) are stored in this stack frame. Objects are stored in the heap, and the reference to these objects is passed to methods. When you modify the object\u2019s state inside the method, it reflects outside the method because both the original reference and the parameter reference point to the same object in memory.

"},{"location":"langdives/Java/JavaPassBy/#scope-and-lifetime","title":"Scope and Lifetime","text":""},{"location":"langdives/Java/JavaPassBy/#summary","title":"Summary","text":"

Java is pass-by-value:

Changes to the object through the reference affect the original object, but reassignment of the reference does not affect the original reference.

"},{"location":"langdives/Java/KeyWordsTerminolgies/","title":"Keywords and Terminolgies","text":""},{"location":"langdives/Java/KeyWordsTerminolgies/#class-object","title":"Class & Object","text":""},{"location":"langdives/Java/KeyWordsTerminolgies/#modifiers","title":"Modifiers","text":""},{"location":"langdives/Java/KeyWordsTerminolgies/#inheritance-polymorphism","title":"Inheritance & Polymorphism","text":""},{"location":"langdives/Java/KeyWordsTerminolgies/#control-flow","title":"Control Flow","text":""},{"location":"langdives/Java/KeyWordsTerminolgies/#exception-handling","title":"Exception Handling","text":""},{"location":"langdives/Java/KeyWordsTerminolgies/#memory-managementthreada","title":"Memory Management/Threada","text":""},{"location":"langdives/Java/KeyWordsTerminolgies/#types","title":"Types","text":""},{"location":"langdives/Java/KeyWordsTerminolgies/#others","title":"Others","text":""},{"location":"langdives/Java/Locking-Intrinsic/","title":"Locking","text":"

Locking is an essential concept in multithreaded programming to prevent race conditions and ensure thread safety. When multiple threads access shared resources, locks ensure that only one thread accesses the critical section at a time.

This article covers the synchronized keyword and intrinsic locks.

"},{"location":"langdives/Java/Locking-Intrinsic/#what-is-locking","title":"What is Locking?","text":"

Locking is a way to ensure that only one thread at a time executes a critical section or modifies a shared resource. Without proper locks, multiple threads may interfere with each other, leading to data inconsistency or unexpected behavior (race conditions).

Java offers various locking mechanisms, from synchronized blocks to explicit locks like ReentrantLock.
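
To see why locks matter, here is a minimal sketch of a race condition (class name illustrative): two threads increment a shared counter without any lock, and the final count usually lands below 2000 because count++ is a non-atomic read-modify-write.

public class RaceConditionDemo {\n    private static int count = 0; // Shared, unsynchronized state\n\n    public static void main(String[] args) throws InterruptedException {\n        Runnable task = () -> {\n            for (int i = 0; i < 1000; i++) count++; // Read-modify-write, not atomic\n        };\n\n        Thread t1 = new Thread(task);\n        Thread t2 = new Thread(task);\n        t1.start();\n        t2.start();\n        t1.join();\n        t2.join();\n\n        System.out.println(\"Final Count: \" + count); // Often less than 2000\n    }\n}\n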

"},{"location":"langdives/Java/Locking-Intrinsic/#synchronized-and-intrinsic-locks","title":"Synchronized and Intrinsic Locks","text":"

Java\u2019s synchronized keyword is one of the primary ways to control access to shared resources in multithreaded programs. It ensures thread safety by providing mutual exclusion and visibility guarantees. Let's go through every aspect of synchronized.

How synchronized Works ?

When a method or block is marked as synchronized, the JVM uses a monitor lock (intrinsic lock) for the associated object or class. The monitor is a synchronization construct provided by the JVM.

Two things happen when a thread enters a synchronized block or method:

Mutual Exclusion (Intrinsic Lock): Each Java object has an intrinsic lock (also called a monitor lock) associated with it. The thread that enters the synchronized block acquires the intrinsic lock; when it leaves the block, it releases the lock, allowing other threads to acquire it.

Memory Visibility: Acquiring and releasing the monitor establishes a happens-before relationship, so writes made inside the synchronized block become visible to the next thread that acquires the same lock.

"},{"location":"langdives/Java/Locking-Intrinsic/#synchronized-methods","title":"Synchronized Methods","text":""},{"location":"langdives/Java/Locking-Intrinsic/#instance-level-locking","title":"Instance-Level Locking","text":"

When you synchronize a non-static method, the thread acquires the lock on the instance of the class (the this object).

public synchronized void increment() {\n    // Lock acquired on the current instance (this)\n    count++;\n}\n
Example with Instance-Level Locking
class Counter {\n    private int count = 0;\n\n    public synchronized void increment() {\n        count++;\n    }\n\n    public synchronized int getCount() {\n        return count;\n    }\n}\n\npublic class Main {\n    public static void main(String[] args) throws InterruptedException {\n        Counter counter = new Counter();\n\n        Thread t1 = new Thread(() -> {\n            for (int i = 0; i < 1000; i++) counter.increment();\n        });\n\n        Thread t2 = new Thread(() -> {\n            for (int i = 0; i < 1000; i++) counter.increment();\n        });\n\n        t1.start();\n        t2.start();\n        t1.join();\n        t2.join();\n\n        System.out.println(\"Final Count: \" + counter.getCount());  // Output: 2000\n    }\n}\n

Why does this work ?

Since both threads are operating on the same Counter object, only one thread at a time can execute the increment() method due to instance-level locking.

"},{"location":"langdives/Java/Locking-Intrinsic/#class-level-locking","title":"Class-Level Locking","text":"

A static synchronized method locks on the Class object (i.e., ClassName.class) rather than on an instance. This ensures that all threads calling static methods on the class are synchronized.

public synchronized static void staticIncrement() {\n    // Lock acquired on the class object (Counter.class)\n}\n
Example with Class-Level Locking
class Counter {\n    private static int count = 0;\n\n    public synchronized static void increment() {\n        count++;\n    }\n\n    public synchronized static int getCount() {\n        return count;\n    }\n}\n\npublic class Main {\n    public static void main(String[] args) throws InterruptedException {\n        Thread t1 = new Thread(() -> {\n            for (int i = 0; i < 1000; i++) Counter.increment();\n        });\n\n        Thread t2 = new Thread(() -> {\n            for (int i = 0; i < 1000; i++) Counter.increment();\n        });\n\n        t1.start();\n        t2.start();\n        t1.join();\n        t2.join();\n\n        System.out.println(\"Final Count: \" + Counter.getCount());  // Output: 2000\n    }\n}\n
"},{"location":"langdives/Java/Locking-Intrinsic/#synchronized-blocks","title":"Synchronized Blocks","text":"

A synchronized block provides more control than a synchronized method. You can choose which object\u2019s intrinsic lock to use, instead of locking the entire method.

public void increment() {\n    synchronized (this) {  // Locking on the current instance\n        count++;\n    }\n}\n
When to use a private lock object ? Locking on a dedicated private final object instead of this prevents external code from synchronizing on your instance and interfering with your locking protocol:
class Counter {\n    private int count = 0;\n    private final Object lock = new Object();\n\n    public void increment() {\n        synchronized (lock) {  // Locking on a custom object\n            count++;\n        }\n    }\n}\n
Example: Synchronized Block with Fine-Grained Control
public void updateBothCounters(Counter counter1, Counter counter2) {\n    synchronized (counter1) {  // Locking on the first Counter object\n        counter1.increment();\n    }\n    synchronized (counter2) {  // Locking on the second Counter object\n        counter2.increment();\n    }\n}\n
"},{"location":"langdives/Java/Locking-Intrinsic/#how-it-work-internally","title":"How it Work Internally","text":""},{"location":"langdives/Java/Locking-Intrinsic/#best-practices","title":"Best Practices","text":""},{"location":"langdives/Java/Locking-Intrinsic/#potential-issues","title":"Potential Issues","text":""},{"location":"langdives/Java/Locking-Issues-DeadLock/","title":"Issues with Locking - DeadLock","text":"

Locking mechanisms in Java, while essential for ensuring thread safety in multithreaded applications, can introduce various issues if not used properly.

In this article, we\u2019ll explore how deadlocks occur, how to prevent them, and practical examples of various techniques to detect and resolve deadlocks. A deadlock is a common concurrency issue in multithreaded programs and can severely impact performance.

"},{"location":"langdives/Java/Locking-Issues-DeadLock/#what-is-deadlock","title":"What is Deadlock ?","text":"

A deadlock occurs when two or more threads are blocked indefinitely: each thread is waiting for a lock held by the other, so neither can proceed.

This results in a circular wait, where every thread holds a lock that another thread needs and none will release what it holds, so the program stalls permanently.

"},{"location":"langdives/Java/Locking-Issues-DeadLock/#how-deadlock-occurs","title":"How Deadlock Occurs ?","text":"

Let\u2019s revisit the classic deadlock example.

Deadlock Example
class A {\n    public synchronized void methodA(B b) {\n        System.out.println(Thread.currentThread().getName() + \": Locked A, waiting for B...\");\n        try {\n            Thread.sleep(50);  // Simulate some work\n        } catch (InterruptedException e) {\n            e.printStackTrace();\n        }\n        b.last();  // Waiting for lock on object B\n    }\n\n    public synchronized void last() {\n        System.out.println(Thread.currentThread().getName() + \": Inside A.last()\");\n    }\n}\n\nclass B {\n    public synchronized void methodB(A a) {\n        System.out.println(Thread.currentThread().getName() + \": Locked B, waiting for A...\");\n        try {\n            Thread.sleep(50);  // Simulate some work\n        } catch (InterruptedException e) {\n            e.printStackTrace();\n        }\n        a.last();  // Waiting for lock on object A\n    }\n\n    public synchronized void last() {\n        System.out.println(Thread.currentThread().getName() + \": Inside B.last()\");\n    }\n}\n\npublic class DeadlockDemo {\n    public static void main(String[] args) {\n        A a = new A();\n        B b = new B();\n\n        Thread t1 = new Thread(() -> a.methodA(b), \"Thread 1\");\n        Thread t2 = new Thread(() -> b.methodB(a), \"Thread 2\");\n\n        t1.start();\n        t2.start();\n    }\n}\n

Flow Analysis

  1. Thread 1 starts and calls a.methodA(b). It acquires the lock on object A and prints:

    Thread 1: Locked A, waiting for B...\n

  2. Thread 2 starts and calls b.methodB(a). It acquires the lock on object B and prints:

    Thread 2: Locked B, waiting for A...\n

  3. Now:

    • Thread 1 holds the lock on A and waits for Thread 2 to release the lock on B.
    • Thread 2 holds the lock on B and waits for Thread 1 to release the lock on A.

Both threads are waiting indefinitely, resulting in a deadlock.

"},{"location":"langdives/Java/Locking-Issues-DeadLock/#how-to-avoid","title":"How to Avoid ?","text":""},{"location":"langdives/Java/Locking-Issues-DeadLock/#acquiring-locks-in-a-order","title":"Acquiring Locks in a Order","text":"

If all threads acquire locks in the same order, deadlock can be prevented.

Modified Example: Acquiring Locks in the Same Order
class A {\n    public void methodA(B b) {\n        synchronized (this) {  // Lock A first\n            System.out.println(Thread.currentThread().getName() + \": Locked A, waiting for B...\");\n            synchronized (b) {  // Then lock B\n                System.out.println(Thread.currentThread().getName() + \": Acquired lock on B\");\n                b.last();\n            }\n        }\n    }\n\n    public void last() {\n        System.out.println(Thread.currentThread().getName() + \": Inside A.last()\");\n    }\n}\n\nclass B {\n    public void methodB(A a) {\n        synchronized (a) {  // Lock A first, matching methodA's global order\n            System.out.println(Thread.currentThread().getName() + \": Locked A, waiting for B...\");\n            synchronized (this) {  // Then lock B\n                System.out.println(Thread.currentThread().getName() + \": Acquired lock on B\");\n                a.last();\n            }\n        }\n    }\n\n    public void last() {\n        System.out.println(Thread.currentThread().getName() + \": Inside B.last()\");\n    }\n}\n\npublic class DeadlockResolved {\n    public static void main(String[] args) {\n        A a = new A();\n        B b = new B();\n\n        Thread t1 = new Thread(() -> a.methodA(b), \"Thread 1\");\n        Thread t2 = new Thread(() -> b.methodB(a), \"Thread 2\");\n\n        t1.start();\n        t2.start();\n    }\n}\n

Explanation

Both threads now acquire the locks in the same global order: A first, then B. methodA locks A (this) before B, and methodB also locks A before B (this). Because no thread ever holds B while waiting for A, the circular wait is broken and deadlock cannot occur.

"},{"location":"langdives/Java/Locking-Issues-DeadLock/#using-trylock-with-timeout","title":"Using tryLock() with Timeout","text":"

The tryLock() method attempts to acquire a lock and fails gracefully if the lock is not available within a specified time.

Deadlock Prevention using tryLock() example
import java.util.concurrent.TimeUnit;\nimport java.util.concurrent.locks.ReentrantLock;\n\npublic class TryLockDemo {\n    private final ReentrantLock lockA = new ReentrantLock();\n    private final ReentrantLock lockB = new ReentrantLock();\n\n    public void methodA() {\n        try {\n            if (lockA.tryLock(1, TimeUnit.SECONDS)) {\n                try {\n                    System.out.println(Thread.currentThread().getName() + \": Locked A\");\n                    Thread.sleep(50);  // Simulate some work\n\n                    if (lockB.tryLock(1, TimeUnit.SECONDS)) {\n                        try {\n                            System.out.println(Thread.currentThread().getName() + \": Locked B\");\n                        } finally {\n                            lockB.unlock();\n                        }\n                    } else {\n                        System.out.println(Thread.currentThread().getName() + \": Could not acquire lock B, releasing A\");\n                    }\n                } finally {\n                    lockA.unlock();  // Always released, even if interrupted mid-way\n                }\n            }\n        } catch (InterruptedException e) {\n            Thread.currentThread().interrupt();\n        }\n    }\n\n    public void methodB() {\n        try {\n            if (lockB.tryLock(1, TimeUnit.SECONDS)) {\n                try {\n                    System.out.println(Thread.currentThread().getName() + \": Locked B\");\n                    Thread.sleep(50);  // Simulate some work\n\n                    if (lockA.tryLock(1, TimeUnit.SECONDS)) {\n                        try {\n                            System.out.println(Thread.currentThread().getName() + \": Locked A\");\n                        } finally {\n                            lockA.unlock();\n                        }\n                    } else {\n                        System.out.println(Thread.currentThread().getName() + \": Could not acquire lock A, releasing B\");\n                    }\n                } finally {\n                    lockB.unlock();  // Always released, even if interrupted mid-way\n                }\n            }\n        } catch (InterruptedException e) {\n            Thread.currentThread().interrupt();\n        }\n    }\n\n    public static void main(String[] args) {\n        TryLockDemo demo = new TryLockDemo();\n\n        Thread t1 = new Thread(demo::methodA, \"Thread 1\");\n        Thread t2 = new Thread(demo::methodB, \"Thread 2\");\n\n        t1.start();\n        t2.start();\n    }\n}\n

Explanation

If a thread fails to acquire a lock within the timeout, it releases any locks it holds, avoiding a deadlock.

"},{"location":"langdives/Java/Locking-Issues-DeadLock/#detecting-using-monitoring-tools","title":"Detecting Using Monitoring Tools","text":"

You can detect deadlocks using tools like jstack (thread dumps report \"Found one Java-level deadlock\"), JConsole, and VisualVM, all of which have built-in deadlock detection.
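
Deadlocks can also be detected programmatically through the JMX ThreadMXBean API; here is a minimal sketch (class name illustrative) that could run from a watchdog thread:

import java.lang.management.ManagementFactory;\nimport java.lang.management.ThreadInfo;\nimport java.lang.management.ThreadMXBean;\n\npublic class DeadlockDetector {\n    public static void main(String[] args) {\n        ThreadMXBean bean = ManagementFactory.getThreadMXBean();\n        long[] deadlocked = bean.findDeadlockedThreads();  // null if no deadlock\n        if (deadlocked != null) {\n            for (ThreadInfo info : bean.getThreadInfo(deadlocked)) {\n                System.out.println(info.getThreadName() + \" is deadlocked on \" + info.getLockName());\n            }\n        } else {\n            System.out.println(\"No deadlocks detected.\");\n        }\n    }\n}\n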

"},{"location":"langdives/Java/Locking-Issues-DeadLock/#best-practices","title":"Best Practices","text":""},{"location":"langdives/Java/Locking-Issues-DeadLock/#summary","title":"Summary","text":"

Deadlocks are one of the most common and dangerous issues in multithreaded programming.

"},{"location":"langdives/Java/Locking-Issues-LiveLock/","title":"Issues with Locking - LiveLock","text":"

Locking mechanisms in Java, while essential for ensuring thread safety in multithreaded applications, can introduce various issues if not used properly.

In this article, we\u2019ll explore how livelocks occur, how to prevent them, and practical examples of various techniques to detect and resolve them.

"},{"location":"langdives/Java/Locking-Issues-LiveLock/#what-is-livelock","title":"What is Livelock ?","text":"

In a livelock, multiple threads remain active but are unable to make progress because they keep responding to each other\u2019s actions. Unlike deadlock, where threads are stuck waiting for locks indefinitely, threads in a livelock keep changing their states in response to each other, yet they never reach a final state or make useful progress.

Key difference from deadlock

In deadlock, threads are blocked waiting for each other, while in livelock, threads are not blocked, but they keep releasing and reacquiring locks or changing states in a way that prevents progress.

"},{"location":"langdives/Java/Locking-Issues-LiveLock/#example-of-livelock","title":"Example of Livelock","text":"

Consider two people trying to pick up a spoon to eat, but they keep yielding to each other in an attempt to be polite. Neither person can make progress because they\u2019re constantly checking and responding to each other\u2019s actions.

Livelock Example
class Spoon {\n    private boolean isAvailable = true;\n\n    public synchronized boolean pickUp() {\n        if (isAvailable) {\n            isAvailable = false;\n            return true;\n        }\n        return false;\n    }\n\n    public synchronized void putDown() {\n        isAvailable = true;\n    }\n}\n\npublic class LivelockDemo {\n    public static void main(String[] args) {\n        Spoon spoon = new Spoon();\n\n        Thread person1 = new Thread(() -> {\n            while (!spoon.pickUp()) {\n                System.out.println(\"Person 1: Waiting for spoon...\");\n                Thread.yield();  // Yield control to other threads\n            }\n            System.out.println(\"Person 1: Picked up spoon!\");\n        });\n\n        Thread person2 = new Thread(() -> {\n            while (!spoon.pickUp()) {\n                System.out.println(\"Person 2: Waiting for spoon...\");\n                Thread.yield();  // Yield control to other threads\n            }\n            System.out.println(\"Person 2: Picked up spoon!\");\n        });\n\n        person1.start();\n        person2.start();\n    }\n}\n

Explanation

"},{"location":"langdives/Java/Locking-Issues-LiveLock/#causes-of-livelock","title":"Causes of Livelock","text":""},{"location":"langdives/Java/Locking-Issues-LiveLock/#how-to-avoid-livelocks","title":"How to Avoid Livelocks","text":""},{"location":"langdives/Java/Locking-Issues-LiveLock/#use-timeouts-for-locking","title":"Use Timeouts for Locking","text":"

Using timeouts helps threads avoid indefinite waiting. If a thread cannot acquire the lock within a certain time, it can stop trying or take an alternative path.

Using tryLock() with Timeout
import java.util.concurrent.TimeUnit;\nimport java.util.concurrent.locks.ReentrantLock;\n\nclass Spoon {\n    private final ReentrantLock lock = new ReentrantLock();\n\n    public boolean pickUp() throws InterruptedException {\n        // Try to acquire the lock with a timeout\n        return lock.tryLock(1, TimeUnit.SECONDS);\n    }\n\n    public void putDown() {\n        lock.unlock();\n    }\n}\n\npublic class LivelockFixed {\n    public static void main(String[] args) {\n        Spoon spoon = new Spoon();\n\n        Thread person1 = new Thread(() -> {\n            try {\n                if (spoon.pickUp()) {\n                    System.out.println(\"Person 1: Picked up spoon!\");\n                    spoon.putDown();\n                } else {\n                    System.out.println(\"Person 1: Couldn't get the spoon in time.\");\n                }\n            } catch (InterruptedException e) {\n                Thread.currentThread().interrupt();\n            }\n        });\n\n        Thread person2 = new Thread(() -> {\n            try {\n                if (spoon.pickUp()) {\n                    System.out.println(\"Person 2: Picked up spoon!\");\n                    spoon.putDown();\n                } else {\n                    System.out.println(\"Person 2: Couldn't get the spoon in time.\");\n                }\n            } catch (InterruptedException e) {\n                Thread.currentThread().interrupt();\n            }\n        });\n\n        person1.start();\n        person2.start();\n    }\n}\n

Why it works ?

If a thread fails to acquire the lock within 1 second, it backs off instead of trying indefinitely.

"},{"location":"langdives/Java/Locking-Issues-LiveLock/#use-back-off-strategies","title":"Use Back-off Strategies","text":"

A back-off strategy makes threads wait for a random amount of time before retrying. This avoids a situation where two threads keep checking the same lock in sync.

Back-off Strategy Example
import java.util.Random;\nimport java.util.concurrent.locks.ReentrantLock;\n\nclass Spoon {\n    private final ReentrantLock lock = new ReentrantLock();\n    private final Random random = new Random();\n\n    public boolean tryPickUp() {\n        return lock.tryLock();\n    }\n\n    public void putDown() {\n        lock.unlock();\n    }\n\n    public void backOff() throws InterruptedException {\n        Thread.sleep(random.nextInt(100));  // Wait for a random time\n    }\n}\n\npublic class LivelockWithBackoff {\n    public static void main(String[] args) {\n        Spoon spoon = new Spoon();\n\n        Thread person1 = new Thread(() -> {\n            try {\n                while (!spoon.tryPickUp()) {\n                    System.out.println(\"Person 1: Waiting...\");\n                    spoon.backOff();  // Wait before retrying\n                }\n                System.out.println(\"Person 1: Picked up spoon!\");\n                spoon.putDown();\n            } catch (InterruptedException e) {\n                Thread.currentThread().interrupt();\n            }\n        });\n\n        Thread person2 = new Thread(() -> {\n            try {\n                while (!spoon.tryPickUp()) {\n                    System.out.println(\"Person 2: Waiting...\");\n                    spoon.backOff();  // Wait before retrying\n                }\n                System.out.println(\"Person 2: Picked up spoon!\");\n                spoon.putDown();\n            } catch (InterruptedException e) {\n                Thread.currentThread().interrupt();\n            }\n        });\n\n        person1.start();\n        person2.start();\n    }\n}\n

Why it works ?

The random back-off time prevents threads from retrying in lockstep, avoiding livelock.

"},{"location":"langdives/Java/Locking-Issues-LiveLock/#avoid-excessive-yielding","title":"Avoid Excessive Yielding","text":"

Frequent use of Thread.yield() can lead to livelock. Instead, use timeouts or back-off strategies to prevent threads from constantly giving way to each other.

"},{"location":"langdives/Java/Locking-Issues-LiveLock/#use-condition-variables","title":"Use Condition Variables","text":"

Use Condition variables (available with ReentrantLock) to properly coordinate threads waiting on specific conditions.

Using Condition Variables Example
import java.util.concurrent.locks.Condition;\nimport java.util.concurrent.locks.ReentrantLock;\n\nclass Spoon {\n    private boolean isAvailable = true;\n    private final ReentrantLock lock = new ReentrantLock();\n    private final Condition spoonAvailable = lock.newCondition();\n\n    public void pickUp() throws InterruptedException {\n        lock.lock();\n        try {\n            while (!isAvailable) {\n                spoonAvailable.await();  // Wait until spoon is available\n            }\n            isAvailable = false;\n        } finally {\n            lock.unlock();\n        }\n    }\n\n    public void putDown() {\n        lock.lock();\n        try {\n            isAvailable = true;\n            spoonAvailable.signal();  // Notify waiting thread\n        } finally {\n            lock.unlock();\n        }\n    }\n}\n

Why it works ?

Using condition variables ensures that only one thread proceeds when the spoon becomes available, avoiding busy-waiting and yielding.

"},{"location":"langdives/Java/Locking-Issues-LiveLock/#best-practices","title":"Best Practices","text":""},{"location":"langdives/Java/Locking-Issues-LiveLock/#summary","title":"Summary","text":"

Livelocks can be tricky to detect because threads remain active yet fail to make meaningful progress. Timeouts, back-off strategies, condition variables, and proper locking mechanisms all help ensure that threads eventually complete their work instead of endlessly reacting to one another.

"},{"location":"langdives/Java/Locking-Issues-Others/","title":"Issues with Locking - Other Issues","text":"

Locking mechanisms in Java, while essential for ensuring thread safety in multithreaded applications, can introduce various issues if not used properly.

In this article we cover key locking issues in Java: race conditions, thread contention, missed signals, nested locks, overuse of locks, and the performance overhead of locking. Each section contains causes, examples, solutions, and best practices to avoid or mitigate these issues.

"},{"location":"langdives/Java/Locking-Issues-Others/#race-conditions-despite-locking","title":"Race Conditions Despite Locking","text":"Cause

A race condition occurs when multiple threads access a shared resource without proper synchronization, leading to inconsistent results based on the timing of thread execution. Even with partial locks, a shared variable may still be accessed inconsistently if not protected properly.

Race Condition Example

class Counter {\n    private int count = 0;\n\n    public void increment() {\n        synchronized (this) {\n            count++;\n        }\n    }\n\n    public int getCount() {\n        // Not synchronized, potential race condition.\n        return count;\n    }\n}\n

Problem

The write path (increment) is synchronized, but the read path (getCount) is not, so a thread may observe a stale or in-flight value of count while another thread updates it. Both reads and writes must synchronize on the same lock (or the field must be made atomic) for the result to be consistent.

Solution and Best Practices "},{"location":"langdives/Java/Locking-Issues-Others/#contention-performance-issues","title":"Contention & Performance Issues","text":"Cause

When multiple threads compete for the same lock, they spend time waiting for the lock to become available, reducing throughput and performance.

Contention Example

class BankAccount {\n    private int balance = 100;\n\n    public synchronized void withdraw(int amount) {\n        balance -= amount;\n    }\n\n    public synchronized int getBalance() {\n        return balance;\n    }\n}\n

Problem

If multiple threads frequently access the withdraw() method, contention for the lock will occur, degrading performance.

Solution and Best Practices "},{"location":"langdives/Java/Locking-Issues-Others/#missed-signals-lost-wake-ups","title":"Missed Signals & Lost Wake-ups","text":"Cause

When a thread misses a notify() signal because it was not yet waiting on the lock, a lost wake-up occurs. This results in threads waiting indefinitely for a signal that has already been sent.

Lost Wake-Up Example

public synchronized void produce() throws InterruptedException {\n    while (available) {\n        wait();  // Without this while-guard, a notify() issued before the wait would be lost\n    }\n    available = true;\n    notify();\n}\n
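
The standard fix is a guarded wait: test the condition in a while loop while holding the monitor, and prefer notifyAll() so no relevant waiter is missed. A minimal producer/consumer sketch (class name illustrative):

class Buffer {\n    private boolean available = false;\n\n    public synchronized void produce() throws InterruptedException {\n        while (available) {\n            wait();  // Re-checks the flag after every wake-up\n        }\n        available = true;\n        notifyAll();  // Wakes all waiters so the signal cannot be lost\n    }\n\n    public synchronized void consume() throws InterruptedException {\n        while (!available) {\n            wait();\n        }\n        available = false;\n        notifyAll();\n    }\n}\n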
Solution and Best Practices "},{"location":"langdives/Java/Locking-Issues-Others/#nested-locks-ordering-issues","title":"Nested Locks & Ordering Issues","text":"Cause

Using multiple locks can cause deadlocks if threads acquire locks in different orders.

Deadlock Example

synchronized (lock1) {\n    synchronized (lock2) {\n        // Critical section\n    }\n}\n

Problem

If Thread 1 acquires lock1 and Thread 2 acquires lock2, both threads will wait indefinitely for each other\u2019s lock, resulting in a deadlock.

Solution and Best Practices "},{"location":"langdives/Java/Locking-Issues-Others/#overuse-of-locks","title":"Overuse of Locks","text":"Cause

Using too many locks or locking too frequently can reduce parallelism, resulting in poor scalability. If every method in a class is synchronized, threads will frequently block each other, reducing concurrency and efficiency.
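
One common remedy is to replace coarse synchronized methods with the lock-free utilities in java.util.concurrent where they fit. A minimal sketch (class name illustrative):

import java.util.concurrent.atomic.AtomicInteger;\n\nclass HitCounter {\n    private final AtomicInteger hits = new AtomicInteger();\n\n    public void record() {\n        hits.incrementAndGet();  // Lock-free atomic update, no synchronized needed\n    }\n\n    public int current() {\n        return hits.get();\n    }\n}\n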

Solution and Best Practices "},{"location":"langdives/Java/Locking-Issues-Others/#overhead-of-locking","title":"Overhead of Locking","text":"Cause

Locking adds overhead in the form of: - Context switches between threads. - CPU cache invalidation. - JVM's monitor management for intrinsic locks.

Performance Issues with Synchronized Code

Excessive locking causes contention and frequent context switches, impacting throughput and latency.

Solution and Best Practices "},{"location":"langdives/Java/Locking-Issues-Others/#best-practices","title":"Best Practices","text":""},{"location":"langdives/Java/Locking-Issues-Others/#summary","title":"Summary","text":"

Locking is essential to ensure thread safety, but improper use can lead to issues such as race conditions, deadlocks, livelocks, contention, and performance degradation. Understanding these issues and following best practices will help you write efficient, scalable, and thread-safe code. Use fine-grained locks and concurrent utilities wisely to maximize concurrency while minimizing risk.

"},{"location":"langdives/Java/Locking-Issues-Starvation/","title":"Issues with Locking - Starvation","text":"

Locking mechanisms in Java, while essential for ensuring thread safety in multithreaded applications, can introduce various issues if not used properly.

In this article, we\u2019ll explore how starvation occurs, how to prevent it, and practical examples of various techniques to detect and resolve starvation.

"},{"location":"langdives/Java/Locking-Issues-Starvation/#what-is-starvation","title":"What is Starvation ?","text":"

Starvation is a condition where low-priority threads are unable to gain access to resources because higher-priority threads or unfair scheduling policies monopolize CPU time or locks. As a result, the low-priority thread starves and never gets the chance to run, even though it is ready to execute.

This issue can manifest in multithreaded programs when locks or resources are continuously granted to specific threads, leaving others waiting indefinitely. It can occur not only due to CPU scheduling but also due to improper locking strategies, unfair algorithms, or resource starvation.

"},{"location":"langdives/Java/Locking-Issues-Starvation/#causes-of-starvation","title":"Causes of Starvation","text":""},{"location":"langdives/Java/Locking-Issues-Starvation/#example-of-starvation","title":"Example of Starvation","text":"Starvation with Unfair Lock Example
import java.util.concurrent.locks.ReentrantLock;\n\npublic class StarvationDemo {\n    private static final ReentrantLock lock = new ReentrantLock();  // Unfair lock\n\n    public static void main(String[] args) {\n        Runnable task = () -> {\n            while (true) {\n                if (lock.tryLock()) {\n                    try {\n                        System.out.println(Thread.currentThread().getName() + \" got the lock!\");\n                        break;\n                    } finally {\n                        lock.unlock();\n                    }\n                } else {\n                    System.out.println(Thread.currentThread().getName() + \" waiting...\");\n                }\n            }\n        };\n\n        Thread highPriority = new Thread(task, \"High-Priority\");\n        highPriority.setPriority(Thread.MAX_PRIORITY);\n\n        Thread lowPriority = new Thread(task, \"Low-Priority\");\n        lowPriority.setPriority(Thread.MIN_PRIORITY);\n\n        highPriority.start();\n        lowPriority.start();\n    }\n}\n

Explanation

Because the lock is unfair, there is no queueing guarantee: whichever thread the scheduler favours (often the high-priority one) can repeatedly win tryLock(), while the other thread keeps spinning and printing its waiting message until it finally gets a chance.

"},{"location":"langdives/Java/Locking-Issues-Starvation/#how-to-avoid","title":"How to Avoid ?","text":""},{"location":"langdives/Java/Locking-Issues-Starvation/#use-fair-locks","title":"Use Fair Locks","text":"

Using fair locks ensures that the longest-waiting thread gets the lock first. This prevents threads from skipping the queue and ensures all threads get a chance to execute.

Fair Lock Example
import java.util.concurrent.locks.ReentrantLock;\n\npublic class FairLockDemo {\n    private static final ReentrantLock lock = new ReentrantLock(true);  // Fair lock\n\n    public static void main(String[] args) {\n        Runnable task = () -> {\n            // Note: the no-arg tryLock() barges past the wait queue even on a fair\n            // lock, so a blocking lock() is used here to honour the fairness policy.\n            lock.lock();\n            try {\n                System.out.println(Thread.currentThread().getName() + \" got the lock!\");\n            } finally {\n                lock.unlock();\n            }\n        };\n\n        Thread highPriority = new Thread(task, \"High-Priority\");\n        Thread lowPriority = new Thread(task, \"Low-Priority\");\n\n        highPriority.setPriority(Thread.MAX_PRIORITY);\n        lowPriority.setPriority(Thread.MIN_PRIORITY);\n\n        highPriority.start();\n        lowPriority.start();\n    }\n}\n

Note

Fairness guarantees FIFO ordering among waiting threads but typically lowers throughput. Also be aware that the no-argument tryLock() ignores the fairness setting and will acquire the lock immediately if it is free, regardless of waiting threads.

"},{"location":"langdives/Java/Locking-Issues-Starvation/#avoid-priority-based-scheduling","title":"Avoid Priority-Based Scheduling","text":"

Although Java allows you to assign priorities to threads, the JVM\u2019s thread scheduler may not always honor them consistently. It\u2019s generally recommended to avoid relying on thread priorities for critical tasks. If you need to control thread scheduling, use fair locks or condition variables instead of thread priorities.

"},{"location":"langdives/Java/Locking-Issues-Starvation/#backoff-strategies","title":"Backoff Strategies","text":"

Using backoff strategies (delays) between retries can help reduce contention for resources. This ensures that no thread monopolizes the CPU by continuously attempting to acquire a resource.

Backoff Strategy Example
import java.util.concurrent.locks.ReentrantLock;\n\npublic class BackoffDemo {\n    private static final ReentrantLock lock = new ReentrantLock();\n\n    public static void main(String[] args) {\n        Runnable task = () -> {\n            while (true) {\n                if (lock.tryLock()) {\n                    try {\n                        System.out.println(Thread.currentThread().getName() + \" got the lock!\");\n                        break;\n                    } finally {\n                        lock.unlock();\n                    }\n                } else {\n                    System.out.println(Thread.currentThread().getName() + \" waiting...\");\n                    try {\n                        Thread.sleep((long) (Math.random() * 100));  // Random delay\n                    } catch (InterruptedException e) {\n                        Thread.currentThread().interrupt();\n                    }\n                }\n            }\n        };\n\n        Thread t1 = new Thread(task, \"Thread-1\");\n        Thread t2 = new Thread(task, \"Thread-2\");\n\n        t1.start();\n        t2.start();\n    }\n}\n

Note

Random delays ensure that threads do not engage in busy-waiting loops, reducing contention and improving fairness.

"},{"location":"langdives/Java/Locking-Issues-Starvation/#use-thread-pools","title":"Use Thread Pools","text":"

When dealing with many concurrent tasks, using a thread pool ensures that threads are fairly scheduled and resources are shared efficiently.

Using ThreadPoolExecutor Example
import java.util.concurrent.ExecutorService;\nimport java.util.concurrent.Executors;\n\npublic class ThreadPoolDemo {\n    public static void main(String[] args) {\n        ExecutorService executor = Executors.newFixedThreadPool(2);\n\n        Runnable task = () -> {\n            System.out.println(Thread.currentThread().getName() + \" is running\");\n        };\n\n        for (int i = 0; i < 5; i++) {\n            executor.submit(task);\n        }\n\n        executor.shutdown();\n    }\n}\n

Note

Using thread pools avoids creating too many threads and ensures fair resource sharing.

"},{"location":"langdives/Java/Locking-Issues-Starvation/#avoid-long-critical-sections","title":"Avoid Long Critical Sections","text":""},{"location":"langdives/Java/Locking-Issues-Starvation/#use-condition-variables","title":"Use Condition Variables","text":"

Instead of relying on priorities or busy-waiting, use Condition objects with ReentrantLock to manage thread coordination efficiently.

Condition Variables Example
import java.util.concurrent.locks.Condition;\nimport java.util.concurrent.locks.Lock;\nimport java.util.concurrent.locks.ReentrantLock;\n\npublic class ConditionDemo {\n    private static final Lock lock = new ReentrantLock();\n    private static final Condition condition = lock.newCondition();\n    private static boolean signaled = false;  // Guards against lost signals and spurious wake-ups\n\n    public static void main(String[] args) {\n        new Thread(() -> {\n            lock.lock();\n            try {\n                System.out.println(\"Waiting...\");\n                while (!signaled) {\n                    condition.await();  // Wait for a signal\n                }\n                System.out.println(\"Resumed\");\n            } catch (InterruptedException e) {\n                Thread.currentThread().interrupt();\n            } finally {\n                lock.unlock();\n            }\n        }).start();\n\n        new Thread(() -> {\n            lock.lock();\n            try {\n                Thread.sleep(1000);\n                signaled = true;\n                condition.signal();  // Signal the waiting thread\n                System.out.println(\"Signaled\");\n            } catch (InterruptedException e) {\n                Thread.currentThread().interrupt();\n            } finally {\n                lock.unlock();\n            }\n        }).start();\n    }\n}\n

Note

Using conditions helps avoid busy-waiting and ensures efficient thread signaling.

"},{"location":"langdives/Java/Locking-Issues-Starvation/#best-practices","title":"Best Practices","text":""},{"location":"langdives/Java/Locking-Issues-Starvation/#summary","title":"Summary","text":"

Starvation is a subtle but serious issue in multithreaded programs, particularly when some threads are prioritized over others or when resources are monopolized by specific threads. By using fair locks, thread pools, backoff strategies, and short critical sections, you can ensure that every thread eventually gets a chance to make progress.

"},{"location":"langdives/Java/Locking-Reentrant/","title":"Locking.","text":"

Locking is an essential concept in multithreaded programming to prevent race conditions and ensure thread safety. When multiple threads access shared resources, locks ensure that only one thread accesses the critical section at a time.

This article covers reentrant locks.

"},{"location":"langdives/Java/Locking-Reentrant/#what-is-locking","title":"What is Locking ?","text":"

Locking is a way to ensure that only one thread at a time executes a critical section or modifies a shared resource. Without proper locks, multiple threads may interfere with each other, leading to data inconsistency or unexpected behavior (race conditions).

Java offers various locking mechanisms, from synchronized blocks to explicit locks like ReentrantLock.

"},{"location":"langdives/Java/Locking-Reentrant/#what-is-reentrantlock","title":"What is ReentrantLock ?","text":"

The ReentrantLock class, introduced in Java 5, offers more control over thread synchronization than the synchronized keyword. It allows for advanced locking techniques such as fairness policies, tryLock, and interruptible locks. Let\u2019s explore everything about ReentrantLock, including its use cases, internal mechanisms, and best practices.

ReentrantLock is a concrete class in the java.util.concurrent.locks package that implements the Lock interface.

Note

Example
import java.util.concurrent.locks.ReentrantLock;\n\nclass Counter {\n    private int count = 0;\n    private final ReentrantLock lock = new ReentrantLock();\n\n    public void increment() {\n        lock.lock();  // Acquire the lock\n        try {\n            count++;\n        } finally {\n            lock.unlock();  // Release the lock\n        }\n    }\n\n    public int getCount() {\n        lock.lock();\n        try {\n            return count;\n        } finally {\n            lock.unlock();\n        }\n    }\n}\n\npublic class Main {\n    public static void main(String[] args) throws InterruptedException {\n        Counter counter = new Counter();\n\n        Thread t1 = new Thread(() -> {\n            for (int i = 0; i < 1000; i++) counter.increment();\n        });\n\n        Thread t2 = new Thread(() -> {\n            for (int i = 0; i < 1000; i++) counter.increment();\n        });\n\n        t1.start();\n        t2.start();\n        t1.join();\n        t2.join();\n\n        System.out.println(\"Final Count: \" + counter.getCount());  // Output: 2000\n    }\n}\n
"},{"location":"langdives/Java/Locking-Reentrant/#how-it-works-internally","title":"How it Works Internally ?","text":"

Lock Acquisition: When a thread calls lock(), it tries to acquire the lock. If the lock is available, the thread proceeds; otherwise, it blocks until the lock becomes available.

Reentrancy: A thread that holds the lock can acquire it again without blocking. This is useful when a thread enters a method that calls another method or block guarded by the same lock.

Fair vs Unfair Locking: A fair lock (new ReentrantLock(true)) grants the lock to the longest-waiting thread, while the default unfair lock lets threads barge in, which usually yields higher throughput at the risk of starving long waiters.
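
As a quick sketch of the two modes:

ReentrantLock unfairLock = new ReentrantLock();      // Default: unfair, higher throughput\nReentrantLock fairLock   = new ReentrantLock(true);  // Fair: longest-waiting thread first\n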

"},{"location":"langdives/Java/Locking-Reentrant/#advanced-locking-techniques","title":"Advanced Locking Techniques","text":""},{"location":"langdives/Java/Locking-Reentrant/#trylock","title":"tryLock()","text":"

The tryLock() method attempts to acquire the lock without blocking. It returns true if the lock is acquired, otherwise false.

Example
if (lock.tryLock()) {\n    try {\n        // Perform task\n    } finally {\n        lock.unlock();\n    }\n} else {\n    System.out.println(\"Could not acquire lock, doing something else...\");\n}\n
When to use ?

When you want to avoid blocking indefinitely if the lock is not available.

"},{"location":"langdives/Java/Locking-Reentrant/#trylock-with-timeout","title":"tryLock with Timeout","text":"

The tryLock(long timeout, TimeUnit unit) method waits for a specific amount of time to acquire the lock.

Example
import java.util.concurrent.TimeUnit;\n\nif (lock.tryLock(1, TimeUnit.SECONDS)) {\n    try {\n        // Perform task\n    } finally {\n        lock.unlock();\n    }\n} else {\n    System.out.println(\"Could not acquire lock within timeout.\");\n}\n
When to use ?

When waiting indefinitely is not practical, such as network operations or I/O tasks.

"},{"location":"langdives/Java/Locking-Reentrant/#interruptible-lock-acquisition","title":"Interruptible Lock Acquisition","text":"

The lockInterruptibly() method allows a thread to acquire the lock but respond to interrupts while waiting.

Example
try {\n    lock.lockInterruptibly();  // Wait for lock, but respond to interrupts\n    try {\n        // Perform task\n    } finally {\n        lock.unlock();\n    }\n} catch (InterruptedException e) {\n    System.out.println(\"Thread was interrupted.\");\n}\n
When to use ?

Use when a thread waiting for a lock must stay responsive to interruption, for example to support cancellation or shutdown.

"},{"location":"langdives/Java/Locking-Reentrant/#behavior","title":"Behavior","text":"

A reentrant lock means that the same thread can acquire the lock multiple times without blocking itself. However, the thread must release the lock the same number of times to fully unlock it.

Behavior Example
class ReentrantExample {\n    private final ReentrantLock lock = new ReentrantLock();\n\n    public void outerMethod() {\n        lock.lock();\n        try {\n            System.out.println(\"In outer method\");\n            innerMethod();\n        } finally {\n            lock.unlock();\n        }\n    }\n\n    public void innerMethod() {\n        lock.lock();\n        try {\n            System.out.println(\"In inner method\");\n        } finally {\n            lock.unlock();\n        }\n    }\n}\n
Explanation

In this example, outerMethod calls innerMethod, and both methods acquire the same lock. This works without issues because ReentrantLock allows reentrant locking.

"},{"location":"langdives/Java/Locking-Reentrant/#condition-variables","title":"Condition Variables","text":"

The Condition interface (associated with a ReentrantLock) allows a thread to wait for a condition to be met before proceeding. It provides better control than the traditional wait()/notify().

Condition Variables Example
import java.util.concurrent.locks.Condition;\nimport java.util.concurrent.locks.ReentrantLock;\n\nclass ConditionExample {\n    private final ReentrantLock lock = new ReentrantLock();\n    private final Condition condition = lock.newCondition();\n    private boolean ready = false;\n\n    public void awaitCondition() throws InterruptedException {\n        lock.lock();\n        try {\n            while (!ready) {\n                condition.await();  // Wait for signal\n            }\n            System.out.println(\"Condition met, proceeding...\");\n        } finally {\n            lock.unlock();\n        }\n    }\n\n    public void signalCondition() {\n        lock.lock();\n        try {\n            ready = true;\n            condition.signal();  // Signal waiting thread\n        } finally {\n            lock.unlock();\n        }\n    }\n}\n
"},{"location":"langdives/Java/Locking-Reentrant/#performance","title":"Performance","text":"

ReentrantLock has more overhead than synchronized due to fairness policies and explicit lock management. Use synchronized for simple scenarios, and ReentrantLock for more complex locking requirements (e.g. tryLock, fairness, interruptibility).

"},{"location":"langdives/Java/Locking-ReentrantReadWrite/","title":"Locking","text":"

Locking is an essential concept in multithreaded programming to prevent race conditions and ensure thread safety. When multiple threads access shared resources, locks ensure that only one thread accesses the critical section at a time.

This article covers read-write locks.

"},{"location":"langdives/Java/Locking-ReentrantReadWrite/#what-is-locking","title":"What is Locking ?","text":"

Locking is a way to ensure that only one thread at a time executes a critical section or modifies a shared resource. Without proper locks, multiple threads may interfere with each other, leading to data inconsistency or unexpected behavior (race conditions).

Java offers various locking mechanisms, from synchronized blocks to explicit locks like ReentrantLock and ReentrantReadWriteLock.

"},{"location":"langdives/Java/Locking-ReentrantReadWrite/#what-is-reentrantreadwritelock","title":"What is ReentrantReadWriteLock?","text":"

The ReentrantReadWriteLock is a specialized lock from Java\u2019s java.util.concurrent.locks package, designed to improve performance in concurrent systems by separating read and write operations. It provides more efficient locking behavior when the majority of operations are read-only, allowing multiple readers to access the shared resource simultaneously but blocking writers until all reading operations are complete.

A ReentrantReadWriteLock maintains two types of locks: a read lock, which any number of threads may hold simultaneously as long as no thread holds the write lock, and a write lock, which is exclusive.

This separation helps optimize performance for read-heavy workloads.

Example
import java.util.concurrent.locks.ReentrantReadWriteLock;\n\nclass SharedResource {\n    private int data = 0;\n    private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();\n\n    public void write(int value) {\n        lock.writeLock().lock();  // Acquire write lock\n        try {\n            data = value;\n            System.out.println(\"Data written: \" + value);\n        } finally {\n            lock.writeLock().unlock();  // Release write lock\n        }\n    }\n\n    public int read() {\n        lock.readLock().lock();  // Acquire read lock\n        try {\n            System.out.println(\"Data read: \" + data);\n            return data;\n        } finally {\n            lock.readLock().unlock();  // Release read lock\n        }\n    }\n}\n\npublic class Main {\n    public static void main(String[] args) throws InterruptedException {\n        SharedResource resource = new SharedResource();\n\n        // Writer thread\n        Thread writer = new Thread(() -> resource.write(42));\n\n        // Reader threads\n        Thread reader1 = new Thread(() -> resource.read());\n        Thread reader2 = new Thread(() -> resource.read());\n\n        writer.start();\n        reader1.start();\n        reader2.start();\n\n        writer.join();\n        reader1.join();\n        reader2.join();\n    }\n}\n
"},{"location":"langdives/Java/Locking-ReentrantReadWrite/#how-it-works","title":"How it Works ?","text":""},{"location":"langdives/Java/Locking-ReentrantReadWrite/#key-features","title":"Key Features","text":""},{"location":"langdives/Java/Locking-ReentrantReadWrite/#common-problems","title":"Common Problems","text":"Write Starvation

In scenarios with frequent readers, a writer may starve because readers keep acquiring the read lock, delaying the writer indefinitely.

Solution

Use a fair lock

ReentrantReadWriteLock lock = new ReentrantReadWriteLock(true);  // Enable fairness\n

Fair locks ensure that waiting writers get a chance to execute after the current readers finish.

DeadLock

If threads acquire the read and write locks in inconsistent orders, or a thread tries to upgrade a read lock into a write lock, it can lead to deadlock.

Deadlock example

Thread 1: Acquire read lock -> Attempt to acquire write lock (blocks: upgrading is not supported)\nThread 2: Acquire read lock -> Attempt to acquire write lock (blocks as well)\n

Solution

Acquire the locks in a consistent order across threads, and never try to upgrade a read lock to a write lock: ReentrantReadWriteLock does not support upgrading, so a thread that requests the write lock while holding the read lock blocks forever. Release the read lock first, then acquire the write lock (or use downgrading, shown below).

Performance Degradation with Too Many Write Operations

If there are frequent write operations, the system behaves similarly to using a normal ReentrantLock, as readers must wait for writers to release the lock.

Solution

Use lock-free data structures (like AtomicReference) or ReadWriteLock only when reads significantly outnumber writes.
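
As a minimal sketch of the lock-free alternative (class name illustrative), a single value can be published atomically so readers never block:

import java.util.concurrent.atomic.AtomicReference;\n\nclass Config {\n    private final AtomicReference<String> value = new AtomicReference<>(\"initial\");\n\n    public String read() {\n        return value.get();  // No locking; always sees the latest published value\n    }\n\n    public void write(String newValue) {\n        value.set(newValue);  // Atomic publish, visible to all readers\n    }\n}\n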

Incorrect Use of Lock Downgrading

If a thread holding the write lock tries to release it before acquiring the read lock, data inconsistencies can occur.

Correct Lock Downgrading Example

lock.writeLock().lock();\ntry {\n    // Write critical section\n    lock.readLock().lock();  // Downgrade: acquire the read lock before releasing the write lock\n} finally {\n    lock.writeLock().unlock();  // Release the write lock, still holding the read lock\n}\ntry {\n    // Perform read operations under the read lock\n} finally {\n    lock.readLock().unlock();  // Release the read lock when done\n}\n
"},{"location":"langdives/Java/Locking-ReentrantReadWrite/#when-to-use","title":"When to Use ?","text":""},{"location":"langdives/Java/Locking-ReentrantReadWrite/#when-not-to-use","title":"When Not to Use ?","text":""},{"location":"langdives/Java/Locking-ReentrantReadWrite/#best-practices","title":"Best Practices","text":""},{"location":"langdives/Java/Locking-ReentrantReadWrite/#summary","title":"Summary","text":"

ReentrantReadWriteLock is a powerful tool that allows multiple threads to read concurrently while ensuring exclusive access for writes, and it is most effective in read-heavy scenarios. Understanding potential issues like write starvation, deadlocks, and performance degradation is essential for using this lock effectively. By following best practices such as consistent lock ordering, minimizing lock duration, and monitoring lock usage, you can avoid common pitfalls and maximize its performance benefits.

"},{"location":"langdives/Java/LockingIntrinsicReentrant/","title":"synchronized vs ReentrantLock","text":""},{"location":"langdives/Java/LockingIntrinsicReentrant/#differences","title":"Differences","text":"Feature synchronized ReentrantLock Basic Concept Uses intrinsic lock (monitor) on objects. Uses an explicit lock from java.util.concurrent.locks. Lock Acquisition Acquired implicitly when entering a synchronized block or method. Acquired explicitly via lock() method. Release of Lock Automatically released when the thread exits the synchronized block or method. Must be explicitly released via unlock(). Reentrancy Supports reentrancy (same thread can acquire the same lock multiple times). Supports reentrancy just like synchronized. Fairness Unfair by default (no control over thread access order). Can be fair or unfair (configurable with ReentrantLock(true)). Interruptibility Cannot respond to interrupts while waiting for the lock. Supports interruptible locking via lockInterruptibly(). Try Locking Not supported. A thread will block indefinitely if the lock is not available. Supports tryLock() to attempt locking without blocking or with timeout. Condition Variables Uses wait() / notify() / notifyAll() methods on the intrinsic lock. Supports multiple Condition objects for finer-grained wait/notify control. Timeout Support Not supported. If the lock is held by another thread, it will wait indefinitely. Supports timeout locking with tryLock(long timeout, TimeUnit unit). Performance Overhead Low for simple scenarios with little contention. Higher overhead but provides greater control over locking behavior. Fair Locking Option Not supported (always unfair). Fair locking can be enabled with ReentrantLock(true). Use in Non-blocking Operations Not possible. Possible with tryLock() (non-blocking). Flexibility and Control Limited to synchronized methods or blocks. Greater flexibility: lock multiple sections, lock only part of a method, or use multiple conditions. Suitability for Deadlock Avoidance Requires external logic to prevent deadlocks (acquire locks in the same order). Easier to prevent deadlocks using tryLock() and timeouts. Memory Usage No additional memory overhead. Uses the object\u2019s monitor. Requires additional memory for lock objects and lock metadata. Readability and Simplicity Easier to read and maintain (especially for small, simple use cases). More complex code with explicit lock management. Error Handling No need to manage lock release in a finally block. The lock is automatically released. Requires explicit unlock() in finally blocks to avoid deadlocks or memory leaks. Thread Starvation Prone to thread starvation in high contention scenarios. Can prevent starvation using fair lock mode. Recommended Use Case Best for simple synchronization needs where you don\u2019t need advanced control. Recommended for complex concurrency scenarios needing fine-grained locking, fairness, tryLock, or interruptibility."},{"location":"langdives/Java/LockingIntrinsicReentrant/#when-to-use","title":"When to Use ?","text":"Use synchronized Use ReentrantLock When you need simple, block-level or method-level synchronization. When you need advanced control over locking behavior (e.g., tryLock, fairness, or interruptibility). When you want automatic lock release (less error-prone). When you need multiple locks or condition variables. When performance matters in low-contention scenarios (lower overhead). When dealing with high contention and you need fair scheduling to prevent starvation. 
When you don't need non-blocking operations or timeouts. When you want non-blocking operations using tryLock() or timeout-based locking. When the code needs to be simple and easy to read. When code complexity is acceptable for greater flexibility."},{"location":"langdives/Java/LockingIntrinsicReentrant/#summary","title":"Summary","text":"

Both synchronized and ReentrantLock have their own strengths and use cases. Use synchronized for simpler, lower-level concurrency needs, and ReentrantLock when you need more control, fairness, or advanced features like non-blocking locking and condition variables.

In general: - synchronized is easier to use and less error-prone. - ReentrantLock is more powerful and flexible, but with more overhead and complexity.

"},{"location":"langdives/Java/Maven/","title":"Maven","text":""},{"location":"langdives/Java/Maven/#what-is-maven","title":"What is Maven ?","text":"

Apache Maven is a build automation and project management tool primarily for Java projects. It uses XML (pom.xml) to describe the project's structure, dependencies, and build lifecycle. Maven focuses on the \u201cconvention over configuration\u201d principle, meaning it provides a standard way to structure and build projects with minimal configuration.

"},{"location":"langdives/Java/Maven/#how-maven-works","title":"How Maven Works ?","text":"

Maven operates using a build lifecycle consisting of pre-defined phases. When you execute a specific phase, all preceding phases are executed as well.

Maven Lifecycle Phases Phase Description validate Validates the project structure. compile Compiles the source code. test Runs the unit tests. package Packages the compiled code into a JAR or WAR. verify Verifies the package meets specifications. install Installs the JAR into the local Maven repository. deploy Deploys the artifact to a remote repository.

Maven revolves around the POM (Project Object Model), which defines: the project coordinates (groupId, artifactId, version), its dependencies, the build plugins, and other project metadata.

We will look at pom.xml in more detail in the next section.

"},{"location":"langdives/Java/Maven/#understanding-pomxml","title":"Understanding pom.xml","text":"

The POM (Project Object Model) file is the heart of a Maven project. It defines dependencies, build plugins, and project metadata.

Basic Example of pom.xml

<project xmlns=\"http://maven.apache.org/POM/4.0.0\"\n        xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"\n        xsi:schemaLocation=\"http://maven.apache.org/POM/4.0.0 \n                            http://maven.apache.org/xsd/maven-4.0.0.xsd\">\n    <modelVersion>4.0.0</modelVersion>\n\n    <groupId>com.example</groupId>\n    <artifactId>my-app</artifactId>\n    <version>1.0-SNAPSHOT</version>\n    <packaging>jar</packaging>\n\n    <dependencies>\n        <dependency>\n            <groupId>org.apache.commons</groupId>\n            <artifactId>commons-lang3</artifactId>\n            <version>3.12.0</version>\n        </dependency>\n    </dependencies>\n\n    <build>\n        <plugins>\n            <plugin>\n                <groupId>org.apache.maven.plugins</groupId>\n                <artifactId>maven-compiler-plugin</artifactId>\n                <version>3.8.1</version>\n                <configuration>\n                    <source>1.8</source>\n                    <target>1.8</target>\n                </configuration>\n            </plugin>\n        </plugins>\n    </build>\n</project>\n

Key Components of pom.xml

groupId, artifactId, and version uniquely identify the artifact; packaging selects the output type (e.g. jar or war); dependencies lists the libraries the project needs; and build/plugins configures how the project is compiled and packaged.

"},{"location":"langdives/Java/Maven/#dependency-management","title":"Dependency Management","text":"

Maven simplifies dependency management by automatically downloading required libraries from repositories.

Scopes of Dependencies

compile (the default, available on all classpaths), provided (needed to compile but supplied by the runtime, such as a servlet container), runtime (needed only at run time), test (available only when compiling and running tests), plus system and import for special cases.

Example Dependency Declaration

<dependency>\n    <groupId>org.apache.commons</groupId>\n    <artifactId>commons-lang3</artifactId>\n    <version>3.12.0</version>\n</dependency>\n
"},{"location":"langdives/Java/Maven/#maven-repositories","title":"Maven Repositories","text":"

Maven resolves dependencies from repositories: first the local repository (~/.m2/repository), then Maven Central, then any remote repositories declared in the POM or settings.

Adding a Custom Repository

<repositories>\n    <repository>\n        <id>my-repo</id>\n        <url>https://my-repo-url</url>\n    </repository>\n</repositories>\n
"},{"location":"langdives/Java/Maven/#maven-plugins","title":"Maven Plugins","text":"

Maven plugins extend its functionality. Plugins can handle tasks such as compiling, testing, or packaging.

Maven Compiler Plugin

<plugin>\n    <groupId>org.apache.maven.plugins</groupId>\n    <artifactId>maven-compiler-plugin</artifactId>\n    <version>3.8.1</version>\n    <configuration>\n        <source>1.8</source>\n        <target>1.8</target>\n    </configuration>\n</plugin>\n
This plugin ensures the source code is compiled with Java 8.

"},{"location":"langdives/Java/Maven/#maven-project-structure","title":"Maven Project Structure","text":"Maven recommended standard directory structure
/my-project\n\u2502\n\u251c\u2500\u2500 pom.xml               # Project Object Model configuration\n\u251c\u2500\u2500 src\n\u2502   \u251c\u2500\u2500 main\n\u2502   \u2502   \u2514\u2500\u2500 java          # Source code\n\u2502   \u2514\u2500\u2500 test\n\u2502       \u2514\u2500\u2500 java          # Unit tests\n\u2514\u2500\u2500 target                # Output directory (JAR, WAR)\n

Maven supports multi-module projects, allowing multiple related projects to be managed together.

Directory Structure
/parent-project\n\u2502\n\u251c\u2500\u2500 pom.xml (parent)\n\u251c\u2500\u2500 module-1/\n\u2502   \u2514\u2500\u2500 pom.xml\n\u2514\u2500\u2500 module-2/\n    \u2514\u2500\u2500 pom.xml\n
The parent pom.xml defines the modules:
<modules>\n    <module>module-1</module>\n    <module>module-2</module>\n</modules>\n
Building all modules
mvn install\n
"},{"location":"langdives/Java/Maven/#maven-wrapper-mvnw","title":"Maven Wrapper (mvnw)","text":"

Similar to Gradle, Maven has a wrapper (mvnw) that ensures the project uses a specific Maven version.

Add Maven Wrapper
mvn -N io.takari:maven:wrapper   # on newer Maven versions: mvn wrapper:wrapper\n
"},{"location":"langdives/Java/Maven/#maven-commands","title":"Maven Commands","text":"

Here are some common Maven commands

Command Description mvn compile Compiles the source code. mvn test Runs unit tests. mvn package Packages the code into a JAR/WAR. mvn install Installs the artifact to the local repository. mvn deploy Deploys the artifact to a remote repository. mvn clean Cleans the target/ directory. mvn dependency:tree Displays the project's dependency tree."},{"location":"langdives/Java/Maven/#best-practices","title":"Best Practices","text":""},{"location":"langdives/Java/Maven/#summary","title":"Summary","text":"

Maven is a mature, stable tool that simplifies building and managing Java applications. Its focus on conventions reduces the need for complex configurations, making it ideal for enterprise projects. While Maven may lack some of the flexibility and speed of Gradle, it is widely used in large organizations for its reliability and standardization. For projects requiring strict conventions and extensive dependency management, Maven remains a popular choice.

"},{"location":"langdives/Java/MavenVsGradle/","title":"Maven vs Gradle","text":""},{"location":"langdives/Java/MavenVsGradle/#comparision","title":"Comparision","text":"Aspect Maven Gradle Configuration Style Uses XML (pom.xml). Uses Groovy/Kotlin DSL (build.gradle). Performance Slower, especially for large projects (no build caching). Faster with incremental builds and caching. Flexibility Follows convention over configuration, less customizable. Highly customizable, supports custom build logic. Dependency Management Maven Central and custom repositories. Supports Maven Central, JCenter, Ivy, and custom repositories. Plugin System Pre-built Maven plugins (strict lifecycle integration). More flexible plugins with multiple custom task types. Build Output Produces JARs, WARs, and other artifacts. Produces JARs, WARs, and custom artifacts more easily. Multi-Project Support Good for enterprise projects with structured multi-module builds. Excellent for multi-module projects, especially in complex setups. Integration with CI/CD Easily integrates with Jenkins, GitHub Actions, Bamboo. Same level of integration with Jenkins, CircleCI, GitHub Actions. Use in Android Development Not suitable. Preferred build tool for Android development. Incremental Builds Not supported. Supported, resulting in faster builds. Offline Mode Uses the local Maven repository (~/.m2/repository). Uses a local cache (~/.gradle/caches/) and has offline mode. Version Control of Build Tool Maven Wrapper (mvnw) ensures consistent versions. Gradle Wrapper (gradlew) ensures consistent versions. Preferred Projects Enterprise Java applications with well-defined standards. Android apps, complex and large projects with custom build requirements."},{"location":"langdives/Java/MavenVsGradle/#when-to-use-gradle","title":"When to Use Gradle ?","text":""},{"location":"langdives/Java/MavenVsGradle/#when-to-use-maven","title":"When to Use Maven ?","text":""},{"location":"langdives/Java/MavenVsGradle/#advantages-of-gradle","title":"Advantages of Gradle","text":""},{"location":"langdives/Java/MavenVsGradle/#advantages-of-maven","title":"Advantages of Maven","text":""},{"location":"langdives/Java/MavenVsGradle/#which-tool-is-preferred","title":"Which Tool is Preferred?","text":""},{"location":"langdives/Java/MavenVsGradle/#where-gradle-maven-fit","title":"Where Gradle Maven Fit ?","text":"Component Role Gradle / Maven Interaction JDK (Java Development Kit) Provides tools to compile Java code into bytecode. Gradle and Maven use the JDK compiler (javac) to build code. JVM (Java Virtual Machine) Runs the compiled bytecode (.class files). Gradle/Maven can execute unit tests and applications on the JVM. JRE (Java Runtime Environment) Provides the libraries required to run Java applications. The output artifacts (e.g., JAR/WAR) produced by Gradle/Maven require the JRE to run. "},{"location":"langdives/Java/MavenVsGradle/#summary","title":"Summary","text":"

In conclusion, both Maven and Gradle are excellent tools, and the choice depends on the project requirements. For enterprise applications, Maven remains a solid choice. For Android apps, large multi-module projects, or performance-critical builds, Gradle stands out as the preferred option.

"},{"location":"langdives/Java/MemoryModel/","title":"Java Memory Model","text":"

Java uses a memory model that divides memory into different areas, primarily the heap and stack.

"},{"location":"langdives/Java/MemoryModel/#heap-memory","title":"Heap Memory","text":"

The heap is mainly used for dynamic memory allocation. Objects created using the new keyword are stored in the heap. Objects in the heap remain in memory until they are no longer referenced and are garbage collected, so an object's lifetime is not tied to the scope of a method. Accessing memory in the heap is slower than in the stack due to its dynamic nature and the potential for fragmentation.

Note

"},{"location":"langdives/Java/MemoryModel/#stack-memory","title":"Stack Memory","text":"

The stack is mainly used for static memory allocation. It stores method call frames, which contain local variables, method parameters, and return addresses. The lifetime of a variable on the stack is limited to the duration of the method call: once the method returns, the stack frame is popped off and the memory is reclaimed. Accessing stack memory is faster than heap memory because it follows a Last In, First Out (LIFO) order, allowing for quick allocation and deallocation.

Note

"},{"location":"langdives/Java/MemoryModel/#example","title":"Example","text":"

Example

public class MemoryExample {\n    public static void main(String[] args) {\n        int localVar = 10; // Stack memory\n\n        MemoryExample obj = new MemoryExample(); // Heap memory\n        obj.display(localVar); // Passing parameter, stack memory\n    }\n\n    public void display(int param) { // Stack memory\n        System.out.println(param);\n        String message = \"Hello\"; // Heap memory (String object)\n    }\n}\n
"},{"location":"langdives/Java/MemoryModel/#differences","title":"Differences","text":"Feature Heap Stack Allocation Dynamic Static Lifetime Until garbage collected Duration of method call Memory Size Larger (configurable) Smaller (configurable) Access Speed Slower Faster Data Type Objects, arrays Primitive types, references Management Garbage collection Automatically managed by JVM"},{"location":"langdives/Java/PrimitiveReferenceTypes/","title":"Primitive and Reference Types","text":""},{"location":"langdives/Java/PrimitiveReferenceTypes/#primitive-types","title":"Primitive Types","text":"

Java has 8 primitive data types that store simple values directly in memory.

Type Size Default Value Range Example byte 1 byte (8 bits) 0 -128 to 127 byte b = 100; short 2 bytes (16 bits) 0 -32,768 to 32,767 short s = 30000; int 4 bytes (32 bits) 0 -2^31 to (2^31)-1 int i = 100000; long 8 bytes (64 bits) 0L -2^63 to (2^63)-1 long l = 100000L; float 4 bytes (32 bits) 0.0f ~\u00b13.4E38 (7 decimal digits precision) float f = 3.14f; double 8 bytes (64 bits) 0.0 ~\u00b11.8E308 (15 decimal digits precision) double d = 3.14159; char 2 bytes (16 bits) '\\u0000' Unicode characters (0 to 65,535) char c = 'A'; boolean 1 bit (virtual) false true or false boolean b = true;"},{"location":"langdives/Java/PrimitiveReferenceTypes/#reference-types","title":"Reference Types","text":"

Reference types store references (addresses) to objects in memory, unlike primitive types that store values directly.

Primitive Wrapper Class byte Byte short Short int Integer long Long float Float double Double char Character boolean Boolean"},{"location":"langdives/Java/PrimitiveReferenceTypes/#differences","title":"Differences","text":"Aspect Primitive Types Reference Types Storage Store actual values. Store references to objects in memory. Memory Allocation Stored in stack memory. Stored in heap memory. Default Values Zero/false equivalents. null for uninitialized references. Examples int, char, boolean. String, Arrays, Classes, Interfaces, etc."},{"location":"langdives/Java/ReferenceTypesInDepth/","title":"Reference Types In Depth.","text":"

Let's take a deep dive into how memory management, object references, and reference-type behaviors work in Java, with a focus on String handling and other reference types like arrays, classes, and wrapper objects.

"},{"location":"langdives/Java/ReferenceTypesInDepth/#intro","title":"Intro","text":"

Primitive types store values directly in stack memory, whereas reference types store references (addresses) to objects located in heap memory. When you assign a reference type (e.g., a String or an array), only the reference (address) is copied, not the actual data. This means multiple references can point to the same object.

"},{"location":"langdives/Java/ReferenceTypesInDepth/#string","title":"String","text":"

The String class in Java is a special reference type with some unique behaviors. Strings are immutable: once a String object is created, it cannot be changed. Any modification of a String results in the creation of a new object in memory.

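A minimal sketch to illustrate this immutability: modifying methods like concat() return a new String and leave the original untouched.
String s = \"Hello\";\nString t = s.concat(\" World\");  // returns a new String object\n\nSystem.out.println(s);  // Hello (original unchanged)\nSystem.out.println(t);  // Hello World\n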
"},{"location":"langdives/Java/ReferenceTypesInDepth/#string-pool-interned-strings","title":"String Pool (Interned Strings)","text":"

A special memory area inside the heap used to store string literals. If a string literal like \"Hello\" is created, Java first checks the string pool to see if it already exists. If it does, it returns the reference from the pool. If not, the string is added to the pool.

Example
String s1 = \"Hello\";  // Stored in the String Pool\nString s2 = \"Hello\";  // s2 points to the same object as s1\n\nSystem.out.println(s1 == s2);  // true (same reference)\n
"},{"location":"langdives/Java/ReferenceTypesInDepth/#heap-memory","title":"Heap Memory","text":"

When you use the new keyword, a new String object is always created in the heap memory. Even if the same string already exists in the string pool, the new keyword forces the creation of a separate instance in the heap.

Example

String s1 = new String(\"Hello\"); // creates a new object outside the pool in the heap.\n\nString s2 = \"Hello\"; // Stored in the String Pool\n\nSystem.out.println(s1 == s2);  // false (different references)\n
When you use new String(), Java forces the creation of a new object in heap even if the same string exists in the pool.

"},{"location":"langdives/Java/ReferenceTypesInDepth/#arrays","title":"Arrays","text":"

Arrays are reference types, meaning the array variable stores a reference to the memory location where the array data is stored.

Example
int[] arr1 = {1, 2, 3};\nint[] arr2 = arr1;  // arr2 now references the same array as arr1\n\narr2[0] = 10;  // Modifies the original array\n\nSystem.out.println(arr1[0]);  // Output: 10 (both arr1 and arr2 reference the same array)\n

How Array References Work: assigning one array variable to another copies only the reference, so both variables point to the same underlying array in the heap, and a change made through either variable is visible through the other.

"},{"location":"langdives/Java/ReferenceTypesInDepth/#classes-and-objects","title":"Classes and Objects","text":"

When you create an object using new, the reference variable points to the object in heap memory.

Example
class Person {\n    String name;\n}\n\nPerson p1 = new Person();\np1.name = \"Alice\";\n\nPerson p2 = p1;  // p2 points to the same object as p1\np2.name = \"Bob\";\n\nSystem.out.println(p1.name);  // Output: Bob (both references point to the same object)\n

How References Work with Objects: assigning p1 to p2 copies the reference, not the object, so both variables point to the same Person instance in the heap, and changes made through one reference are visible through the other.

"},{"location":"langdives/Java/ReferenceTypesInDepth/#wrapper-classes","title":"Wrapper Classes","text":"

Wrapper classes (Integer, Double, Boolean, etc.) wrap primitive types into objects. These are reference types, and Java performs autoboxing/unboxing to convert between primitive types and wrapper objects.

Example
Integer num1 = 100;\nInteger num2 = 100;\n\nSystem.out.println(num1 == num2);  // true (for values within -128 to 127)\n\nInteger num3 = 200;\nInteger num4 = 200;\n\nSystem.out.println(num3 == num4);  // false (new objects for values beyond 127)\n

Wrapper Caching: Java caches Integer objects for values from -128 to 127 (similar small-value caches exist for the other integer wrappers), which is why == returned true above for 100 but false for 200.

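Because of this caching, a quick sketch of the safe comparison: always compare wrapper values with equals().
Integer num3 = 200;\nInteger num4 = 200;\n\nSystem.out.println(num3.equals(num4));  // true (compares values, not references)\n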
"},{"location":"langdives/Java/ReferenceTypesInDepth/#reference-and-deep-copy","title":"Reference and Deep Copy","text":"

Shallow Copy: Copies only the reference, so both variables refer to the same object.

Example
int[] original = {1, 2, 3};\nint[] shallowCopy = original;  // Points to the same array\n\nshallowCopy[0] = 100;\nSystem.out.println(original[0]);  // Output: 100\n

Deep Copy: Creates a new object with the same data.

Example
int[] original = {1, 2, 3};\nint[] deepCopy = original.clone();  // Creates a new array\n\ndeepCopy[0] = 100;\nSystem.out.println(original[0]);  // Output: 1\n
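For objects, a deep copy typically means allocating a new instance and copying the fields across. A minimal sketch using a hypothetical copy constructor on a Person class (the constructor is illustrative, not from the earlier example):
class Person {\n    String name;\n\n    Person(String name) { this.name = name; }\n\n    Person(Person other) { this.name = other.name; }  // copy constructor (illustrative)\n}\n\nPerson p1 = new Person(\"Alice\");\nPerson p2 = new Person(p1);  // deep copy: a separate object with the same data\n\np2.name = \"Bob\";\nSystem.out.println(p1.name);  // Output: Alice (unaffected)\n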
"},{"location":"langdives/Java/ReferenceTypesInDepth/#nullnullpointerexception","title":"Null/NullPointerException","text":"

When a reference is not initialized, it holds the value null. Accessing a field or method on a null reference throws a NullPointerException.

Example
Person p = null;\nSystem.out.println(p.name);  // Throws NullPointerException\n
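A common guard against this, sketched below, is an explicit null check before dereferencing (java.util.Objects.requireNonNull is another option for fail-fast validation):
Person p = null;\nif (p != null) {\n    System.out.println(p.name);\n} else {\n    System.out.println(\"No person available\");  // avoids the NullPointerException\n}\n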
"},{"location":"langdives/Java/ReferenceTypesInDepth/#garbage-collection","title":"Garbage Collection","text":"

Java uses Garbage Collection to manage memory. When no references point to an object, it becomes eligible for garbage collection.

Example
Person p1 = new Person();  // Object created\np1 = null;  // Now eligible for garbage collection\n

We will cover garbage collection in more depth in another article.

"},{"location":"langdives/Java/ReferenceTypesInDepth/#summary","title":"Summary","text":""},{"location":"langdives/Java/ReferenceTypesInDepth/#string-pool-in-depth","title":"String Pool In Depth","text":"

The String Pool (also called the intern pool) in Java is implemented using a Hash Table-like data structure internally. Let\u2019s explore the design and behavior behind this structure:

"},{"location":"langdives/Java/ReferenceTypesInDepth/#internals","title":"Internals","text":" Simplified conceptual pseudocode Example

How the pool works internally

class StringPool {\n    private static Map<String, String> pool = new HashMap<>();\n\n    public static String intern(String str) {\n        if (pool.containsKey(str)) {\n            return pool.get(str);  // Return existing reference\n        } else {\n            pool.put(str, str);    // Add to the pool\n            return str;\n        }\n    }\n}\n
When calling String.intern(), Java interns the string, meaning it adds the string to the pool if it's not already present.

String Pool Usage Example
public class Main {\n    public static void main(String[] args) {\n        String s1 = new String(\"Hello\");\n        String s2 = s1.intern();  // Adds \"Hello\" to the pool, if not already present\n\n        String s3 = \"Hello\";  // Uses the interned string from the pool\n\n        System.out.println(s2 == s3);  // true (same reference from the pool)\n    }\n}\n
"},{"location":"langdives/Java/ReferenceTypesInDepth/#why-use-hash-table","title":"Why Use Hash Table ?","text":"

Key Takeaways: a hash-table-backed pool provides O(1) average-time lookups, making it cheap to check whether a literal already exists, and it guarantees each distinct literal is stored only once.

"},{"location":"langdives/Java/ReferenceTypesInDepth/#string-pool-summary","title":"String pool Summary","text":"

The String Pool is implemented using a Hash Table-like data structure. This allows for efficient string reuse through fast lookups and ensures no duplicate literals are created. Strings added via literals or intern() are stored in the pool, with existing references returned on subsequent requests.

"},{"location":"langdives/Java/StreamsLambdas/","title":"Streams and Lambdas","text":""},{"location":"langdives/Java/StreamsLambdas/#lambda-expressions","title":"Lambda Expressions","text":"

Lambda expressions enable functional programming in Java by treating functions as first-class citizens.

Example
List<String> names = Arrays.asList(\"Alice\", \"Bob\", \"Charlie\");\nnames.forEach(name -> System.out.println(name));\n
"},{"location":"langdives/Java/StreamsLambdas/#functional-interfaces","title":"Functional Interfaces","text":"

A functional interface is an interface with only one abstract method. This is important because lambda expressions can be used to provide the implementation for these interfaces.

Example Example functional interface
@FunctionalInterface  // Optional but ensures the interface has only one abstract method.\ninterface MyFunction {\n    int apply(int a, int b);  // Single abstract method\n}\n

Now, when you want to use this interface, you don\u2019t need to create a class and provide an implementation like before. Instead, you can use a lambda expression to quickly provide the logic.

Using Lambda with MyFunction
MyFunction addition = (a, b) -> a + b;  // Lambda expression for addition\nSystem.out.println(addition.apply(5, 3));  // Output: 8\n
Explanation "},{"location":"langdives/Java/StreamsLambdas/#method-references","title":"Method References","text":"

A method reference is a shorthand way of writing a lambda when a method already exists that matches the lambda\u2019s purpose. This makes the code more concise and readable.

Example with forEach and Method Reference

Consider the following list of names:

List<String> names = Arrays.asList(\"Alice\", \"Bob\", \"Charlie\");\n

You want to print all names using forEach(). You could do it with a lambda like this:

names.forEach(name -> System.out.println(name));  // Lambda expression\n

Now, Java provides a shorthand: Method Reference. Since System.out.println() already matches the structure (String) -> void, you can write:

names.forEach(System.out::println);  // Method reference\n

Explanation

Use method references when an existing method already matches the lambda's parameters and return type, so the reference reads more clearly than spelling the lambda out.

More Examples
// 1. Static method reference\nFunction<String, Integer> parse = Integer::parseInt;\nSystem.out.println(parse.apply(\"123\"));  // Output: 123\n\n// 2. Instance method reference on an arbitrary object\nList<String> words = Arrays.asList(\"one\", \"two\", \"three\");\nwords.sort(String::compareToIgnoreCase);  // Sorts case-insensitively\n
"},{"location":"langdives/Java/StreamsLambdas/#streams-api","title":"Streams API","text":"

Introduced in Java 8 to process collections in a declarative way.

Core Stream Operations

"},{"location":"langdives/Java/StreamsLambdas/#creation","title":"Creation","text":"
Stream<Integer> stream = Stream.of(1, 2, 3, 4);\nList<String> list = Arrays.asList(\"A\", \"B\", \"C\");\nStream<String> streamFromList = list.stream();\n
"},{"location":"langdives/Java/StreamsLambdas/#intermediate-operations-return-new-streams-lazy-evaluation","title":"Intermediate Operations (return new streams, lazy evaluation)","text":"filter()
// Filters elements based on a predicate.\nList<Integer> evenNumbers = stream.filter(n -> n % 2 == 0).toList();\n
map()
// Transforms elements.\nList<Integer> lengths = list.stream().map(String::length).toList();\n
sorted()
// Sorts elements.\nList<Integer> sortedList = stream.sorted().toList();\n
"},{"location":"langdives/Java/StreamsLambdas/#terminal-operations-trigger-computation","title":"Terminal Operations (trigger computation)","text":"forEach()
// Iterates through elements.\nlist.stream().forEach(System.out::println);\n
collect()
// Collects elements into a collection.\nList<String> newList = list.stream().filter(s -> s.startsWith(\"A\")).collect(Collectors.toList());\n
reduce()
// Reduces the elements to a single result.\nint sum = Stream.of(1, 2, 3, 4).reduce(0, Integer::sum);\n
"},{"location":"langdives/Java/StreamsLambdas/#parallel-streams","title":"Parallel Streams","text":"
// Used to process elements in parallel for better performance.\nlist.parallelStream().forEach(System.out::println);\n
"},{"location":"langdives/Java/StreamsLambdas/#examples-streamslambdas","title":"Examples Streams/Lambdas","text":"Find the sum of even numbers
int sumOfEvens = Stream.of(1, 2, 3, 4, 5, 6)\n                      .filter(n -> n % 2 == 0)\n                      .reduce(0, Integer::sum);\nSystem.out.println(sumOfEvens);  // Output: 12\n
Convert List of Strings to Uppercase
List<String> upperCaseNames = list.stream()\n                                  .map(String::toUpperCase)\n                                  .collect(Collectors.toList());\n
Group elements by length
Map<Integer, List<String>> groupedByLength = list.stream()\n                                                .collect(Collectors.groupingBy(String::length));\n
"},{"location":"langdives/Java/StreamsLambdas/#best-practices","title":"Best Practices","text":""},{"location":"langdives/Java/ThreadPoolTuning/","title":"Thread Pool Configuration Tuning","text":"

Thread pool configuration is critical for optimizing the performance of your applications. Poorly configured thread pools can lead to problems such as CPU starvation, thread contention, memory exhaustion, or poor resource utilization. In this article, we\u2019ll dive deep into CPU-bound vs I/O-bound tasks, explore how to determine optimal thread pool sizes, and discuss key considerations such as queue types and rejection policies.

"},{"location":"langdives/Java/ThreadPoolTuning/#cpu-vs-io-bound-tasks","title":"CPU vs I/O Bound Tasks","text":"

When configuring thread pools, it is essential to classify your tasks as CPU-bound or I/O-bound, as this distinction guides the number of threads your pool should maintain.

"},{"location":"langdives/Java/ThreadPoolTuning/#cpu-bound-tasks","title":"CPU-Bound Tasks","text":"

Tasks that perform intensive computations (e.g., mathematical calculations, data processing, encoding). Here the limiting factor is CPU core availability, so it's better to avoid context-switching overhead by keeping the number of threads close to the number of available CPU cores.

Optimal Thread Pool Size for CPU-Bound Tasks
int coreCount = Runtime.getRuntime().availableProcessors();\nExecutorService cpuBoundPool = Executors.newFixedThreadPool(coreCount);\n

Note

If more threads than CPU cores are running, threads will compete for CPU cycles, causing context switching, which adds overhead.

Optimal Threads = Number of Cores\n

When to use ? "},{"location":"langdives/Java/ThreadPoolTuning/#io-bound-tasks","title":"I/O-Bound Tasks","text":"

Tasks that spend most of their time waiting for I/O operations (e.g., network, database, file I/O). Here the limiting factor is the time spent waiting on I/O, so it's better to use more threads than the number of cores to ensure that idle CPU cycles are used efficiently while threads wait for I/O.

Optimal Thread Pool Size for I/O-Bound Tasks
int coreCount = Runtime.getRuntime().availableProcessors();\nint optimalThreads = coreCount * 2 + 1;\nExecutorService ioBoundPool = Executors.newFixedThreadPool(optimalThreads);\n

Note

Since the tasks spend significant time waiting for I/O, more threads can be created to make sure the CPU is not idle while other threads wait for input/output operations.

Optimal Threads = Number of Cores * (1 + Wait Time / Compute Time)\n

When to use ? "},{"location":"langdives/Java/ThreadPoolTuning/#queues-for-threadpoolexecutor","title":"Queues for ThreadPoolExecutor","text":"

Choosing the right work queue is crucial for memory management and task scheduling. The queue holds tasks waiting to be executed when all threads are busy.

"},{"location":"langdives/Java/ThreadPoolTuning/#unbounded-queue","title":"Unbounded Queue","text":"

A queue with no size limit, but if too many tasks are submitted, it can lead to memory exhaustion (out-of-memory errors).

LinkedBlockingQueue
BlockingQueue<Runnable> queue = new LinkedBlockingQueue<>();\n
When to use ?

Suitable only if you expect tasks to complete quickly and the queue will not grow indefinitely.

"},{"location":"langdives/Java/ThreadPoolTuning/#bounded-queue","title":"Bounded Queue","text":"

A queue with a fixed size limit. It prevents unbounded memory usage, and if the queue is full, tasks will be rejected or handled based on a rejection policy.

ArrayBlockingQueue
BlockingQueue<Runnable> queue = new ArrayBlockingQueue<>(10);\n
When to use ?

Ideal for controlled environments where you want to cap the number of waiting tasks.

"},{"location":"langdives/Java/ThreadPoolTuning/#thread-pool-size-tuning","title":"Thread Pool Size Tuning","text":"For CPU-Bound Tasks
Optimal Threads = Number of Cores\n
For I/O-Bound Tasks
Optimal Threads = Number of Cores * (1 + Wait Time / Compute Time)\n
Example

If a thread spends 70% of its time waiting on I/O and only 30% performing work, then on a 4-core machine:

Optimal Threads = 4 * (1 + 0.7 / 0.3) \u2248 13\n

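Expressed in code, a minimal sketch of this sizing calculation (the wait and compute fractions are assumed profiling numbers, not measured values):
int cores = Runtime.getRuntime().availableProcessors();\ndouble waitTime = 0.7;     // assumed fraction of time spent waiting on I/O\ndouble computeTime = 0.3;  // assumed fraction of time spent computing\n\nint optimalThreads = (int) Math.round(cores * (1 + waitTime / computeTime));\nSystem.out.println(\"Optimal threads: \" + optimalThreads);  // ~13 on a 4-core machine\n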
"},{"location":"langdives/Java/ThreadPoolTuning/#rejection-policies","title":"Rejection Policies","text":"

When the task queue is full and the pool is at its maximum size, the ThreadPoolExecutor must decide what to do with new tasks. You can configure rejection policies to handle these situations.

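As a minimal sketch of wiring a policy in (DiscardOldestPolicy here, purely as an example), the handler is passed as the last ThreadPoolExecutor constructor argument:
ThreadPoolExecutor executor = new ThreadPoolExecutor(\n        2, 4, 30, TimeUnit.SECONDS,\n        new ArrayBlockingQueue<>(2),\n        Executors.defaultThreadFactory(),\n        new ThreadPoolExecutor.DiscardOldestPolicy());  // drops the oldest queued task when saturated\n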
"},{"location":"langdives/Java/ThreadPoolTuning/#abortpolicy-default","title":"AbortPolicy (Default)","text":""},{"location":"langdives/Java/ThreadPoolTuning/#callerrunspolicy","title":"CallerRunsPolicy","text":""},{"location":"langdives/Java/ThreadPoolTuning/#discardpolicy","title":"DiscardPolicy","text":""},{"location":"langdives/Java/ThreadPoolTuning/#discardoldestpolicy","title":"DiscardOldestPolicy","text":""},{"location":"langdives/Java/ThreadPoolTuning/#monitoring-thread-pools","title":"Monitoring Thread Pools","text":"

Monitoring thread pools ensures that your configuration is correct and performing well. You can monitor the following metrics:

Key Metrics to Monitor: the active thread count, the total and completed task counts, and the current queue size.

Example: Monitoring Active Threads
ThreadPoolExecutor executor = new ThreadPoolExecutor(2, 4, 30, TimeUnit.SECONDS,\n      new ArrayBlockingQueue<>(2));\n\nSystem.out.println(\"Active Threads: \" + executor.getActiveCount());\nSystem.out.println(\"Task Count: \" + executor.getTaskCount());\nSystem.out.println(\"Completed Tasks: \" + executor.getCompletedTaskCount());\n
"},{"location":"langdives/Java/ThreadPoolTuning/#dynamic-thread-pool-adjustment","title":"Dynamic Thread Pool Adjustment","text":"

Sometimes, you may need to adjust the pool size at runtime to respond to changing workloads.

Example: Adjusting Thread Pool Size Dynamically
ThreadPoolExecutor executor = new ThreadPoolExecutor(2, 4, 30, TimeUnit.SECONDS,\n      new ArrayBlockingQueue<>(10));\n\n// Adjust core and max pool size dynamically\nexecutor.setCorePoolSize(3);\nexecutor.setMaximumPoolSize(6);\n
"},{"location":"langdives/Java/ThreadPoolTuning/#best-practices","title":"Best Practices","text":""},{"location":"langdives/Java/ThreadPools/","title":"Thread Pools.","text":""},{"location":"langdives/Java/ThreadPools/#what-is-a-thread-pool","title":"What is a Thread Pool ?","text":"

A thread pool is a collection of worker threads that are created at the start and reused to perform multiple tasks. When tasks are submitted to the pool, a free thread picks up the task and executes it. If no threads are free, the tasks wait in a queue until one becomes available.

"},{"location":"langdives/Java/ThreadPools/#advantages-of-thread-pooling","title":"Advantages of Thread Pooling","text":""},{"location":"langdives/Java/ThreadPools/#creating-thread-pools","title":"Creating Thread Pools","text":""},{"location":"langdives/Java/ThreadPools/#ways-to-create","title":"Ways to Create","text":""},{"location":"langdives/Java/ThreadPools/#fixed-thread-pool","title":"Fixed Thread Pool","text":"

Creates a pool with a fixed number of threads. When all threads are busy, tasks are placed in a queue and executed as soon as a thread becomes available.

newFixedThreadPool
import java.util.concurrent.ExecutorService;\nimport java.util.concurrent.Executors;\n\npublic class FixedThreadPoolExample {\n    public static void main(String[] args) {\n        ExecutorService executor = Executors.newFixedThreadPool(3);\n\n        for (int i = 1; i <= 6; i++) {\n            int taskId = i;\n            executor.execute(() -> {\n                System.out.println(\"Task \" + taskId + \" executed by \" + Thread.currentThread().getName());\n            });\n        }\n\n        executor.shutdown();\n    }\n}\n
Advantages When to Use ? "},{"location":"langdives/Java/ThreadPools/#cached-thread-pool","title":"Cached Thread Pool","text":"

A dynamic thread pool where threads are created as needed. If threads are idle for 60 seconds, they are terminated. If a thread is available, it will be reused for a new task.

newCachedThreadPool
import java.util.concurrent.ExecutorService;\nimport java.util.concurrent.Executors;\n\npublic class CachedThreadPoolExample {\n    public static void main(String[] args) {\n        ExecutorService executor = Executors.newCachedThreadPool();\n\n        for (int i = 1; i <= 5; i++) {\n            int taskId = i;\n            executor.execute(() -> {\n                System.out.println(\"Task \" + taskId + \" executed by \" + Thread.currentThread().getName());\n            });\n        }\n\n        executor.shutdown();\n    }\n}\n
Advantages When to Use ? Drawbacks "},{"location":"langdives/Java/ThreadPools/#single-thread-executor","title":"Single Thread Executor","text":"

A single-threaded executor that ensures tasks are executed sequentially in the order they are submitted. If the thread dies due to an exception, a new thread is created to replace it.

newSingleThreadExecutor
import java.util.concurrent.ExecutorService;\nimport java.util.concurrent.Executors;\n\npublic class SingleThreadExecutorExample {\n    public static void main(String[] args) {\n        ExecutorService executor = Executors.newSingleThreadExecutor();\n\n        for (int i = 1; i <= 3; i++) {\n            int taskId = i;\n            executor.execute(() -> {\n                System.out.println(\"Task \" + taskId + \" executed by \" + Thread.currentThread().getName());\n            });\n        }\n\n        executor.shutdown();\n    }\n}\n
Advantages When to Use ? "},{"location":"langdives/Java/ThreadPools/#scheduled-thread-pool","title":"Scheduled Thread Pool","text":"

A scheduled thread pool allows you to schedule tasks to run after a delay or periodically at a fixed rate.

newScheduledThreadPool
import java.util.concurrent.Executors;\nimport java.util.concurrent.ScheduledExecutorService;\nimport java.util.concurrent.TimeUnit;\n\npublic class ScheduledThreadPoolExample {\n    public static void main(String[] args) {\n        ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(2);\n\n        Runnable task = () -> System.out.println(\"Task executed by \" + Thread.currentThread().getName());\n\n        // Schedule task to run after 3 seconds\n        scheduler.schedule(task, 3, TimeUnit.SECONDS);\n\n        // Schedule task to run repeatedly every 2 seconds\n        scheduler.scheduleAtFixedRate(task, 1, 2, TimeUnit.SECONDS);\n\n        // Allow the tasks to complete after 10 seconds\n        scheduler.schedule(() -> scheduler.shutdown(), 10, TimeUnit.SECONDS);\n    }\n}\n
Advantages When to Use ? "},{"location":"langdives/Java/ThreadPools/#threadpoolexecutor","title":"ThreadPoolExecutor","text":"

ThreadPoolExecutor is the core implementation of thread pools in Java. Using it allows you to fine-tune the thread pool\u2019s behavior with more control over the number of threads, queue type, and rejection policy.

Parameters of ThreadPoolExecutor
ThreadPoolExecutor executor = new ThreadPoolExecutor(\n        corePoolSize,      // Minimum number of threads\n        maximumPoolSize,   // Maximum number of threads\n        keepAliveTime,     // Idle time before a thread is terminated\n        timeUnit,          // Time unit for keepAliveTime\n        workQueue,         // Queue to hold waiting tasks\n        threadFactory,     // Factory to create new threads\n        handler            // Rejection policy when the queue is full\n);\n
Custom Thread Pool
import java.util.concurrent.*;\n\npublic class CustomThreadPoolExecutorExample {\n    public static void main(String[] args) {\n        ThreadPoolExecutor executor = new ThreadPoolExecutor(\n                2, 4, 30, TimeUnit.SECONDS,\n                new LinkedBlockingQueue<>(2),   // Task queue with capacity 2\n                Executors.defaultThreadFactory(),\n                new ThreadPoolExecutor.CallerRunsPolicy() // Rejection policy\n        );\n\n        // Submit 6 tasks to the pool\n        for (int i = 1; i <= 6; i++) {\n            int taskId = i;\n            executor.execute(() -> {\n                System.out.println(\"Task \" + taskId + \" executed by \" + Thread.currentThread().getName());\n            });\n        }\n\n        executor.shutdown();\n    }\n}\n
Advantages When to Use ?

Common Rejection Policies in ThreadPoolExecutor: AbortPolicy (throws RejectedExecutionException, the default), CallerRunsPolicy (runs the task in the submitting thread), DiscardPolicy (silently drops the task), and DiscardOldestPolicy (drops the oldest queued task and retries).

"},{"location":"langdives/Java/ThreadPools/#comparison","title":"Comparison","text":"Thread Pool Type Concurrency Parallelism Task Type When to Use Fixed Thread Pool Yes Yes Long-running tasks Limited number of known tasks. Cached Thread Pool Yes Yes Short-lived tasks Dynamic workloads with many I/O tasks. Single Thread Executor No No Sequential tasks Strictly ordered execution. Scheduled Thread Pool Yes Yes Timed or periodic tasks Periodic background tasks. Custom ThreadPoolExecutor Yes Yes Mixed Advanced control and tuning."},{"location":"langdives/Java/ThreadPools/#interface-concepts","title":"Interface Concepts","text":""},{"location":"langdives/Java/ThreadPools/#runnable-interface","title":"Runnable Interface","text":"

The Runnable interface represents a task that can run asynchronously in a thread but does not return any result or throw a checked exception.

Structure
@FunctionalInterface\npublic interface Runnable {\n    void run();\n}\n
Example
public class RunnableExample {\n    public static void main(String[] args) {\n        Runnable task = () -> {\n            System.out.println(\"Executing task in: \" + Thread.currentThread().getName());\n        };\n\n        Thread thread = new Thread(task);\n        thread.start();\n    }\n}\n
When to Use ? "},{"location":"langdives/Java/ThreadPools/#callable-interface","title":"Callable Interface","text":"

The Callable interface is similar to Runnable, but it can return a result and throw a checked exception.

Structure
@FunctionalInterface\npublic interface Callable<V> {\n    V call() throws Exception;\n}\n
Example
import java.util.concurrent.Callable;\n\npublic class CallableExample {\n    public static void main(String[] args) throws Exception {\n        Callable<Integer> task = () -> {\n            System.out.println(\"Executing task in: \" + Thread.currentThread().getName());\n            return 42;\n        };\n\n        // Direct call (for demonstration)\n        Integer result = task.call();\n        System.out.println(\"Task result: \" + result);\n    }\n}\n
When to Use ? "},{"location":"langdives/Java/ThreadPools/#future-interface","title":"Future Interface","text":"

A Future represents the result of an asynchronous computation. It provides methods to check if the computation is complete, wait for the result, and cancel the task if necessary.

Structure
public interface Future<V> {\n    boolean cancel(boolean mayInterruptIfRunning);\n    boolean isCancelled();\n    boolean isDone();\n    V get() throws InterruptedException, ExecutionException;\n    V get(long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException;\n}\n
Example
import java.util.concurrent.*;\n\npublic class FutureExample {\n    public static void main(String[] args) throws ExecutionException, InterruptedException {\n        ExecutorService executor = Executors.newSingleThreadExecutor();\n\n        Callable<Integer> task = () -> {\n            Thread.sleep(2000); // Simulate some work\n            return 42;\n        };\n\n        Future<Integer> future = executor.submit(task);\n\n        // Do something else while the task executes asynchronously\n        System.out.println(\"Task is running...\");\n\n        // Wait for the result\n        Integer result = future.get();\n        System.out.println(\"Task result: \" + result);\n\n        executor.shutdown();\n    }\n}\n
When to Use ? Key Methods "},{"location":"langdives/Java/ThreadPools/#blockingqueue-interface","title":"BlockingQueue Interface","text":"

BlockingQueue is a thread-safe queue that blocks the calling thread when it tries to put into a full queue or take from an empty one.

Structure
public interface BlockingQueue<E> extends Queue<E> {\n    void put(E e) throws InterruptedException;\n    E take() throws InterruptedException;\n    // Other methods for timed operations, size, etc.\n}\n
Example
import java.util.concurrent.*;\n\npublic class BlockingQueueExample {\n    public static void main(String[] args) {\n        BlockingQueue<Integer> queue = new ArrayBlockingQueue<>(2);\n\n        // Producer thread\n        new Thread(() -> {\n            try {\n                queue.put(1);\n                System.out.println(\"Added 1 to the queue\");\n                queue.put(2);\n                System.out.println(\"Added 2 to the queue\");\n                queue.put(3); // This will block until space is available\n                System.out.println(\"Added 3 to the queue\");\n            } catch (InterruptedException e) {\n                Thread.currentThread().interrupt();\n            }\n        }).start();\n\n        // Consumer thread\n        new Thread(() -> {\n            try {\n                Thread.sleep(1000); // Simulate some delay\n                System.out.println(\"Removed from queue: \" + queue.take());\n                System.out.println(\"Removed from queue: \" + queue.take());\n                System.out.println(\"Removed from queue: \" + queue.take());\n            } catch (InterruptedException e) {\n                Thread.currentThread().interrupt();\n            }\n        }).start();\n    }\n}\n
Usages ?

Types of BlockingQueues: ArrayBlockingQueue (bounded, array-backed), LinkedBlockingQueue (optionally bounded, linked nodes), PriorityBlockingQueue (unbounded, ordered by priority), SynchronousQueue (zero capacity, hands tasks directly between threads), and DelayQueue (elements become available only after a delay).

"},{"location":"langdives/Java/ThreadPools/#runnable-vs-callable","title":"Runnable vs Callable","text":"Aspect Runnable Callable Result No result Returns a result Exception Handling Cannot throw checked exceptions Can throw checked exceptions Functional Interface Yes (run() method) Yes (call() method) Use Case Simple background tasks Tasks that need to return a value or throw an exception"},{"location":"langdives/Java/ThreadPools/#how-these-work-together","title":"How These Work Together","text":"Using Runnable in a Thread Pool
ExecutorService executor = Executors.newFixedThreadPool(2);\nRunnable task = () -> System.out.println(\"Task executed by \" + Thread.currentThread().getName());\nexecutor.execute(task);\nexecutor.shutdown();\n
Using Callable with Future in a Thread Pool
ExecutorService executor = Executors.newFixedThreadPool(2);\nCallable<Integer> task = () -> 42;\nFuture<Integer> future = executor.submit(task);\nSystem.out.println(\"Result: \" + future.get());\nexecutor.shutdown();\n
Using BlockingQueue with ThreadPoolExecutor
BlockingQueue<Runnable> queue = new LinkedBlockingQueue<>(2);\nThreadPoolExecutor executor = new ThreadPoolExecutor(2, 4, 30, TimeUnit.SECONDS, queue);\nRunnable task = () -> System.out.println(\"Task executed by \" + Thread.currentThread().getName());\nexecutor.execute(task);\nexecutor.shutdown();\n
"},{"location":"langdives/Java/Threads-Atomicity/","title":"Atomicity","text":"

Atomicity is a fundamental concept in multithreading and concurrency that ensures operations are executed entirely or not at all, with no intermediate states visible to other threads. In Java, atomicity plays a crucial role in maintaining data consistency in concurrent environments.

This article covers everything about atomic operations, issues with atomicity, atomic classes in Java, and best practices to ensure atomic behavior in your code.

"},{"location":"langdives/Java/Threads-Atomicity/#what-is-atomicity","title":"What is Atomicity ?","text":"

In a multithreaded program, atomicity guarantees that operations are executed as a single, indivisible unit. When an operation is atomic, it either completes fully or has no effect at all, and no other thread can ever observe it in a half-finished state.

"},{"location":"langdives/Java/Threads-Atomicity/#why-it-is-important","title":"Why it is Important ?","text":"

Without atomic operations, multiple threads could interfere with each other, leading to race conditions and data inconsistencies. For example, if two threads try to increment a shared counter simultaneously, the result may not reflect both increments due to interleaving of operations.

"},{"location":"langdives/Java/Threads-Atomicity/#problems","title":"Problems ?","text":"Non-Atomic Operations on Primitive Data Types Counter Increment Example
class Counter {\n    private int count = 0;\n\n    public void increment() {\n        count++;  // Not atomic\n    }\n\n    public int getCount() {\n        return count;\n    }\n}\n

Problem

The statement count++ is not atomic. It consists of three operations: reading the current value of count, incrementing it, and writing the new value back.

If two threads execute count++ simultaneously, one increment might be lost due to race conditions.

"},{"location":"langdives/Java/Threads-Atomicity/#how-to-ensure-atomicity","title":"How to Ensure Atomicity ?","text":"

Java provides several ways to ensure atomicity, including synchronized methods and blocks, the atomic classes in java.util.concurrent.atomic, and explicit locks such as ReentrantLock.

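As a sketch of the lock-based option (the atomic classes are covered next), here is a counter guarded by a ReentrantLock:
import java.util.concurrent.locks.ReentrantLock;\n\nclass LockedCounter {\n    private final ReentrantLock lock = new ReentrantLock();\n    private int count = 0;\n\n    public void increment() {\n        lock.lock();\n        try {\n            count++;  // the read-modify-write is protected by the lock\n        } finally {\n            lock.unlock();  // always release, even if an exception occurs\n        }\n    }\n\n    public int getCount() {\n        return count;\n    }\n}\n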
"},{"location":"langdives/Java/Threads-Atomicity/#javas-atomic-classes","title":"Java\u2019s Atomic Classes","text":"

The java.util.concurrent.atomic package offers classes that support lock-free, thread-safe operations on single variables. These classes rely on low-level atomic operations (like CAS \u2014 Compare-And-Swap) provided by the underlying hardware.

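As a simplified conceptual sketch of what a CAS-based update looks like (this mirrors the retry loop behind methods like incrementAndGet(), not the actual JDK implementation):
import java.util.concurrent.atomic.AtomicInteger;\n\nAtomicInteger value = new AtomicInteger(0);\n\nint current;\ndo {\n    current = value.get();                             // read the current value\n} while (!value.compareAndSet(current, current + 1));  // retry if another thread changed it first\n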
"},{"location":"langdives/Java/Threads-Atomicity/#common-atomic-classes","title":"Common Atomic Classes","text":""},{"location":"langdives/Java/Threads-Atomicity/#atomicinteger","title":"AtomicInteger","text":"Example: Solving the Increment Problem
import java.util.concurrent.atomic.AtomicInteger;\n\nclass AtomicCounter {\n    private final AtomicInteger count = new AtomicInteger(0);\n\n    public void increment() {\n        count.incrementAndGet();  // Atomic increment\n    }\n\n    public int getCount() {\n        return count.get();\n    }\n}\n\npublic class Main {\n    public static void main(String[] args) throws InterruptedException {\n        AtomicCounter counter = new AtomicCounter();\n\n        Thread t1 = new Thread(() -> {\n            for (int i = 0; i < 1000; i++) {\n                counter.increment();\n            }\n        });\n\n        Thread t2 = new Thread(() -> {\n            for (int i = 0; i < 1000; i++) {\n                counter.increment();\n            }\n        });\n\n        t1.start();\n        t2.start();\n        t1.join();\n        t2.join();\n\n        System.out.println(\"Final Count: \" + counter.getCount());  // Output: 2000\n    }\n}\n
Explanation "},{"location":"langdives/Java/Threads-Atomicity/#atomicboolean","title":"AtomicBoolean","text":"Example: Managing Flags Safely
import java.util.concurrent.atomic.AtomicBoolean;\n\nclass FlagManager {\n    private final AtomicBoolean isActive = new AtomicBoolean(false);\n\n    public void activate() {\n        if (isActive.compareAndSet(false, true)) {\n            System.out.println(\"Flag activated.\");\n        } else {\n            System.out.println(\"Flag already active.\");\n        }\n    }\n\n    public void deactivate() {\n        if (isActive.compareAndSet(true, false)) {\n            System.out.println(\"Flag deactivated.\");\n        } else {\n            System.out.println(\"Flag already inactive.\");\n        }\n    }\n}\n\npublic class Main {\n    public static void main(String[] args) {\n        FlagManager manager = new FlagManager();\n\n        Thread t1 = new Thread(manager::activate);\n        Thread t2 = new Thread(manager::activate);\n\n        t1.start();\n        t2.start();\n    }\n}\n
Explanation

compareAndSet() changes the flag only if it matches the expected value, ensuring thread safety.

"},{"location":"langdives/Java/Threads-Atomicity/#atomicreference","title":"AtomicReference","text":"Example: Atomic Operations on Objects
import java.util.concurrent.atomic.AtomicReference;\n\nclass Person {\n    String name;\n\n    Person(String name) {\n        this.name = name;\n    }\n}\n\npublic class AtomicReferenceExample {\n    public static void main(String[] args) {\n        AtomicReference<Person> personRef = new AtomicReference<>(new Person(\"Alice\"));\n\n        // Atomic update of the reference\n        personRef.set(new Person(\"Bob\"));\n        System.out.println(\"Updated Person: \" + personRef.get().name);\n    }\n}\n

When to Use ?

Use AtomicReference when you need atomic operations on object references.

"},{"location":"langdives/Java/Threads-Atomicity/#atomicstampedreference","title":"AtomicStampedReference","text":"

The ABA problem occurs when a value changes from A to B and then back to A. AtomicStampedReference solves this by associating a version (stamp) with the value.

Example: ABA problem prevention
import java.util.concurrent.atomic.AtomicStampedReference;\n\npublic class AtomicStampedReferenceExample {\n    public static void main(String[] args) {\n        AtomicStampedReference<Integer> ref = new AtomicStampedReference<>(1, 0);\n\n        int[] stamp = new int[1];\n        Integer value = ref.get(stamp);\n        System.out.println(\"Initial Value: \" + value + \", Stamp: \" + stamp[0]);\n\n        boolean success = ref.compareAndSet(1, 2, stamp[0], stamp[0] + 1);\n        System.out.println(\"CAS Success: \" + success + \", New Value: \" + ref.get(stamp) + \", New Stamp: \" + stamp[0]);\n    }\n}\n
Explanation

AtomicStampedReference ensures that the same value change does not go undetected by tracking the version.

"},{"location":"langdives/Java/Threads-Atomicity/#performance","title":"Performance ?","text":""},{"location":"langdives/Java/Threads-Atomicity/#when-to-use","title":"When to Use ?","text":""},{"location":"langdives/Java/Threads-Atomicity/#limitations","title":"Limitations ?","text":""},{"location":"langdives/Java/Threads-Atomicity/#best-practices","title":"Best Practices","text":""},{"location":"langdives/Java/Threads-Atomicity/#summary","title":"Summary","text":"

The atomic classes in Java\u2019s java.util.concurrent.atomic package offer lock-free, thread-safe operations that are ideal for simple state management. By ensuring atomicity, these classes help avoid race conditions and improve the performance and scalability of multithreaded applications. However, they are best suited for single-variable updates; for more complex operations, locks or transactional mechanisms may still be necessary.

"},{"location":"langdives/Java/Threads/","title":"Threads","text":"

Java offers multithreading to perform multiple tasks concurrently, improving performance and responsiveness. This deep dive covers every key concept of Java threading with detailed explanations and code examples.

Before that, let's have a quick rewind of the fundamental concepts of concurrency and parallelism.

"},{"location":"langdives/Java/Threads/#concurrency-and-parallelism","title":"Concurrency and Parallelism","text":"

Concurrency: Multiple tasks start, run, and complete in overlapping time periods (not necessarily simultaneously).

Parallelism: Multiple tasks run exactly at the same time (requires multi-core processors).

We have another article that goes through the fundamentals of concurrency and parallelism in depth. We cover some of that material here too, but it's recommended to go through that article first: Concurrency and Parallelism

Java achieves both using threads, thread pools, and various libraries such as the Executors framework, the Fork/Join Framework, and the Streams API. We will go through them one by one; in this article we mostly cover threads.

"},{"location":"langdives/Java/Threads/#what-is-a-thread","title":"What is a Thread?","text":"

A thread is a lightweight sub-process. A Java program has at least one thread \u2014 the main thread, which starts with the main() method. You can create additional threads to execute code concurrently. Each thread shares the same process memory, but has its own stack, registers, and program counter.

"},{"location":"langdives/Java/Threads/#how-to-create","title":"How to Create ?","text":"

You can create a thread in two ways:

Extending the Thread class
class MyThread extends Thread {\n    public void run() {\n        System.out.println(\"Thread running: \" + Thread.currentThread().getName());\n    }\n}\n\npublic class Main {\n    public static void main(String[] args) {\n        MyThread t1 = new MyThread();\n        t1.start();  // Start the thread\n    }\n}\n
Implementing the Runnable interface
class MyRunnable implements Runnable {\n    public void run() {\n        System.out.println(\"Runnable running: \" + Thread.currentThread().getName());\n    }\n}\n\npublic class Main {\n    public static void main(String[] args) {\n        Thread t1 = new Thread(new MyRunnable());\n        t1.start();  // Start the thread\n    }\n}\n

When to Use ? Prefer implementing Runnable: Java supports only single class inheritance, so extending Thread uses up your superclass slot, while Runnable keeps the task decoupled from the thread that runs it. Extend Thread only when you need to customize thread behavior itself.

"},{"location":"langdives/Java/Threads/#thread-lifecycle","title":"Thread Lifecycle","text":"

A thread in Java goes through the following states: NEW, RUNNABLE, BLOCKED, WAITING, TIMED_WAITING, and TERMINATED.

Thread Lifecycle

Example: Thread Lifecycle
class MyThread extends Thread {\n    public void run() {\n        System.out.println(\"Running thread: \" + Thread.currentThread().getName());\n    }\n}\n\npublic class Main {\n    public static void main(String[] args) throws InterruptedException {\n        MyThread t1 = new MyThread();  // NEW\n        t1.start();  // RUNNABLE\n\n        // Join to wait for the thread to complete\n        t1.join();  // Terminated once finished\n        System.out.println(\"Thread has terminated.\");\n    }\n}\n

Note

"},{"location":"langdives/Java/Threads/#daemon-threads","title":"Daemon Threads","text":"

A daemon thread is a background thread that provides support services, like the garbage collector. It does not prevent the JVM from shutting down once all user threads are completed.

Example of Daemon Thread
class DaemonThread extends Thread {\n    public void run() {\n        if (Thread.currentThread().isDaemon()) {\n            System.out.println(\"This is a daemon thread.\");\n        } else {\n            System.out.println(\"This is a user thread.\");\n        }\n    }\n}\n\npublic class Main {\n    public static void main(String[] args) {\n        DaemonThread t1 = new DaemonThread();\n        t1.setDaemon(true);  // Set as daemon thread\n        t1.start();\n\n        DaemonThread t2 = new DaemonThread();\n        t2.start();\n    }\n}\n

When to use Daemon Threads ?

For background tasks like logging, garbage collection, or monitoring services.

"},{"location":"langdives/Java/Threads/#thread-priority","title":"Thread Priority","text":"

Java assigns a priority to each thread, ranging from 1 (MIN_PRIORITY) to 10 (MAX_PRIORITY). The default priority is 5 (NORM_PRIORITY). Thread priority affects scheduling, but it\u2019s platform-dependent \u2014 meaning it doesn\u2019t guarantee execution order.

Setting Thread Priority Example
class PriorityThread extends Thread {\n    public void run() {\n        System.out.println(\"Running thread: \" + Thread.currentThread().getName() +\n                        \" with priority: \" + Thread.currentThread().getPriority());\n    }\n}\n\npublic class Main {\n    public static void main(String[] args) {\n        PriorityThread t1 = new PriorityThread();\n        PriorityThread t2 = new PriorityThread();\n\n        t1.setPriority(Thread.MIN_PRIORITY);  // Priority 1\n        t2.setPriority(Thread.MAX_PRIORITY);  // Priority 10\n\n        t1.start();\n        t2.start();\n    }\n}\n

When to use Priority Threads

Only when certain tasks should have preferential scheduling over others. However, Java thread scheduling is not guaranteed, so don't rely solely on priority.

"},{"location":"langdives/Java/Threads/#thread-synchronization","title":"Thread Synchronization","text":"

When multiple threads access shared resources (like variables), synchronization ensures that only one thread modifies the resource at a time. Use the synchronized keyword to prevent race conditions.

Synchronization Example
class Counter {\n    private int count = 0;\n\n    public synchronized void increment() {\n        count++;  // Only one thread can increment at a time\n    }\n\n    public int getCount() {\n        return count;\n    }\n}\n\npublic class Main {\n    public static void main(String[] args) throws InterruptedException {\n        Counter counter = new Counter();\n\n        Thread t1 = new Thread(() -> {\n            for (int i = 0; i < 1000; i++) counter.increment();\n        });\n\n        Thread t2 = new Thread(() -> {\n            for (int i = 0; i < 1000; i++) counter.increment();\n        });\n\n        t1.start();\n        t2.start();\n        t1.join();\n        t2.join();\n\n        System.out.println(\"Final count: \" + counter.getCount());\n    }\n}\n

When to use Synchronization ?

When multiple threads access critical sections of code to avoid inconsistent data.

"},{"location":"langdives/Java/Threads/#inter-thread-communication","title":"Inter-thread Communication","text":"

Java allows threads to communicate using wait-notify methods, avoiding busy waiting.

Inter-thread Communication Example
class SharedResource {\n    private int value;\n    private boolean available = false;\n\n    public synchronized void produce(int val) throws InterruptedException {\n        while (available) {\n            wait();  // Wait if value is already available\n        }\n        value = val;\n        available = true;\n        System.out.println(\"Produced: \" + value);\n        notify();  // Notify the consumer thread\n    }\n\n    public synchronized void consume() throws InterruptedException {\n        while (!available) {\n            wait();  // Wait if value is not available\n        }\n        System.out.println(\"Consumed: \" + value);\n        available = false;\n        notify();  // Notify the producer thread\n    }\n}\n\npublic class Main {\n    public static void main(String[] args) {\n        SharedResource resource = new SharedResource();\n\n        Thread producer = new Thread(() -> {\n            try {\n                for (int i = 1; i <= 5; i++) resource.produce(i);\n            } catch (InterruptedException e) {\n                Thread.currentThread().interrupt();\n            }\n        });\n\n        Thread consumer = new Thread(() -> {\n            try {\n                for (int i = 1; i <= 5; i++) resource.consume();\n            } catch (InterruptedException e) {\n                Thread.currentThread().interrupt();\n            }\n        });\n\n        producer.start();\n        consumer.start();\n    }\n}\n
"},{"location":"langdives/Java/Threads/#thread-local-variables","title":"Thread-Local Variables","text":"

ThreadLocal provides a way to create thread-isolated variables. Each thread gets its own copy of the variable, and changes made by one thread do not affect others. This is useful when you don\u2019t want threads to share a common state.

ThreadLocal Usage Example
public class ThreadLocalExample {\n    private static ThreadLocal<Integer> threadLocal = ThreadLocal.withInitial(() -> 1);\n\n    public static void main(String[] args) {\n        Thread t1 = new Thread(() -> {\n            threadLocal.set(100);\n            System.out.println(\"Thread 1: \" + threadLocal.get());\n        });\n\n        Thread t2 = new Thread(() -> {\n            threadLocal.set(200);\n            System.out.println(\"Thread 2: \" + threadLocal.get());\n        });\n\n        t1.start();\n        t2.start();\n    }\n}\n

When to use ?

Useful in multi-threaded environments (like database transactions) where each thread needs its own context without interference from other threads.

"},{"location":"langdives/Java/Threads/#volatile-variables","title":"Volatile Variables","text":"

The volatile keyword ensures visibility of changes to variables across threads. Without volatile, a thread may keep working with a locally cached copy of a variable and never observe the latest changes made by other threads.

Volatile Example
public class VolatileExample {\n    private static volatile boolean running = true;\n\n    public static void main(String[] args) {\n        Thread t = new Thread(() -> {\n            while (running) {\n                // Busy-wait\n            }\n            System.out.println(\"Thread stopped.\");\n        });\n\n        t.start();\n\n        try { Thread.sleep(1000); } catch (InterruptedException e) { }\n        running = false;  // Change will be visible to other threads\n    }\n}\n

When to Use Volatile

Use volatile for variables accessed by multiple threads without needing synchronization (e.g., flags).

"},{"location":"langdives/Java/Threads/#when-to-use-volatile","title":"When to Use volatile ?","text":" Example Where volatile is Necessary
class VolatileExample {\n    private volatile boolean running = true;\n\n    public void stop() {\n        running = false;  // Change becomes immediately visible to other threads\n    }\n\n    public void run() {\n        while (running) {\n            // Do something\n        }\n        System.out.println(\"Thread stopped.\");\n    }\n\n    public static void main(String[] args) throws InterruptedException {\n        VolatileExample example = new VolatileExample();\n\n        Thread t = new Thread(example::run);\n        t.start();\n\n        Thread.sleep(1000);\n        example.stop();  // Stop the thread\n    }\n}\n
Explanation

Here, volatile ensures that the change to running made by the stop() method is immediately visible to the thread executing run(). Without volatile, the run() thread might never see the change and keep running indefinitely.

"},{"location":"langdives/Java/Threads/#when-not-to-use-volatile","title":"When Not to Use volatile ?","text":" Problem with Volatile for Non-Atomic Operations
class Counter {\n    private volatile int count = 0;\n\n    public void increment() {\n        count++;  // Not atomic! Two threads can still read the same value.\n    }\n\n    public int getCount() {\n        return count;\n    }\n}\n

Issue

Even though count is marked volatile, count++ is not atomic: it is a read-modify-write sequence, so two threads can read the same value, increment it, and lose an update. To fix this, use synchronized or AtomicInteger.
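
A minimal sketch of the AtomicInteger fix (the class name AtomicCounter is illustrative):

AtomicInteger Fix Example
import java.util.concurrent.atomic.AtomicInteger;\n\nclass AtomicCounter {\n    private final AtomicInteger count = new AtomicInteger(0);\n\n    public void increment() {\n        count.incrementAndGet();  // Atomic read-modify-write, no lost updates\n    }\n\n    public int getCount() {\n        return count.get();\n    }\n}\n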

"},{"location":"langdives/Java/Threads/#volatile-vs-synchronized","title":"Volatile vs Synchronized","text":""},{"location":"langdives/Java/Threads/#no-synchronized-or-volatile","title":"No synchronized or volatile ?","text":"

If you don\u2019t use volatile or synchronized, dangerous scenarios can occur, like the following:

Example
class SharedResource {\n    private boolean available = false;\n\n    public void produce() {\n        available = true;  // Change not guaranteed to be visible immediately\n    }\n\n    public void consume() {\n        while (!available) {\n            // Busy-waiting, might never see the change to `available`\n        }\n        System.out.println(\"Consumed!\");\n    }\n}\n

Problem

If available is not marked volatile, the change made by produce() might not be visible to the consume() thread immediately. The consumer thread might be stuck in an infinite loop because it doesn't see the latest value of available.

"},{"location":"langdives/Java/Threads/#synchronized-over-volatile","title":"Synchronized over volatile ?","text":"

Let's go through an example where it's okay to use just synchronized instead of volatile.

Example
public synchronized void produce(int val) throws InterruptedException {\n    while (available) {\n        wait();  // Wait if value is already available\n    }\n    value = val;\n    available = true;\n    System.out.println(\"Produced: \" + value);\n    notify();  // Notify the consumer thread\n}\n

Synchronized Keyword: acquiring and releasing a monitor establishes a happens-before relationship, so writes made inside a synchronized method are visible to the next thread that locks the same monitor.

Wait-Notify Mechanism: wait() releases the monitor and suspends the thread until notify() wakes it; the lock handoff guarantees the woken thread sees the latest state.

Because this code uses synchronized methods and wait-notify, the necessary memory visibility is achieved without needing volatile.

"},{"location":"langdives/Java/Threads/#differences","title":"Differences","text":"Aspect volatile synchronized Visibility Ensures visibility of changes. Ensures visibility and atomicity. Atomicity Not guaranteed. Guaranteed (only one thread at a time). Performance Faster (no locking). Slower (locking involved). Use Case For flags, simple state updates. For complex operations, critical sections. Overhead Low (no blocking). High (involves blocking and context switches)."},{"location":"langdives/Java/Threads/#thread-memory","title":"Thread Memory","text":"

The memory consumption per thread and the maximum number of threads in Java depend on several factors, such as the JVM implementation, the operating system, and the configured stack size.

"},{"location":"langdives/Java/Threads/#memory-used-by-thread","title":"Memory used by Thread","text":"

Each Java thread consumes two key areas of memory:

Thread Stack Memory: Each thread gets its own stack, which holds local variables (primitives and references), method call frames, and intermediate results during method execution.

Note

The default stack size depends on the JVM and platform; on most 64-bit JVMs it is around 1 MB per thread.

You can change the stack size with the -Xss JVM option:

java -Xss512k YourProgram\n

Native Thread Metadata: In addition to stack memory, the OS kernel allocates metadata per thread (for thread control structures). This varies by platform but is typically in the range of 8 KB to 16 KB per thread.

"},{"location":"langdives/Java/Threads/#memory-per-thread","title":"Memory per Thread ?","text":"

The typical memory consumption per thread: roughly 1 MB of stack (the default) plus 8 KB to 16 KB of native thread metadata.

Thus, a single thread could use ~1 MB to 1.1 MB of memory.

"},{"location":"langdives/Java/Threads/#max-threads-you-can-create","title":"Max Threads you Can Create ?","text":"

The number of threads you can create depends on: the available memory, the per-thread stack size (-Xss), and OS-level limits.

Practical Calculation Example

Let's say: the JVM has roughly 6 GB of memory available for thread stacks, and each thread consumes about 1 MB.

Maximum threads = 6 GB / 1 MB per thread = ~6000 threads.
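
A rough way to observe the practical limit on a given machine is to keep starting parked threads until creation fails. A destructive experiment sketch; run it only in a throwaway JVM:

Max Threads Experiment Example
public class MaxThreadsExperiment {\n    public static void main(String[] args) {\n        int created = 0;\n        try {\n            while (true) {\n                Thread t = new Thread(() -> {\n                    try { Thread.sleep(Long.MAX_VALUE); } catch (InterruptedException e) { }\n                });\n                t.setDaemon(true);  // Daemon threads don't block JVM exit\n                t.start();\n                created++;\n            }\n        } catch (Throwable e) {\n            // Typically OutOfMemoryError: unable to create native thread\n            System.out.println(\"Threads created before failure: \" + created);\n        }\n    }\n}\n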

OS Limits on Threads

Even if memory allows for thousands of threads, the OS imposes limits, such as the per-user process/thread cap (ulimit -u on Linux) and kernel-wide thread limits.

"},{"location":"langdives/Java/Threads/#too-many-threads-created","title":"Too Many Threads Created ?","text":""},{"location":"langdives/Java/Threads/#optimizing-thread-usage","title":"Optimizing Thread Usage","text":"

Rather than creating many threads manually, use thread pools to manage a fixed number of threads efficiently:

Thread Pools Example
import java.util.concurrent.ExecutorService;\nimport java.util.concurrent.Executors;\n\npublic class ThreadPoolExample {\n    public static void main(String[] args) {\n        ExecutorService executor = Executors.newFixedThreadPool(10);\n        for (int i = 0; i < 100; i++) {\n            executor.submit(() -> {\n                System.out.println(\"Running thread: \" + Thread.currentThread().getName());\n            });\n        }\n        executor.shutdown();\n    }\n}\n

Thread pools reuse threads, reducing memory usage and improving performance.

"},{"location":"langdives/Java/Threads/#how-to-increase-max-thread","title":"How to Increase Max Thread ?","text":"

On Linux, you can increase the maximum number of threads a user can create:

Check current limits
ulimit -u  # Max user processes\n
Increase limit (temporary)
ulimit -u 65535\n
Permanent change: Edit '/etc/security/limits.conf' and add
your_user_name  hard  nproc  65535\nyour_user_name  soft  nproc  65535\n

"},{"location":"langdives/Java/Spring/","title":"Spring","text":""},{"location":"langdives/Java/Spring/#what-is-spring","title":"What is Spring ?","text":"

Spring is a popular, open-source Java-based framework used to create enterprise-level applications. It provides a comprehensive programming and configuration model that simplifies Java development. At its core, Spring focuses on dependency injection and inversion of control (IoC), providing an abstraction over Java's complexity.

"},{"location":"langdives/Java/Spring/#core-goals-of-spring","title":"Core goals of Spring","text":""},{"location":"langdives/Java/Spring/#ecosystem-overview","title":"Ecosystem Overview","text":"

The Spring ecosystem consists of various projects for different use cases:

"},{"location":"langdives/Java/Spring/SpringAnnotations/","title":"Spring Annotations","text":"

A comprehensive list of Spring Boot annotations, covering core Spring Boot, configuration, web, data, testing, and more, organized by category with the annotation name and its purpose/use case for easy reference.
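
Before the reference tables, here is a minimal sketch showing a few of the most common annotations working together (the class and endpoint names are illustrative):

Annotations in Action Example
@SpringBootApplication\npublic class DemoApplication {\n    public static void main(String[] args) {\n        SpringApplication.run(DemoApplication.class, args);\n    }\n}\n\n@RestController\nclass GreetingController {\n    // Maps GET /greet?name=... to this handler\n    @GetMapping(\"/greet\")\n    public String greet(@RequestParam(defaultValue = \"world\") String name) {\n        return \"Hello, \" + name;\n    }\n}\n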

"},{"location":"langdives/Java/Spring/SpringAnnotations/#core-annotations","title":"Core Annotations","text":"Annotation Purpose/Use Case @SpringBootApplication Main entry point for a Spring Boot application. Combines @Configuration, @ComponentScan, and @EnableAutoConfiguration. @EnableAutoConfiguration Enables automatic configuration of Spring beans based on the classpath and defined properties. @ComponentScan Scans the package and its sub-packages for Spring components (e.g., @Component, @Service). @Configuration Marks a class as a source of bean definitions. Used to define Spring beans programmatically. @Bean Declares a method as a Spring bean, registered in the application context. @Import Imports additional configuration classes. @ImportResource Loads bean definitions from external XML configuration files."},{"location":"langdives/Java/Spring/SpringAnnotations/#web-and-rest-annotations","title":"Web and REST Annotations","text":"Annotation Purpose/Use Case @RestController Marks a class as a REST API controller. Combines @Controller and @ResponseBody. @Controller Marks a class as a web controller. Works with view templates (like Thymeleaf). @RequestMapping Maps HTTP requests to specific handler methods or classes. Can be used on classes or methods. @GetMapping Maps HTTP GET requests to specific handler methods. @PostMapping Maps HTTP POST requests to specific handler methods. @PutMapping Maps HTTP PUT requests to specific handler methods. @DeleteMapping Maps HTTP DELETE requests to specific handler methods. @PatchMapping Maps HTTP PATCH requests to specific handler methods. @RequestBody Binds the HTTP request body to a Java object. Used in REST controllers. @ResponseBody Binds the return value of a method directly to the HTTP response body. @RequestParam Binds HTTP query parameters to method arguments. @PathVariable Binds URI template variables to method parameters. @RequestHeader Binds HTTP request headers to method parameters. @CookieValue Binds cookie values to method parameters. @ModelAttribute Binds form data to a model object. @SessionAttributes Declares session-scoped model attributes. @CrossOrigin Enables cross-origin requests (CORS) for specific endpoints."},{"location":"langdives/Java/Spring/SpringAnnotations/#jpa-jdbc-annotations","title":"JPA, JDBC Annotations","text":"Annotation Purpose/Use Case @Entity Marks a class as a JPA entity. @Table Specifies the database table for a JPA entity. @Id Marks a field as the primary key of a JPA entity. @GeneratedValue Specifies how the primary key value should be generated. @Column Specifies the mapping of a field to a database column. @OneToOne Establishes a one-to-one relationship between entities. @OneToMany Establishes a one-to-many relationship between entities. @ManyToOne Establishes a many-to-one relationship between entities. @ManyToMany Establishes a many-to-many relationship between entities. @JoinColumn Specifies the foreign key column for a relationship. @Query Defines a custom JPQL or SQL query on a repository method. @Transactional Marks a method or class as transactional. Ensures ACID properties in data operations. @EnableJpaRepositories Enables JPA repositories for data access. @Repository Marks a class as a data repository. @EnableTransactionManagement Enables declarative transaction management."},{"location":"langdives/Java/Spring/SpringAnnotations/#security-annotations","title":"Security Annotations","text":"Annotation Purpose/Use Case @EnableWebSecurity Enables Spring Security for web applications. 
@EnableGlobalMethodSecurity Enables method-level security annotations like @PreAuthorize and @PostAuthorize. @PreAuthorize Applies authorization logic before a method is invoked. @PostAuthorize Applies authorization logic after a method has executed. @Secured Secures a method by roles (deprecated in favor of @PreAuthorize). @RolesAllowed Specifies which roles are allowed to access a method. @WithMockUser Simulates a user for testing security."},{"location":"langdives/Java/Spring/SpringAnnotations/#testing-annotations","title":"Testing Annotations","text":"Annotation Purpose/Use Case @SpringBootTest Runs integration tests for a Spring Boot application. Loads the full application context. @WebMvcTest Tests only web layer components (e.g., controllers). @DataJpaTest Tests only JPA repositories. Configures an in-memory database. @MockBean Replaces a bean with a mock during tests. @SpyBean Replaces a bean with a spy during tests. @TestConfiguration Provides additional bean configurations for tests. @BeforeEach Runs before each test method in a test class. @AfterEach Runs after each test method in a test class."},{"location":"langdives/Java/Spring/SpringAnnotations/#profiles-annotations","title":"Profiles Annotations","text":"Annotation Purpose/Use Case @ConfigurationProperties Binds external configuration properties to a Java bean. @EnableConfigurationProperties Enables support for @ConfigurationProperties beans. @Profile Specifies the profile under which a bean is active (e.g., dev, prod). @Value Injects a value from the properties or environment. @PropertySource Loads properties from an external file. @Environment Provides access to the current environment settings."},{"location":"langdives/Java/Spring/SpringAnnotations/#actuator-metrics-annotations","title":"Actuator & Metrics Annotations","text":"Annotation Purpose/Use Case @Endpoint Defines a custom Actuator endpoint. @ReadOperation Marks a method as a read operation for an Actuator endpoint. @WriteOperation Marks a method as a write operation for an Actuator endpoint. @DeleteOperation Marks a method as a delete operation for an Actuator endpoint. @Timed Measures the execution time of a method. @Gauge Exposes a gauge metric to Actuator. @Metered Marks a method to be counted as a metric (deprecated in favor of @Timed)."},{"location":"langdives/Java/Spring/SpringAnnotations/#microservices-annotations","title":"Microservices Annotations","text":"Annotation Purpose/Use Case @EnableDiscoveryClient Enables service registration with Eureka, Consul, or Zookeeper. @EnableFeignClients Enables Feign clients for inter-service communication. @CircuitBreaker Implements circuit-breaking logic using Resilience4j. @Retryable Enables retry logic for a method. @LoadBalanced Enables load balancing for REST clients."},{"location":"langdives/Java/Spring/SpringAnnotations/#miscellaneous-annotations","title":"Miscellaneous Annotations","text":"Annotation Purpose/Use Case @Conditional Conditionally registers a bean based on custom logic. @Async Marks a method to run asynchronously. @Scheduled Schedules a method to run at fixed intervals or cron expressions. @EventListener Marks a method to listen for application events. @Cacheable Caches the result of a method. @CacheEvict Evicts entries from a cache."},{"location":"langdives/Java/Spring/SpringAnnotations/#summary","title":"Summary","text":"

This is a comprehensive list of all major Spring Boot annotations, categorized by their functionality. With these annotations, Spring Boot makes it easier to develop applications by reducing boilerplate code, automating configuration, and offering powerful tools for testing, security, and microservices development.

"},{"location":"langdives/Java/Spring/SpringBoot/","title":"Spring Boot","text":"

This article covers how Spring Boot automates configurations, deals with microservices, and manages monitoring, security, and performance.

"},{"location":"langdives/Java/Spring/SpringBoot/#what-is-spring-boot","title":"What is Spring Boot ?","text":"

Spring Boot is an extension of the Spring Framework that simplifies the development of Java applications by offering auto-configuration, embedded servers, and opinionated starter dependencies.

The goal of Spring Boot is to help developers build stand-alone, production-grade applications quickly and with less fuss.

"},{"location":"langdives/Java/Spring/SpringBoot/#application-architecture","title":"Application Architecture","text":"

A Spring Boot application consists of a few key components: annotations such as @SpringBootApplication, starters, and auto-configuration, covered below.

"},{"location":"langdives/Java/Spring/SpringBoot/#key-components","title":"Key Components","text":""},{"location":"langdives/Java/Spring/SpringBoot/#annotations","title":"Annotations","text":""},{"location":"langdives/Java/Spring/SpringBoot/#starters","title":"Starters","text":"

Starters are pre-configured dependency bundles for common functionalities

"},{"location":"langdives/Java/Spring/SpringBoot/#how-auto-config-works","title":"How Auto-Config Works ?","text":"

Spring Boot uses @EnableAutoConfiguration to detect dependencies and automatically configure beans for you.

For example: If spring-boot-starter-data-jpa is present, it will

  1. Configure a DataSource.
  2. Configure an EntityManagerFactory to manage JPA entities.
  3. Enable transaction management using @EnableTransactionManagement.

How to Debug Auto-Configuration: start the application with the --debug flag (or set debug=true in application.properties) to log the conditions evaluation report, which shows which auto-configurations matched and which were skipped.

"},{"location":"langdives/Java/Spring/SpringBoot/#application-lifecycle","title":"Application Lifecycle","text":"

Startup: Spring Boot applications initialize with SpringApplication.run(), The lifecycle involves loading beans, initializing contexts, and wiring dependencies.

Embedded Server: By default, Spring Boot uses Tomcat as the embedded server. Others include Jetty and Undertow, The server listens on a configurable port (default: 8080).

Shutdown: Spring Boot provides graceful shutdown using @PreDestroy or by registering a JVM shutdown hook via ConfigurableApplicationContext.registerShutdownHook().

"},{"location":"langdives/Java/Spring/SpringBoot/#configuration-in-depth","title":"Configuration in Depth","text":""},{"location":"langdives/Java/Spring/SpringBoot/#using-applicationproperties","title":"Using application.properties","text":"

Spring Boot applications are configured using either application.properties or application.yml.

Examples of application.properties
server.port=8081\nspring.datasource.url=jdbc:mysql://localhost:3306/mydb\nspring.datasource.username=root\nspring.datasource.password=password\n
"},{"location":"langdives/Java/Spring/SpringBoot/#using-applicationyml","title":"Using application.yml","text":"

Using Profiles (e.g., Dev vs. Prod) in application.yml

Example of application.yml
server:\n  port: 8080\nspring:\n  profiles:\n    active: dev\n\n---\nspring:\n  profiles: dev\n  datasource:\n    url: jdbc:h2:mem:testdb\n\n---\nspring:\n  profiles: prod\n  datasource:\n    url: jdbc:mysql://localhost:3306/proddb\n

You can activate profiles programmatically or through command-line options

$ java -Dspring.profiles.active=prod -jar myapp.jar\n
"},{"location":"langdives/Java/Spring/SpringBoot/#custom-configuration","title":"Custom Configuration","text":"

You can define your own custom properties and inject them into beans using @ConfigurationProperties.

Example
@Component  // Register as a bean so property binding takes effect (or use @EnableConfigurationProperties)\n@ConfigurationProperties(prefix = \"custom\")\npublic class CustomConfig {\n    private String name;\n    private int timeout;\n\n    // Getters and Setters\n}\n
application.properties
custom.name=SpringApp\ncustom.timeout=5000\n
Inject the CustomConfig bean
@Autowired\nprivate CustomConfig customConfig;\n
"},{"location":"langdives/Java/Spring/SpringBoot/#embedded-server-customization","title":"Embedded Server Customization","text":"

You can customize the embedded server by defining a ConfigurableServletWebServerFactory bean (the EmbeddedServletContainerFactory from Spring Boot 1.x was replaced by this in 2.x).

Changing the Tomcat thread pool size
@Bean\npublic ConfigurableServletWebServerFactory webServerFactory() {\n    TomcatServletWebServerFactory factory = new TomcatServletWebServerFactory();\n    factory.setPort(9090);\n    factory.addConnectorCustomizers(connector -> {\n        connector.setAttribute(\"maxThreads\", 200);\n    });\n    return factory;\n}\n
"},{"location":"langdives/Java/Spring/SpringBoot/#actuator-and-monitoring","title":"Actuator and Monitoring","text":""},{"location":"langdives/Java/Spring/SpringBoot/#spring-boot-actuator","title":"Spring Boot Actuator","text":"

Exposes application management and monitoring endpoints.

Some common endpoints: /actuator/health, /actuator/info, /actuator/metrics, and /actuator/env.

Securing Actuator Endpoints
management:\n  endpoints:\n    web:\n      exposure:\n        include: health, info\n  security:\n    enabled: true\n

You can customize or add your own metrics by using @Timed or MeterRegistry.
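
A minimal sketch of registering a custom counter through the injected MeterRegistry (the metric name orders.placed is illustrative):

Custom Metric Example
import io.micrometer.core.instrument.Counter;\nimport io.micrometer.core.instrument.MeterRegistry;\nimport org.springframework.stereotype.Service;\n\n@Service\npublic class OrderMetrics {\n    private final Counter ordersPlaced;\n\n    public OrderMetrics(MeterRegistry registry) {\n        // Registered counters are exposed via /actuator/metrics\n        this.ordersPlaced = Counter.builder(\"orders.placed\")\n                .description(\"Number of orders placed\")\n                .register(registry);\n    }\n\n    public void recordOrder() {\n        ordersPlaced.increment();\n    }\n}\n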

"},{"location":"langdives/Java/Spring/SpringBoot/#security","title":"Security","text":""},{"location":"langdives/Java/Spring/SpringBoot/#building-microservices","title":"Building Microservices","text":""},{"location":"langdives/Java/Spring/SpringBoot/#testing","title":"Testing","text":""},{"location":"langdives/Java/Spring/SpringBoot/#summary","title":"Summary","text":"

Spring Boot streamlines the development process by providing auto-configuration, embedded servers, and a production-ready environment. It empowers developers to build and deploy microservices quickly, backed by powerful features like Spring Security, Spring Data, Actuator, and more. With its opinionated defaults and deep customizability, Spring Boot strikes a balance between simplicity and flexibility, making it ideal for both beginners and advanced developers.

"},{"location":"langdives/Java/Spring/SpringCoreFramework/","title":"Spring Core Framework","text":"

This article covers the foundation of the entire Spring ecosystem. We'll explore each component and mechanism in detail, so by the end you\u2019ll have a thorough understanding of how Spring Core works, including the IoC container, Dependency Injection (DI), Beans, ApplicationContext, the Bean Lifecycle, AOP, and more.

"},{"location":"langdives/Java/Spring/SpringCoreFramework/#what-is-spring-core-framework","title":"What is Spring Core Framework","text":"

The Spring Core Framework is the heart of the Spring ecosystem. It provides the essential features required to build Java applications, with a focus on dependency injection (DI) and inversion of control (IoC). At its core, Spring aims to eliminate the complexities of creating objects, managing dependencies, and wiring different components together.

"},{"location":"langdives/Java/Spring/SpringCoreFramework/#modules","title":"Modules","text":""},{"location":"langdives/Java/Spring/SpringCoreFramework/#spring-core","title":"Spring Core","text":"

The foundational module that provides the IoC container and the basic tools for dependency injection (DI), It includes the core interfaces and classes like BeanFactory, ApplicationContext, BeanPostProcessor, BeanDefinition, and others.

"},{"location":"langdives/Java/Spring/SpringCoreFramework/#spring-beans","title":"Spring Beans","text":"

Manages the configuration, creation, and lifecycle of Spring beans.

"},{"location":"langdives/Java/Spring/SpringCoreFramework/#spring-context","title":"Spring Context","text":"

Provides a runtime environment for applications using the IoC container. It builds on the Spring Core and adds additional functionality like events and internationalization (i18n).

"},{"location":"langdives/Java/Spring/SpringCoreFramework/#spring-spel","title":"Spring SpEL","text":"

SpEL (Spring Expression Language) is a powerful expression language that can be used to dynamically query or manipulate bean properties.

"},{"location":"langdives/Java/Spring/SpringCoreFramework/#core-concepts","title":"Core Concepts","text":""},{"location":"langdives/Java/Spring/SpringCoreFramework/#inversion-of-control-ioc","title":"Inversion of Control (IoC)","text":"

The Inversion of Control (IoC) principle is at the core of the Spring Framework. It shifts the control of object creation and management from the developer to the IoC container, promoting loose coupling and enhancing testability. Let\u2019s break it down conceptually and then dive into the Spring implementation.

In traditional programming, the application code creates and manages its dependencies directly.

Example
public class OrderService {\n    private PaymentService paymentService;\n\n    public OrderService() {\n        this.paymentService = new PaymentService(); // Tight coupling\n    }\n}\n

Explanation: OrderService creates its own PaymentService, so the two classes are tightly coupled and hard to test or swap in isolation.

IoC Solution: With Inversion of Control (IoC), the responsibility of creating the PaymentService is \"inverted\" and delegated to the Spring IoC container. Now, the IoC container injects the dependency into OrderService.

IoC Solution Example
@Component\npublic class OrderService {\n    private final PaymentService paymentService;\n\n    @Autowired\n    public OrderService(PaymentService paymentService) {\n        this.paymentService = paymentService;  // Dependency injection\n    }\n}\n

Explanation: the container constructs PaymentService and injects it through the constructor, decoupling OrderService from how its dependency is created.

"},{"location":"langdives/Java/Spring/SpringCoreFramework/#types-of-ioc-containers","title":"Types of IoC Containers","text":""},{"location":"langdives/Java/Spring/SpringCoreFramework/#beanfactory","title":"BeanFactory","text":"

BeanFactory is the basic IoC container in Spring. It provides basic dependency injection and bean management functionality.

Features of BeanFactory: lazy (on-demand) bean instantiation, basic dependency injection, and a minimal memory footprint.

Usage Example
BeanFactory factory = new XmlBeanFactory(new FileSystemResource(\"beans.xml\"));\nOrderService service = (OrderService) factory.getBean(\"orderService\");\n

However, BeanFactory is rarely used now because it lacks advanced features like event propagation, internationalization, and eager initialization, which are provided by ApplicationContext.

"},{"location":"langdives/Java/Spring/SpringCoreFramework/#applicationcontext","title":"ApplicationContext","text":"

ApplicationContext is a more powerful IoC container that extends BeanFactory. It is widely used in modern Spring applications because of its rich features.

Features of ApplicationContext: eager bean initialization at startup, event handling, internationalization (i18n), and full bean lifecycle support.

Usage Example
ApplicationContext context = new ClassPathXmlApplicationContext(\"beans.xml\");\nOrderService service = context.getBean(OrderService.class);\n

In most cases, developers use AnnotationConfigApplicationContext or Spring Boot to load configurations and manage beans.

"},{"location":"langdives/Java/Spring/SpringCoreFramework/#beanfactory-vs-applicationcontext","title":"BeanFactory vs ApplicationContext","text":"Aspect BeanFactory ApplicationContext Bean Initialization Lazy (on-demand) Eager (at startup) Event Handling Not supported Supports event handling Internationalization Not supported Supports i18n Bean Lifecycle Hooks Basic Full support for lifecycle hooks Common Usage Legacy or constrained environments Modern Spring applications"},{"location":"langdives/Java/Spring/SpringCoreFramework/#ioc-flow","title":"IoC Flow","text":"

Step-by-Step

  1. Define Beans and Dependencies: Beans can be defined in XML, Java Configuration, or through Annotations like @Component.

    <bean id=\"paymentService\" class=\"com.example.PaymentService\"/>\n<bean id=\"orderService\" class=\"com.example.OrderService\">\n    <constructor-arg ref=\"paymentService\"/>\n</bean>\n

  2. Spring IoC Container Loads Configuration: The IoC container reads the configuration (XML, annotations, or Java-based) during startup.

  3. Dependency Injection (DI): The IoC container identifies the dependencies and injects them using constructor, setter, or field injection.

  4. Bean Initialization: The IoC container initializes all necessary beans (eagerly or lazily).

  5. Bean Usage: The beans are now available for use by the application.

"},{"location":"langdives/Java/Spring/SpringCoreFramework/#dependency-injection-di","title":"Dependency Injection (DI)","text":""},{"location":"langdives/Java/Spring/SpringCoreFramework/#what-is-di","title":"What is DI ?","text":"

Dependency Injection (DI) is a pattern where objects are provided with their dependencies at runtime by the IoC container instead of creating them directly. Spring supports multiple types of dependency injection:

"},{"location":"langdives/Java/Spring/SpringCoreFramework/#types-of-di","title":"Types of DI","text":"

Constructor Injection: Dependencies are provided through the class constructor, Recommended for mandatory dependencies.

Constructor Injection Example
@Component\npublic class OrderService {\n    private final PaymentService paymentService;\n\n    @Autowired\n    public OrderService(PaymentService paymentService) {\n        this.paymentService = paymentService;\n    }\n}\n

Setter Injection: Dependencies are injected using setter methods, Useful for optional dependencies.

Setter Injection Example
@Component\npublic class OrderService {\n    private PaymentService paymentService;\n\n    @Autowired\n    public void setPaymentService(PaymentService paymentService) {\n        this.paymentService = paymentService;\n    }\n}\n

Field Injection: Dependencies are injected directly into class fields, Not recommended since it makes unit testing harder.

Field Injection
@Component\npublic class OrderService {\n    @Autowired\n    private PaymentService paymentService;\n}\n
"},{"location":"langdives/Java/Spring/SpringCoreFramework/#why-ioc-and-di-are-essential","title":"Why IoC and DI are Essential","text":""},{"location":"langdives/Java/Spring/SpringCoreFramework/#challenges-with-ioc","title":"Challenges with IoC","text":""},{"location":"langdives/Java/Spring/SpringCoreFramework/#full-ioc-implementation","title":"Full IoC Implementation","text":"

Let\u2019s look at a complete example using Spring Core with constructor injection:

Full IoC Implementation Example Java Configuration Example
@Configuration\npublic class AppConfig {\n\n    @Bean\n    public PaymentService paymentService() {\n        return new PaymentService();\n    }\n\n    @Bean\n    public OrderService orderService(PaymentService paymentService) {\n        return new OrderService(paymentService);\n    }\n}\n
OrderService and PaymentService
@Component\npublic class PaymentService {\n    public void processPayment() {\n        System.out.println(\"Payment processed.\");\n    }\n}\n\n@Component\npublic class OrderService {\n    private final PaymentService paymentService;\n\n    @Autowired\n    public OrderService(PaymentService paymentService) {\n        this.paymentService = paymentService;\n    }\n\n    public void placeOrder() {\n        System.out.println(\"Order placed.\");\n        paymentService.processPayment();\n    }\n}\n
Main Class to Run
public class Main {\n    public static void main(String[] args) {\n        ApplicationContext context = new AnnotationConfigApplicationContext(AppConfig.class);\n        OrderService orderService = context.getBean(OrderService.class);\n        orderService.placeOrder();\n    }\n}\n
"},{"location":"langdives/Java/Spring/SpringCoreFramework/#beans-and-ioc-container","title":"Beans and IoC Container","text":"

Spring beans are the building blocks of any Spring application. They represent the objects that the Spring IoC container manages throughout their lifecycle. Understanding beans and their lifecycle is critical for mastering the Spring Core framework. Let\u2019s explore everything about beans\u2014from creation, scopes, lifecycle, initialization, destruction, and more\u2014in detail.

"},{"location":"langdives/Java/Spring/SpringCoreFramework/#what-is-a-bean","title":"What is a Bean ?","text":"

A bean in Spring is an object that is instantiated, assembled, and managed by the IoC container. The container creates, initializes, and wires these beans, ensuring that all dependencies are injected as needed. Beans are usually defined using:

"},{"location":"langdives/Java/Spring/SpringCoreFramework/#configuring-beans","title":"Configuring Beans","text":"

Spring provides multiple ways to declare beans and register them with the IoC container:

"},{"location":"langdives/Java/Spring/SpringCoreFramework/#xml-based-configuration","title":"XML-based Configuration","text":"

Traditional way of defining beans using XML files.

XML Config Example
<beans xmlns=\"http://www.springframework.org/schema/beans\" \n    xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"\n    xsi:schemaLocation=\"http://www.springframework.org/schema/beans \n    http://www.springframework.org/schema/beans/spring-beans.xsd\">\n\n    <bean id=\"paymentService\" class=\"com.example.PaymentService\"/>\n    <bean id=\"orderService\" class=\"com.example.OrderService\">\n        <constructor-arg ref=\"paymentService\"/>\n    </bean>\n</beans>\n
"},{"location":"langdives/Java/Spring/SpringCoreFramework/#java-based-configuration","title":"Java-based Configuration","text":"

Spring allows you to use Java classes to define beans. This is cleaner and avoids XML boilerplate.

Java Config Example
@Configuration\npublic class AppConfig {\n\n    @Bean\n    public PaymentService paymentService() {\n        return new PaymentService();\n    }\n\n    @Bean\n    public OrderService orderService() {\n        return new OrderService(paymentService());\n    }\n}\n
"},{"location":"langdives/Java/Spring/SpringCoreFramework/#component-scanning-with-annotations","title":"Component Scanning with Annotations","text":"

You can annotate classes with @Component, @Service, @Repository, or @Controller. Spring automatically detects these beans if @ComponentScan is enabled.

Annotations Example
@Component\npublic class PaymentService { }\n\n@Component\npublic class OrderService {\n    private final PaymentService paymentService;\n\n    @Autowired\n    public OrderService(PaymentService paymentService) {\n        this.paymentService = paymentService;\n    }\n}\n
@Configuration\n@ComponentScan(basePackages = \"com.example\")\npublic class AppConfig { }\n
"},{"location":"langdives/Java/Spring/SpringCoreFramework/#bean-scopes","title":"Bean Scopes","text":"

The scope of a bean defines the lifecycle and visibility of that bean within the Spring IoC container.

"},{"location":"langdives/Java/Spring/SpringCoreFramework/#types-of-bean-scopes","title":"Types of Bean Scopes","text":"

singleton (default): A single instance of the bean is created and shared across the entire application, Used for stateless beans.

Example
@Scope(\"singleton\")\n@Component\npublic class SingletonBean { }\n

prototype: A new instance is created every time the bean is requested, Useful for stateful objects or temporary tasks.

Example
@Scope(\"prototype\")\n@Component\npublic class PrototypeBean { }\n

request: A new bean instance is created for each HTTP request; used in web applications (see the sketch after this list).

session: A single instance is created per HTTP session.

globalSession: A global session scope for applications using portlets.
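
A minimal sketch of a request-scoped bean; the proxy mode lets it be injected into longer-lived singleton beans (the class name RequestContext is illustrative):

Request Scope Example
@Component\n@Scope(value = \"request\", proxyMode = ScopedProxyMode.TARGET_CLASS)\npublic class RequestContext {\n    // A fresh instance is created for every HTTP request\n    private String requestId;\n\n    // Getters and Setters\n}\n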

"},{"location":"langdives/Java/Spring/SpringCoreFramework/#bean-lifecycle","title":"Bean Lifecycle","text":"

Each Spring bean goes through several lifecycle phases, starting from instantiation to destruction. The Spring IoC container manages this lifecycle internally.

Bean Lifecycle Phases: instantiation, dependency injection, initialization callbacks (e.g., @PostConstruct), usage, and destruction callbacks (e.g., @PreDestroy).

"},{"location":"langdives/Java/Spring/SpringCoreFramework/#bean-lifecycle-callbacks","title":"Bean Lifecycle Callbacks","text":"

InitializingBean Interface: If a bean implements the InitializingBean interface, it must override the afterPropertiesSet() method, which is called after all properties are set.

Example
public class MyService implements InitializingBean {\n    @Override\n    public void afterPropertiesSet() {\n        System.out.println(\"MyService is initialized.\");\n    }\n}\n

DisposableBean Interface: If a bean implements the DisposableBean interface, it must override the destroy() method, which is called during the destruction phase.

Example
public class MyService implements DisposableBean {\n    @Override\n    public void destroy() {\n        System.out.println(\"MyService is being destroyed.\");\n    }\n}\n

Using @PostConstruct and @PreDestroy: These annotations are the recommended way to manage initialization and destruction callbacks.

Example
@Component\npublic class MyService {\n\n    @PostConstruct\n    public void init() {\n        System.out.println(\"Initialization logic in @PostConstruct.\");\n    }\n\n    @PreDestroy\n    public void cleanup() {\n        System.out.println(\"Cleanup logic in @PreDestroy.\");\n    }\n}\n

Custom Initialization and Destruction Methods: You can also specify custom methods in the bean configuration.

Example (Java Config)
@Bean(initMethod = \"init\", destroyMethod = \"cleanup\")\npublic MyService myService() {\n    return new MyService();\n}\n
"},{"location":"langdives/Java/Spring/SpringCoreFramework/#eager-vs-lazy-initialization","title":"Eager vs. Lazy Initialization","text":""},{"location":"langdives/Java/Spring/SpringCoreFramework/#eager-initialization","title":"Eager Initialization","text":"

All singleton beans are created at application startup (default behavior), which avoids first-use latency since all dependencies are resolved upfront.

"},{"location":"langdives/Java/Spring/SpringCoreFramework/#lazy-initialization","title":"Lazy Initialization","text":"

Beans are created only when they are first requested, You can enable lazy initialization at the bean level using @Lazy.

Example
@Lazy\n@Component\npublic class LazyBean { }\n
"},{"location":"langdives/Java/Spring/SpringCoreFramework/#di-and-bean-relationships","title":"DI and Bean Relationships","text":"

Spring IoC container resolves bean dependencies through constructor injection, setter injection, or field injection, You can specify bean dependencies explicitly using the depends-on attribute in XML.

Example (XML)
<bean id=\"databaseConnection\" class=\"com.example.DatabaseConnection\"/>\n<bean id=\"orderService\" class=\"com.example.OrderService\" depends-on=\"databaseConnection\"/>\n

This ensures that the databaseConnection bean is initialized before the orderService bean.

"},{"location":"langdives/Java/Spring/SpringCoreFramework/#beans-circular-dependencies","title":"Beans Circular Dependencies","text":"

A circular dependency occurs when two or more beans are mutually dependent on each other.

Example
@Component\npublic class A {\n    @Autowired\n    private B b;\n}\n\n@Component\npublic class B {\n    @Autowired\n    private A a;\n}\n

Spring resolves circular dependencies between singleton beans by exposing early bean references, but this fails for constructor injection. To avoid circular dependencies: refactor the code to reduce coupling, or use setter injection instead of constructor injection (see the sketch below).
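
A minimal sketch of breaking the cycle with setter injection; Spring can then instantiate both beans first and wire them afterwards:

Breaking a Circular Dependency Example
@Component\npublic class A {\n    private B b;\n\n    @Autowired\n    public void setB(B b) {  // Injected after both beans exist\n        this.b = b;\n    }\n}\n\n@Component\npublic class B {\n    private A a;\n\n    @Autowired\n    public void setA(A a) {\n        this.a = a;\n    }\n}\n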

"},{"location":"langdives/Java/Spring/SpringCoreFramework/#bean-definition-inheritance","title":"Bean Definition Inheritance","text":"

Spring allows bean definitions to inherit properties from a parent bean. This helps reduce configuration duplication.

Example (XML)
<bean id=\"parentBean\" class=\"com.example.BaseService\">\n    <property name=\"name\" value=\"Base Service\"/>\n</bean>\n\n<bean id=\"childBean\" class=\"com.example.ChildService\" parent=\"parentBean\">\n    <property name=\"name\" value=\"Child Service\"/>\n</bean>\n
"},{"location":"langdives/Java/Spring/SpringCoreFramework/#aspect-oriented-programming","title":"Aspect-Oriented Programming","text":"

AOP allows you to separate cross-cutting concerns (like logging, security, or transaction management) from the business logic. In Spring, AOP is implemented using aspects, advice, and pointcuts.

AOP Example in Spring Define an Aspect
@Aspect\n@Component\npublic class LoggingAspect {\n    @Before(\"execution(* com.example.*.*(..))\")\n    public void logBefore(JoinPoint joinPoint) {\n        System.out.println(\"Before method: \" + joinPoint.getSignature().getName());\n    }\n}\n
Enable AOP
@Configuration\n@EnableAspectJAutoProxy\npublic class AppConfig { }\n
"},{"location":"langdives/Java/Spring/SpringCoreFramework/#spring-events","title":"Spring Events","text":"

Spring supports an event-driven model that allows you to build decoupled components. The ApplicationContext can publish events and allow listeners to respond to them.

Example of a Custom Event
public class CustomEvent extends ApplicationEvent {\n    public CustomEvent(Object source) {\n        super(source);\n    }\n}\n
@Component\npublic class CustomEventListener implements ApplicationListener<CustomEvent> {\n    @Override\n    public void onApplicationEvent(CustomEvent event) {\n        System.out.println(\"Received custom event: \" + event);\n    }\n}\n
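
To publish the event, inject the ApplicationEventPublisher. A minimal sketch (the class name CustomEventPublisher is illustrative):

Publishing the Event Example
@Component\npublic class CustomEventPublisher {\n    private final ApplicationEventPublisher publisher;\n\n    public CustomEventPublisher(ApplicationEventPublisher publisher) {\n        this.publisher = publisher;\n    }\n\n    public void publish() {\n        // Delivered synchronously to all matching listeners\n        publisher.publishEvent(new CustomEvent(this));\n    }\n}\n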
"},{"location":"langdives/Java/Spring/SpringCoreFramework/#spring-expression-lang-spel","title":"Spring Expression Lang (SpEL)","text":"

SpEL allows you to manipulate and query beans dynamically. It can be used inside XML or annotations.

Example of SpEL
<bean id=\"myBean\" class=\"com.example.MyClass\">\n    <property name=\"value\" value=\"#{2 + 3}\"/>\n</bean>\n
"},{"location":"langdives/Java/Spring/SpringCoreFramework/#summary","title":"Summary","text":""},{"location":"langdives/Java/Spring/SpringFrameworkVsSpringBoot/","title":"Difference B/W Spring Framework & Boot","text":"

A detailed comparison table that covers all possible differences between Spring Boot and Spring Framework. This comparison covers everything from setup, configuration, embedded servers, web applications, testing, microservices support, and much more.

"},{"location":"langdives/Java/Spring/SpringFrameworkVsSpringBoot/#differences","title":"Differences","text":"Category Spring Framework Spring Boot Purpose A comprehensive framework for building Java applications. An extension of Spring Framework to simplify configuration and create stand-alone applications. Setup Requires manual setup, including XML or Java-based configuration. Minimal setup with auto-configuration based on the classpath. Main Focus Provides flexibility and control over every aspect of the application. Focuses on rapid development with sensible defaults and opinions. Configuration Can use XML, Java-based, or annotation-based configuration. Uses annotations and properties/YAML files for configuration. Learning Curve Requires more learning time due to complexity. Easier to get started with for beginners due to auto-configuration and pre-built setups. Project Dependencies Requires managing multiple dependencies for each feature manually. Provides starters (e.g., spring-boot-starter-web) to include required dependencies. Embedded Server No embedded server support; WAR files must be deployed to external servers (Tomcat, Jetty, etc.). Comes with embedded servers (Tomcat, Jetty, or Undertow) for running stand-alone applications. Deployment Deploy WAR/EAR files to external servers. Runs applications directly as JAR files with embedded servers. Auto-Configuration No auto-configuration; requires manual configuration of components. Auto-configures components based on available classpath dependencies. Application Entry Point Relies on external servlet containers to manage the lifecycle. Uses @SpringBootApplication as the entry point with SpringApplication.run(). Microservices Support Not specialized for microservices; requires additional tools. Built with microservices architecture in mind. Supports Spring Cloud, Eureka, Feign, etc. Performance Requires more configuration to optimize performance. Better suited for lightweight and high-performance microservices. Profiles Supports profiles for environment-specific configurations but requires more setup. Supports profiles easily through application.yml or application.properties. Testing Provides JUnit support but requires manual configuration for context loading. Provides easy testing with @SpringBootTest, @MockBean, @DataJpaTest, and others. Security Uses Spring Security but requires manual integration. Integrates Spring Security easily with spring-boot-starter-security. Database Access Provides JDBC, JPA, ORM support, but requires more configuration. Simplifies database access with Spring Data JPA and auto-configuration of DataSource. Starters and Dependency Management Requires manual management of dependencies and configurations. Provides Spring Boot Starters that bundle all required dependencies for specific use cases. Template Engines Supports Thymeleaf, JSP, and others with manual setup. Supports template engines with starters (e.g., spring-boot-starter-thymeleaf). Command-Line Interface (CLI) No built-in CLI support. Provides Spring Boot CLI to run Groovy scripts for quick development. Actuator and Monitoring Requires external monitoring tools or custom configurations. Comes with Spring Boot Actuator to monitor application health, metrics, and endpoints. DevTools for Hot Reload Requires manual setup for hot reloading of code changes. Provides Spring Boot DevTools for hot reloading during development. Support for Reactive Programming Supports Spring WebFlux and Project Reactor (from version 5.x). 
Fully supports Spring WebFlux for reactive, non-blocking programming. Circuit Breakers & Resilience Requires integration with third-party libraries like Hystrix. Seamlessly integrates with Resilience4j and Spring Cloud for resilience. Integration with Cloud Platforms Requires Spring Cloud or manual setup for cloud integration. Seamlessly integrates with Spring Cloud for cloud-native development. Logging Configuration Requires manual configuration of logging frameworks (e.g., Log4j, SLF4J). Provides auto-configured logging using Logback by default. Health Checks and Metrics Requires manual configuration to expose health metrics. Provides Actuator endpoints (/actuator/health, /actuator/metrics) out-of-the-box. Web Framework Uses Spring MVC for building web applications. Uses Spring MVC or Spring WebFlux with easy setup through starters. Restful API Development Requires manual setup of controllers and components. Provides easy development with @RestController and auto-configuration of REST endpoints. Command-Line Arguments Support Requires manual handling of command-line arguments. Easily reads command-line arguments with SpringApplication or @Value. Caching Support Requires setting up EhCache, Guava, or other caching solutions manually. Provides easy caching configuration with @EnableCaching and auto-configuration. Internationalization (i18n) Supports i18n but requires more setup. Supports i18n with minimal configuration through application.properties. Job Scheduling Requires integration with Quartz or other scheduling libraries. Supports scheduling with @Scheduled and Task Executors. Dependency Injection (DI) Provides dependency injection with IoC container. Same as Spring Framework but simplifies it with auto-wiring using @Autowired. Backward Compatibility Must manually update configurations when upgrading versions. Provides backward compatibility with most Spring projects. Community and Ecosystem Large community and extensive ecosystem. Built on top of Spring Framework with additional tools for modern development."},{"location":"langdives/Java/Spring/SpringFrameworkVsSpringBoot/#summary","title":"Summary","text":"

Spring Boot simplifies the Spring Framework by providing auto-configuration, embedded servers, starter dependencies, and production-ready tooling such as Actuator.

Spring Framework gives more control and flexibility but at the cost of manual setup and configuration.

Spring Boot is optimized for rapid development, especially for microservices and cloud-native applications.

Spring Framework is still relevant for legacy applications or when fine-grained control is necessary.

"},{"location":"techdives/DistrubutedConcepts/HighAvailabilityFaultTolerance/","title":"High Availability and Fault Tolerance","text":""},{"location":"techdives/DistrubutedConcepts/HighAvailabilityFaultTolerance/#high-availability-ha","title":"High Availability (HA)","text":"

High Availability (HA) refers to a system or infrastructure's ability to remain operational and accessible for a very high percentage of time, minimizing downtime. In essence, it's a design principle used in IT to ensure that services, applications, or systems are continuously available, even in the event of hardware failures, software issues, or unexpected disruptions.

Key Aspects of High Availability include redundancy, automatic failover, load balancing, and continuous health monitoring.

"},{"location":"techdives/DistrubutedConcepts/HighAvailabilityFaultTolerance/#ha-levels","title":"HA Levels","text":"

Availability is often expressed as a percentage. For instance, an uptime of 99.99% means the service is expected to be down for only 52 minutes in a year.

Common availability standards: 99.9% (\"three nines\", about 8.8 hours of downtime per year), 99.99% (about 52 minutes), and 99.999% (about 5 minutes).

"},{"location":"techdives/DistrubutedConcepts/HighAvailabilityFaultTolerance/#ha-use-cases","title":"HA Use Cases","text":""},{"location":"techdives/DistrubutedConcepts/HighAvailabilityFaultTolerance/#ha-challenges","title":"HA Challenges","text":""},{"location":"techdives/DistrubutedConcepts/HighAvailabilityFaultTolerance/#ha-when-to-use","title":"HA When to Use ?","text":""},{"location":"techdives/DistrubutedConcepts/HighAvailabilityFaultTolerance/#fault-tolerance-ft","title":"Fault Tolerance (FT)","text":"

Fault Tolerance refers to the ability of a system, network, or application to continue functioning correctly even when one or more of its components fail. It ensures continuous operation without loss of service, despite hardware, software, or other types of faults occurring in the system. Fault tolerance plays a crucial role in ensuring high reliability and availability of critical systems.

Key Principles of Fault Tolerance include redundancy, replication, failure isolation, and graceful degradation.

"},{"location":"techdives/DistrubutedConcepts/HighAvailabilityFaultTolerance/#ft-techniques","title":"FT Techniques:","text":"
  1. Hardware Fault Tolerance:

    • RAID (Redundant Array of Independent Disks): Data is mirrored or striped across multiple hard drives to prevent data loss from disk failure.
    • Dual Power Supplies: Servers often include multiple power supplies to prevent failure if one unit fails.
    • Hot Swapping: Faulty components like disks or power units can be replaced without shutting down the system.
  2. Software Fault Tolerance:

    • Checkpoints and Rollbacks: Systems can save checkpoints periodically, and if an error occurs, they revert to the last known good state.
    • Replication in Distributed Systems: Critical services are duplicated across multiple servers to ensure that if one server fails, others take over.
  3. Network Fault Tolerance:

    • Multiple Network Paths: Routing data over multiple paths ensures that if one link fails, another path is used.
    • Load Balancers: They distribute network traffic across multiple servers or systems, ensuring no single point of failure.
  4. Error Detection and Recovery:

    • Watchdog Timers: Monitor system processes and restart them if they hang.
    • Checksums and Parity Checks: Verify data integrity and correct transmission errors.
"},{"location":"techdives/DistrubutedConcepts/HighAvailabilityFaultTolerance/#ft-real-world-examples","title":"FT Real-World Examples","text":""},{"location":"techdives/DistrubutedConcepts/HighAvailabilityFaultTolerance/#ft-challenges","title":"FT Challenges","text":""},{"location":"techdives/DistrubutedConcepts/HighAvailabilityFaultTolerance/#ft-in-contrast-to-resilience","title":"FT in Contrast to Resilience","text":""},{"location":"techdives/DistrubutedConcepts/HighAvailabilityFaultTolerance/#ft-when-to-use","title":"FT When to Use ?","text":""},{"location":"techdives/DistrubutedConcepts/HighAvailabilityFaultTolerance/#ha-vs-ft","title":"HA vs FT","text":"

High Availability (HA) and Fault Tolerance (FT) are two strategies aimed at keeping systems operational, but they approach this goal differently. Here's a detailed comparison:

In other words, high availability aims to reduce downtime, whereas fault-tolerant systems aim for zero downtime, even during failures.

Aspect High Availability (HA) Fault Tolerance (FT) Definition Ensures minimal downtime by quickly switching to backup systems when a failure occurs. Ensures continuous operation even during failures, with no noticeable interruption. Goal Minimize downtime and ensure service is restored quickly. Eliminate downtime and maintain seamless operation during faults. Approach Uses redundant components and failover systems to switch operations when needed. Uses duplication of systems to ensure tasks are always mirrored on another system. Downtime Small amount of downtime during failover (milliseconds to minutes). No downtime; systems operate continuously, even during faults. Example Use Cases E-commerce websites (e.g., Amazon) that switch servers when one fails. Airplane control systems, which cannot afford any interruptions. Redundancy Type Active-Passive: Backup components are activated only when primary systems fail. Active-Active: All components are working simultaneously, and one continues if the other fails. Cost Less expensive since backup systems are not always active. More expensive due to constant replication and active systems running in parallel. Complexity Easier to implement and manage due to reliance on failover mechanisms. More complex, requiring real-time synchronization and parallel operation. Performance Impact Some performance hit during failover but minimal. Higher overhead, as multiple systems operate simultaneously. Use Case Example Cloud platforms (like AWS) use high availability to ensure that servers recover quickly after a failure. Nuclear power plants employ fault-tolerant systems to keep critical processes running with no interruptions. Failure Handling Handles component failures through redundancy and quick recovery mechanisms. Prevents failure from affecting the system by running identical processes or systems in parallel."},{"location":"techdives/DistrubutedConcepts/HighAvailabilityFaultTolerance/#summary","title":"Summary","text":"

In summary, high availability ensures that critical systems are always accessible with minimal interruptions. Organizations rely on HA strategies to meet customer expectations, protect revenue, and ensure business continuity, especially in industries where even a small amount of downtime can have serious consequences and fault tolerance is the ability of a system to keep operating without interruption despite experiencing faults or failures. It is crucial for mission-critical systems in industries like aviation, finance, and healthcare, where downtime or errors could lead to catastrophic outcomes.

In essence, High Availability focuses on minimizing downtime by recovering quickly from failures, while Fault Tolerance eliminates downtime altogether by ensuring the system continues running seamlessly. HA is less costly and easier to implement, while FT is expensive and complex but essential for critical environments where even a few seconds of downtime are unacceptable.

"},{"location":"techdives/DistrubutedSystems/DockerAndK8s/","title":"Docker and Kubernetes","text":""},{"location":"techdives/DistrubutedSystems/DockerAndK8s/#overview","title":"Overview","text":""},{"location":"techdives/DistrubutedSystems/DockerAndK8s/#docker","title":"Docker","text":"

Docker is a platform that enables developers to build, package, and run applications in lightweight containers. It ensures applications are portable and can run consistently across different environments, from development to production.

Key Components: the Docker Engine, Dockerfiles, images, containers, and registries (e.g., Docker Hub).

"},{"location":"techdives/DistrubutedSystems/DockerAndK8s/#kubernetes","title":"Kubernetes","text":"

Kubernetes (often abbreviated as K8s) is an open-source platform for automating the deployment, scaling, and management of containerized applications. It abstracts away the complexity of running containers at scale across multiple machines.

Key Components: the control plane (API server, scheduler, controller manager, etcd) and worker nodes that run pods via the kubelet.

"},{"location":"techdives/DistrubutedSystems/DockerAndK8s/#hierarchy-and-relationship","title":"Hierarchy and Relationship","text":""},{"location":"techdives/DistrubutedSystems/DockerAndK8s/#docker-vs-kubernetes","title":"Docker vs Kubernetes","text":"

Docker focuses on building, packaging, and running containers. It handles application-level concerns whereas Kubernetes focuses on orchestrating, scaling, and managing containers across distributed environments.

"},{"location":"techdives/DistrubutedSystems/DockerAndK8s/#how-they-work-together","title":"How they Work Together","text":""},{"location":"techdives/DistrubutedSystems/DockerAndK8s/#using-docker-with-kubernetes","title":"Using Docker with Kubernetes","text":"Steps

Step-1: Build Docker Images. Create a Dockerfile for your application and build the image:

docker build -t my-app:latest .\n

Step-2: Push to Registry. Store the image in a registry (e.g., Docker Hub):

docker push my-app:latest\n

Step-3: Deploy on Kubernetes. Use the built Docker image in a Kubernetes Deployment YAML file and apply it with:

kubectl apply -f deployment.yaml\n

"},{"location":"techdives/DistrubutedSystems/DockerAndK8s/#4-creating-and-managing-docker-and-kubernetes-components-individually","title":"4. Creating and Managing Docker and Kubernetes Components Individually","text":""},{"location":"techdives/DistrubutedSystems/DockerAndK8s/#creating-docker-components","title":"Creating Docker Components","text":"
  1. Install Docker: Follow Docker\u2019s official installation guide for your operating system.

  2. Create a Dockerfile: Example:

    FROM python:3.9\nWORKDIR /app\nCOPY . .\nRUN pip install -r requirements.txt\nCMD [\"python\", \"app.py\"]\n

  3. Build and Run Docker Containers:

    Build Image:
    docker build -t my-app:latest .\n
  5. Run Container:

    docker run -d -p 5000:5000 my-app:latest\n

  6. Use Docker Compose:

  7. Define services in a docker-compose.yml:
    version: '3.8'\nservices:\n  web:\n    image: my-app:latest\n    ports:\n      - \"5000:5000\"\n
  8. Start services with:
    docker-compose up\n
"},{"location":"techdives/DistrubutedSystems/DockerAndK8s/#creating-kubernetes-components","title":"Creating Kubernetes Components","text":"
  1. Install Kubernetes: Use Minikube for local development or create a production cluster using cloud providers like GKE, AKS, or EKS.

  2. Define Kubernetes Resources: Create YAML manifests for Deployments and Services.

    Deployment YAML:

    apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: my-app\nspec:\n  replicas: 2\n  selector:\n    matchLabels:\n      app: my-app\n  template:\n    metadata:\n      labels:\n        app: my-app\n    spec:\n      containers:\n        - name: my-app\n          image: my-app:latest\n          ports:\n            - containerPort: 5000\n

    Service YAML:

    apiVersion: v1\nkind: Service\nmetadata:\n  name: my-app-service\nspec:\n  type: NodePort\n  ports:\n    - port: 5000\n      targetPort: 5000\n  selector:\n    app: my-app\n

  3. Deploy to Kubernetes:

    kubectl apply -f deployment.yaml\nkubectl apply -f service.yaml\n

"},{"location":"techdives/DistrubutedSystems/DockerAndK8s/#5-deployment-scenarios","title":"5. Deployment Scenarios","text":""},{"location":"techdives/DistrubutedSystems/DockerAndK8s/#single-machine-deployment","title":"Single Machine Deployment","text":""},{"location":"techdives/DistrubutedSystems/DockerAndK8s/#multiple-machines-deployment","title":"Multiple Machines Deployment","text":""},{"location":"techdives/DistrubutedSystems/DockerAndK8s/#6-communication-between-services","title":"6. Communication Between Services","text":""},{"location":"techdives/DistrubutedSystems/DockerAndK8s/#7-key-differences-between-docker-compose-and-kubernetes","title":"7. Key Differences Between Docker Compose and Kubernetes","text":"Aspect Docker Compose Kubernetes Purpose Local development and testing Production orchestration Configuration Single docker-compose.yml file Multiple YAML files for resources Scaling Manual Automated scaling with kubectl scale High Availability Limited Built-in redundancy and self-healing Use Case Simple applications on one machine Complex workloads across clusters"},{"location":"techdives/DistrubutedSystems/DockerAndK8s/#8-additional-considerations","title":"8. Additional Considerations","text":""},{"location":"techdives/DistrubutedSystems/DockerAndK8s/#9-how-to-use-docker-compose-to-get-the-application-from-github-and-build-the-docker-image","title":"9. How to Use Docker Compose to Get the Application from GitHub and Build the Docker Image","text":""},{"location":"techdives/DistrubutedSystems/DockerAndK8s/#getting-from-github-and-using-docker-compose-build-context","title":"Getting from Github and using Docker Compose Build Context","text":"

Docker Compose can also use the build context to pull code directly from GitHub when creating an image. Here\u2019s how:

  1. Create a docker-compose.yml with GitHub Repository as the Build Context:
version: '3.8'\nservices:\n  app:\n    build:\n      context: https://github.com/your-username/your-repo.git\n      dockerfile: Dockerfile\n    ports:\n      - \"5000:5000\"\n
  2. Run Docker Compose to Build and Start the Container:
docker-compose up --build\n

Note: - This method works only if the GitHub repository is public. For private repositories, you\u2019ll need to provide authentication (e.g., via SSH keys or GitHub tokens). - Docker will pull the latest version from the specified GitHub repository and build the image based on the Dockerfile in the repository.

"},{"location":"techdives/DistrubutedSystems/DockerAndK8s/#10-silly-and-practical-questions-numbered","title":"10. Silly and Practical Questions (Numbered)","text":"
  1. Can I use a Docker Compose file with Kubernetes? Not directly. Kubernetes doesn\u2019t understand Docker Compose syntax, but there are tools like Kompose that can convert Docker Compose files into Kubernetes YAML files.

  2. What happens if I try to run a Docker Compose file inside a Kubernetes cluster? It won\u2019t work. Kubernetes will look at you confused (figuratively), because it expects YAML manifests with its own syntax, not a docker-compose.yml.

  3. Why do Kubernetes YAML files look scarier than Docker Compose files? Kubernetes YAML files are more complex because they handle more advanced scenarios like scaling, networking, and rolling updates, which Docker Compose doesn\u2019t attempt to address.

  4. Do I need to uninstall Docker if I switch to Kubernetes? Nope! Docker is still useful for building and testing images locally, even if you\u2019re deploying to Kubernetes. Kubernetes runs the same container images that Docker builds (and older Kubernetes versions could even use Docker itself as the container runtime).

  5. Will Docker containers fight with Kubernetes Pods if they run on the same machine? Nope, they\u2019ll coexist peacefully. Docker containers and Kubernetes Pods can run side by side without conflict. They\u2019re friends, not rivals!

  6. Can I copy-paste my Docker Compose file into Kubernetes and hope it works? Sorry, no shortcuts here. You need to convert the Compose file into Kubernetes resources, either manually or using tools like Kompose.

  7. Is Docker Compose faster than Kubernetes because it has fewer YAML files? Yes, Docker Compose is faster to set up for local development because it\u2019s simpler. But for production-scale orchestration, Kubernetes is much more powerful.

  8. How do I know if my container is happy inside a Kubernetes Pod? Check with this command:

    kubectl get pods\n
    If the status is Running, your container is content. If you see CrashLoopBackOff, it\u2019s definitely not happy!

  9. Can I use Kubernetes without the cloud, or will it complain? You can use Minikube or kind (Kubernetes in Docker) to run Kubernetes locally on your machine. No cloud required.

  10. What\u2019s the difference between docker-compose up and kubectl apply -f?

    • docker-compose up: Starts containers defined in a docker-compose.yml file.
    • kubectl apply -f: Deploys resources (like Pods, Deployments) described in a Kubernetes YAML file to your cluster.
  11. Do I still need to learn Docker Swarm if I already know Kubernetes? Not really. Docker Swarm is simpler but not as widely used in production as Kubernetes. Kubernetes has become the de facto standard.

  12. Can a single Pod run multiple Docker Compose services? Yes! A Pod can run multiple containers, similar to how Docker Compose runs multiple services. However, in Kubernetes, these containers should be tightly coupled (e.g., sharing resources).

  13. If Docker Compose is easier, why do people torture themselves with Kubernetes? Kubernetes offers features like scaling, self-healing, and load balancing. It\u2019s overkill for simple projects but essential for large, distributed applications.

  14. Is Kubernetes just a fancy way of saying, \u201cI don\u2019t want to use Docker Compose\u201d? Not exactly. Docker Compose is great for local setups, while Kubernetes is a powerful orchestration tool for running applications across multiple nodes at scale.

  15. What\u2019s the difference between a Pod and a Container? Can I use the words interchangeably? Not quite. A Pod is a wrapper that can contain one or more containers. Pods are the smallest deployable unit in Kubernetes, but a container is just an isolated environment for running applications.

  16. If a container crashes in Kubernetes, does Kubernetes get sad? Nope! Kubernetes will restart the container automatically. That\u2019s part of its self-healing magic.

  17. Will my application break if I use a Docker image from 2015? It might! Older images could have compatibility issues or security vulnerabilities. Use them only if you\u2019re sure they still meet your needs.

  18. Is Kubernetes allergic to Windows, or will it run happily there? Kubernetes supports Windows nodes, but the experience is smoother with Linux. Most people deploy Kubernetes on Linux-based clusters.

  19. Can I use both Docker and Kubernetes at the same time? Or will it cause chaos? Yes, you can use both. Build your containers with Docker, push them to a registry, and deploy them with Kubernetes. No chaos \u2013 just smooth workflows.

  20. Why can\u2019t Docker Compose just learn scaling and take over Kubernetes' job? Docker Compose is intentionally lightweight and simple. Adding Kubernetes-like features would complicate it and defeat its original purpose.

  21. How much YAML is too much YAML? If you start dreaming in YAML, it\u2019s probably too much. But seriously, Kubernetes relies heavily on YAML, so learning to manage it effectively is key.

  22. Can Kubernetes work without YAML files? (Please say yes!) Unfortunately, no. YAML files are essential for defining resources in Kubernetes. You can use Helm charts to simplify it, but YAML is unavoidable.

  23. What happens if I forget to push my Docker image before deploying with Kubernetes? Your deployment will fail because Kubernetes won\u2019t find the image in the registry. Always remember to push!

  24. Can I use kubectl commands on Docker containers? Nope. kubectl is specifically for managing Kubernetes resources. Use docker commands for Docker containers.

  25. Is Kubernetes only for tech wizards, or can normal humans use it too? Normal humans can use it too! The learning curve is steep, but with practice, anyone can master it.

  26. Do I need to sacrifice sleep to understand Kubernetes? Maybe at first. But once you get the hang of it, Kubernetes will become your friend, and sleep will return.

  27. Can a Docker container tell the difference between running on Kubernetes and Docker Compose? Nope! The container itself doesn\u2019t care where it\u2019s running. As long as it gets its dependencies and configuration, it\u2019ll happily run anywhere.

  28. Can I run two Docker Compose files on one machine? Yes, use the -p option to specify different project names for each Compose file (see the sketch at the end of this list).

  29. Can services communicate across multiple machines? Yes, with Docker Swarm or Kubernetes, services can communicate across machines using overlay networks or Kubernetes networking.

  30. Is Docker Compose suitable for production? Not recommended for large-scale production. Use Kubernetes or Docker Swarm instead.

  31. How do I set up Kubernetes on a single machine? Use Minikube to run a local Kubernetes cluster.

  32. What file formats are used by Docker and Kubernetes? Docker uses Dockerfile and docker-compose.yml. Kubernetes uses YAML files for resources like Deployments and Services.
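As a minimal sketch of question 28 above (project and file names here are hypothetical), giving each Compose file its own project name keeps container, network, and volume names from colliding:

# Two independent stacks on the same machine, namespaced by project name\ndocker-compose -p shop -f shop-compose.yml up -d\ndocker-compose -p blog -f blog-compose.yml up -d\n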

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/","title":"ElasticSearch","text":"

Elasticsearch is a search engine based on Apache Lucene. It provides a distributed, multitenant-capable full-text search engine with an HTTP web interface and schema-free JSON documents.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#elasticsearch-basics-and-fundamentals","title":"ElasticSearch Basics and Fundamentals","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#1-core-concepts","title":"1. Core Concepts:","text":"

Diving into Elasticsearch's core concepts is essential for understanding its architecture and functionality.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#11-documents-and-indices","title":"1.1 Documents and Indices","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#12-mapping-and-types","title":"1.2 Mapping and Types","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#13-shards-and-replicas","title":"1.3 Shards and Replicas","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#14-cluster-nodes-and-roles","title":"1.4 Cluster, Nodes, and Roles","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#15-elasticsearch-api-actions-crud","title":"1.5 Elasticsearch API Actions (CRUD)","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#summary-of-the-core-concepts","title":"Summary of the Core Concepts","text":"
  1. Data is divided into documents, stored in indices, and distributed across shards.
  2. Nodes work together in a cluster, balancing the load for efficient querying and data redundancy.
"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#2-inverted-index","title":"2. Inverted Index","text":"

An inverted index is a fundamental data structure in Elasticsearch and other search engines. It optimizes search efficiency by storing a mapping from terms (words) to their locations within documents. Let\u2019s break it down into key components and processes:

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#21-core-structure-of-inverted-index","title":"2.1. Core Structure of Inverted Index","text":"

For instance, if a dataset contains the documents: - Doc 1: \u201cElasticsearch powers search\u201d - Doc 2: \u201cSearch powers insights\u201d

The inverted index would look like this:

\"Elasticsearch\" -> [Doc 1]\n\"powers\" -> [Doc 1, Doc 2]\n\"search\" -> [Doc 1, Doc 2]\n\"insights\" -> [Doc 2]\n

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#22-building-the-inverted-index","title":"2.2. Building the Inverted Index","text":"

The process involves several stages: - Tokenization: Splitting text into words or tokens. - Normalization: Making tokens consistent, like converting to lowercase. - Stemming/Lemmatization (optional): Reducing words to their base or root forms. - Indexing: Populating the index with terms and the corresponding document references.
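To see these stages in action, Elasticsearch\u2019s _analyze API returns the tokens produced for a given text; here is a minimal sketch using the built-in standard analyzer, which tokenizes and lowercases:

POST /_analyze\n{\n  \"analyzer\": \"standard\",\n  \"text\": \"Elasticsearch Powers Search\"\n}\n

This would return the tokens elasticsearch, powers, and search, which is exactly what ends up in the inverted index.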

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#23-how-searches-work","title":"2.3. How Searches Work","text":"

When a user searches for a term, Elasticsearch retrieves the postings list from the inverted index, quickly locating documents containing that term. For multi-term queries, Elasticsearch can intersect postings lists, using logical operations (e.g., AND, OR) to combine or filter results.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#24-optimizations","title":"2.4. Optimizations","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#25-benefits","title":"2.5. Benefits","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#summary-of-the-inverted-index","title":"Summary of the Inverted Index","text":"

Inverted indices are the foundation of Elasticsearch\u2019s speed and relevance in text search. This structure is tailored for high performance in full-text search scenarios, especially when complex queries and filtering are involved.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#3-analyzers","title":"3. Analyzers","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#31-what-is-an-analyzer","title":"3.1. What is an Analyzer?","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#32-components-of-an-analyzer","title":"3.2. Components of an Analyzer","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#33-built-in-analyzers","title":"3.3. Built-in Analyzers","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#34-custom-analyzer-configuration","title":"3.4. Custom Analyzer Configuration","text":"

Creating a custom analyzer involves defining: - A tokenizer (e.g., edge-ngram tokenizer for partial word matches). - A list of token filters to process the tokens (e.g., synonym filters, ASCII folding for diacritical marks).

Example configuration:

{\n  \"analysis\": {\n    \"analyzer\": {\n      \"custom_analyzer\": {\n        \"type\": \"custom\",\n        \"tokenizer\": \"whitespace\",\n        \"filter\": [\"lowercase\", \"stop\", \"synonym\"]\n      }\n    }\n  }\n}\n

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#35-usage-during-indexing-and-querying","title":"3.5. Usage During Indexing and Querying","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#36-practical-applications-of-custom-analyzers","title":"3.6. Practical Applications of Custom Analyzers","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#37-benefits-of-analyzers","title":"3.7. Benefits of Analyzers","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#summary-of-analyzers","title":"Summary of Analyzers.","text":"

Analyzers transform raw text into optimized, searchable data, playing a critical role in making Elasticsearch searches accurate and efficient.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#4-elasticsearch-queries","title":"4. ElasticSearch Queries","text":"

In Elasticsearch, queries are central to retrieving data. They\u2019re categorized as leaf queries (operating on specific fields) and compound queries (combining multiple queries). Here's a deep dive into each type, with examples to illustrate their functionality:

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#41-leaf-queries","title":"4.1 Leaf Queries","text":"

These are standalone, field-specific queries, such as term and match, that don\u2019t depend on other queries to function.
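For illustration, here is a minimal sketch of both leaf query styles, assuming hypothetical title and status fields; match analyzes its input for full-text search, while term matches an exact, unanalyzed value:

{\n  \"query\": {\n    \"match\": { \"title\": \"elasticsearch basics\" }\n  }\n}\n\n{\n  \"query\": {\n    \"term\": { \"status\": \"published\" }\n  }\n}\n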

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#411-match-queries-for-full-text-search","title":"4.1.1 Match Queries (for Full-Text Search)","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#412-term-queries-for-structured-data","title":"4.1.2. Term Queries (for Structured Data)","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#42-compound-queries","title":"4.2. Compound Queries","text":"

Compound queries allow for complex logic by combining multiple queries, enabling fine-grained control over query conditions and relevance.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#421-bool-query","title":"4.2.1. Bool Query","text":"

The most flexible compound query, allowing logic-based combinations:

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#complex-boolean-query-example","title":"Complex Boolean Query Example","text":"

Combining multiple clauses:

{\n  \"query\": {\n    \"bool\": {\n      \"must\": [\n        { \"match\": { \"title\": \"Elasticsearch\" } }\n      ],\n      \"should\": [\n        { \"match\": { \"category\": \"tutorial\" } },\n        { \"match\": { \"category\": \"guide\" } }\n      ],\n      \"must_not\": [\n        { \"term\": { \"status\": \"archived\" } }\n      ],\n      \"filter\": [\n        { \"range\": { \"publish_date\": { \"gte\": \"2023-01-01\" } } }\n      ]\n    }\n  }\n}\n
This query retrieves documents with \"Elasticsearch\" in the title, optionally boosts relevance if the document is in \"tutorial\" or \"guide\" categories, excludes documents marked as \"archived,\" and only includes documents published after January 1, 2023.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#422-dis-max-query-disjunction-max","title":"4.2.2. Dis Max Query (disjunction max):","text":"

Optimizes for the highest relevance score among multiple queries, often used when querying across similar fields with varied wording. - Example: Searching for the most relevant match between \u201ctitle\u201d and \u201cdescription\u201d fields:

{\n    \"query\": {\n        \"dis_max\": {\n            \"queries\": [\n                { \"match\": { \"title\": \"elastic search\" } },\n                { \"match\": { \"description\": \"elastic search\" } }\n            ]\n        }\n    }\n}\n

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#43-geo-queries","title":"4.3 Geo Queries","text":"

Elasticsearch provides several geo-specific queries for filtering and scoring documents based on geographic location:

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#431-geo-bounding-box-query","title":"4.3.1 Geo Bounding Box Query","text":"

Defines a rectangular area by specifying two corner points (top-left and bottom-right). Documents with locations inside this box are matched.
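A minimal sketch, assuming an indexed geo_point field named location:

{\n  \"query\": {\n    \"bool\": {\n      \"filter\": {\n        \"geo_bounding_box\": {\n          \"location\": {\n            \"top_left\": { \"lat\": 40.73, \"lon\": -74.1 },\n            \"bottom_right\": { \"lat\": 40.01, \"lon\": -71.12 }\n          }\n        }\n      }\n    }\n  }\n}\n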

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#432-geo-distance-query","title":"4.3.2 Geo Distance Query","text":"

Finds documents within a certain distance from a point. Useful for proximity searches, like \"find stores within 10 miles.\"

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#433-geo-polygon-query","title":"4.3.3 Geo Polygon Query","text":"

Searches within a polygon defined by a series of latitude and longitude points, allowing for irregular area shapes.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#434-geo-shape-query","title":"4.3.4 Geo Shape Query","text":"

The geo_shape query allows for more complex spatial filtering, using pre-defined shapes like circles, polygons, or lines. This is often used with indexed geometries.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#435-using-geo-filters-with-bool-queries","title":"4.3.5 Using Geo Filters with Bool Queries","text":"

Geo filters are often used in combination with other query types within bool queries, allowing flexible, location-based filtering along with other criteria.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#example-of-a-combined-geo-and-bool-query","title":"Example of a Combined Geo and Bool Query","text":"

Finds published documents within a specific area and filters out archived content.

{\n  \"query\": {\n    \"bool\": {\n      \"must\": [\n        { \"term\": { \"status\": \"published\" } }\n      ],\n      \"filter\": {\n        \"geo_distance\": {\n          \"distance\": \"50km\",\n          \"location\": {\n            \"lat\": 40.7128,\n            \"lon\": -74.0060\n          }\n        }\n      },\n      \"must_not\": [\n        { \"term\": { \"status\": \"archived\" } }\n      ]\n    }\n  }\n}\n
"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#summary-of-elastic-queries","title":"Summary of Elastic Queries","text":"

These queries, combined thoughtfully, make Elasticsearch highly adaptable to various search needs.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#5-aggregations","title":"5. Aggregations","text":"

Elasticsearch\u2019s aggregation framework is divided into metrics and bucket aggregations. Here\u2019s a deep dive into each, with subtypes and examples.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#51-metrics-aggregations","title":"5.1. Metrics Aggregations","text":"

These calculate values from field data, like sums or averages.
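A minimal sketch of an avg metric over a hypothetical price field (size: 0 skips returning the matching documents themselves):

{\n  \"size\": 0,\n  \"aggs\": {\n    \"avg_price\": {\n      \"avg\": { \"field\": \"price\" }\n    }\n  }\n}\n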

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#52-bucket-aggregations","title":"5.2. Bucket Aggregations","text":"

These create groups (buckets) of documents based on field values or criteria. Each bucket can contain documents matching conditions and may contain further sub-aggregations.
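A minimal sketch of a terms bucket aggregation that groups documents by a hypothetical brand.keyword field:

{\n  \"size\": 0,\n  \"aggs\": {\n    \"by_brand\": {\n      \"terms\": { \"field\": \"brand.keyword\" }\n    }\n  }\n}\n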

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#53-combining-aggregations","title":"5.3. Combining Aggregations","text":"

Each aggregation can nest other aggregations, allowing complex analysis structures.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#example-of-a-combined-aggregation","title":"Example of a Combined Aggregation","text":"

Calculate the average order amount by city and age range:

{\n  \"aggs\": {\n    \"by_city\": {\n      \"terms\": { \"field\": \"city\" },\n      \"aggs\": {\n        \"age_ranges\": {\n          \"range\": { \"field\": \"age\", \"ranges\": [{ \"to\": 20 }, { \"from\": 20, \"to\": 30 }, { \"from\": 30 }] },\n          \"aggs\": {\n            \"avg_order_amount\": { \"avg\": { \"field\": \"order_amount\" } }\n          }\n        }\n      }\n    }\n  }\n}\n
"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#summary-of-aggregations","title":"Summary of Aggregations","text":"

With these aggregations, Elasticsearch becomes a powerful analytics engine, enabling sophisticated data analysis directly within the index.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#6-sorting","title":"6. Sorting","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#61-basic-sorting","title":"6.1. Basic Sorting","text":"
{\n  \"sort\": [\n    { \"price\": { \"order\": \"asc\" } }\n  ],\n  \"query\": { \"match_all\": {} }\n}\n
"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#62-field-based-sorting","title":"6.2. Field-based Sorting","text":"
{\n  \"sort\": [\n    { \"release_date\": { \"order\": \"desc\" } }\n  ],\n  \"query\": { \"match_all\": {} }\n}\n
"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#63-multiple-sort-fields","title":"6.3. Multiple Sort Fields","text":"
{\n  \"sort\": [\n    { \"price\": { \"order\": \"asc\" } },\n    { \"rating\": { \"order\": \"desc\" } }\n  ],\n  \"query\": { \"match_all\": {} }\n}\n
"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#64-nested-sorting","title":"6.4. Nested Sorting","text":"
{\n  \"sort\": [\n    {\n      \"products.price\": {\n        \"order\": \"asc\",\n        \"nested\": {\n          \"path\": \"products\",\n          \"filter\": { \"range\": { \"products.price\": { \"gt\": 10 } } }\n        }\n      }\n    }\n  ],\n  \"query\": { \"match_all\": {} }\n}\n
"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#65-geolocation-sorting","title":"6.5. Geolocation Sorting","text":"
{\n  \"sort\": [\n    {\n      \"_geo_distance\": {\n        \"location\": \"40.715, -73.988\",\n        \"order\": \"asc\",\n        \"unit\": \"km\"\n      }\n    }\n  ],\n  \"query\": { \"match_all\": {} }\n}\n
"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#66-script-based-sorting","title":"6.6. Script-based Sorting","text":"
{\n  \"sort\": {\n    \"_script\": {\n      \"type\": \"number\",\n      \"script\": {\n        \"source\": \"doc['price'].value * params.factor\",\n        \"params\": { \"factor\": 1.2 }\n      },\n      \"order\": \"desc\"\n    }\n  },\n  \"query\": { \"match_all\": {} }\n}\n
"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#67-missing-values","title":"6.7. Missing Values","text":"
{\n  \"sort\": [\n    { \"price\": { \"order\": \"asc\", \"missing\": \"_last\" } }\n  ],\n  \"query\": { \"match_all\": {} }\n}\n
"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#68-sorting-in-aggregations","title":"6.8. Sorting in Aggregations","text":"
{\n  \"aggs\": {\n    \"top_brands\": {\n      \"terms\": {\n        \"field\": \"brand.keyword\",\n        \"order\": { \"_count\": \"desc\" }\n      }\n    }\n  }\n}\n
"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#7-relevance-scoring","title":"7. Relevance Scoring","text":"

Elasticsearch's relevance scoring is crucial for ranking documents based on their similarity to a query. Here\u2019s an in-depth look:

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#71-scoring-mechanism-and-bm25-algorithm","title":"7.1. Scoring Mechanism and BM25 Algorithm","text":"

The BM25 (Best Matching 25) algorithm is Elasticsearch\u2019s default relevance scoring algorithm. BM25 improves upon traditional TF-IDF (Term Frequency-Inverse Document Frequency) by adjusting term frequency saturation and document length normalization, providing more nuanced relevance.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#core-components-of-bm25","title":"Core Components of BM25:","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#72-calculating-the-bm25-score","title":"7.2. Calculating the BM25 Score","text":"

The BM25 formula combines these components, with two main parameters: - k1: Controls term frequency saturation (default around 1.2). Higher values give more influence to term frequency. - b: Controls length normalization (default around 0.75). Higher values penalize longer documents more strongly. The full formula is shown below.
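For reference, the standard BM25 formula that combines these components (Lucene\u2019s implementation follows this form, with minor internal refinements) is:

score(D, Q) = \\sum_{t \\in Q} IDF(t) \\cdot \\frac{f(t, D) \\cdot (k_1 + 1)}{f(t, D) + k_1 \\cdot (1 - b + b \\cdot |D| / avgdl)}\n

where f(t, D) is the frequency of term t in document D, |D| is the length of the document, and avgdl is the average document length in the index.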

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#73-understanding-scoring-in-elasticsearch-queries","title":"7.3. Understanding Scoring in Elasticsearch Queries","text":"

In Elasticsearch, relevance scores are generated by the \"match\" or \"multi_match\" queries. Each document receives a relevance score, and results are ranked based on these scores. You can inspect scores using the \"explain\": true parameter, which details each document\u2019s score and shows how BM25 factors contribute.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#example-query-with-relevance-scoring","title":"Example Query with Relevance Scoring:","text":"

{\n  \"query\": {\n    \"match\": {\n      \"content\": {\n        \"query\": \"Elasticsearch relevance scoring\",\n        \"boost\": 1.5\n      }\n    }\n  },\n  \"explain\": true\n}\n
This query searches for \"Elasticsearch relevance scoring\" in the content field. The \"boost\" parameter can emphasize this field for relevance, while \"explain\": true helps analyze the scoring breakdown.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#74-improving-relevance-with-advanced-techniques","title":"7.4. Improving Relevance with Advanced Techniques","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#75-practical-use-cases-for-bm25-in-elasticsearch","title":"7.5. Practical Use Cases for BM25 in Elasticsearch","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#summary-of-relevance-score","title":"Summary of Relevance Score","text":"

Relevance scoring with BM25 is foundational to Elasticsearch\u2019s search quality, offering powerful controls for tuning results to your specific needs.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#8-pagination-and-cursors","title":"8. Pagination and Cursors","text":"

Pagination in Elasticsearch is essential for handling large result sets efficiently, as it prevents overwhelming the client and server. Elasticsearch offers different methods for pagination, each with specific use cases. Let's break down each method:

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#81-basic-pagination-with-from-and-size","title":"8.1. Basic Pagination with from and size","text":"

Example:

{\n  \"from\": 20,\n  \"size\": 10,\n  \"query\": { \"match_all\": {} }\n}\n
This retrieves results from the 21st to the 30th position.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#82-search-after-for-deep-pagination","title":"8.2. Search After for Deep Pagination","text":"

Example:

{\n  \"sort\": [ { \"timestamp\": \"asc\" }, { \"id\": \"asc\" } ],\n  \"size\": 10,\n  \"query\": { \"match_all\": {} },\n  \"search_after\": [1627489200, \"XYZ123\"] \n}\n
Here, search_after takes the values from the timestamp and id fields of the last document on the previous page, ensuring seamless navigation.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#83-scroll-api-for-bulk-data-retrieval","title":"8.3. Scroll API for Bulk Data Retrieval","text":"

Example Workflow: - First, initiate a scroll session:

{\n  \"size\": 100,\n  \"query\": { \"match_all\": {} },\n  \"scroll\": \"1m\" \n}\n
- Use the _scroll_id returned by the initial request to retrieve subsequent pages:
{\n  \"scroll\": \"1m\",\n  \"scroll_id\": \"DXF1ZXJ5QW5kRmV0Y2gBAAAAAAAA...\"\n}\n

After each scroll request, repeat until the returned results are empty, which indicates that all documents have been retrieved.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#84-point-in-time-pit-for-real-time-pagination-with-consistency","title":"8.4. Point-in-Time (PIT) for Real-time Pagination with Consistency","text":"

Example Workflow: - First, initiate a Point-in-Time session:

POST /index_name/_pit?keep_alive=1m\n
- Use the pit_id with search_after for paginated queries:
{\n  \"size\": 10,\n  \"query\": { \"match_all\": {} },\n  \"pit\": { \"id\": \"PIT_ID\", \"keep_alive\": \"1m\" },\n  \"sort\": [ { \"timestamp\": \"asc\" }, { \"id\": \"asc\" } ],\n  \"search_after\": [1627489200, \"XYZ123\"]\n}\n
- Close the PIT session when done:
DELETE /_pit\n{\n  \"id\": \"PIT_ID\"\n}\n

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#85-limitations-and-considerations","title":"8.5. Limitations and Considerations","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#86-summary-table-of-pagination-techniques","title":"8.6. Summary Table of Pagination Techniques","text":"Method Use Case Limitations Example Scenarios from & size Simple pagination for small datasets Performance drop for large from values Basic search pages, small datasets search_after Deep pagination without from overhead Requires sorted fields, can\u2019t skip pages Infinite scrolling, data tables with lots of records Scroll API Bulk data export/processing High memory usage, no real-time consistency Data migration, report generation Point-in-Time Consistent real-time pagination Needs frequent re-creation to avoid memory issues Dashboards, applications requiring consistent views

Each method serves specific needs, balancing consistency, performance, and real-time capabilities. This setup allows Elasticsearch to handle vast and dynamic datasets while supporting efficient data retrieval.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#9-facets-and-filters","title":"9. Facets and Filters","text":"

Faceting creates summaries of data, useful for search result filtering, like categorizing search results by price or brand. Filters, on the other hand, optimize performance by narrowing down documents without affecting scoring.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#91-facets-aggregations","title":"9.1. Facets (Aggregations)","text":"

Faceting is a process in Elasticsearch that aggregates search results, providing structured summaries for complex queries. For example, if searching for \"laptops,\" facets can aggregate results by price range, brand, or processor type, allowing users to filter search results dynamically.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#92-filters-filtering-without-scoring-impact","title":"9.2. Filters: Filtering without Scoring Impact","text":"

Filters enable the narrowing of search results by criteria (e.g., price < $500), improving query efficiency and bypassing relevance scoring. They\u2019re often used to pre-process data before a full-text search and work well with caches, resulting in faster query performance.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#93-combining-facets-and-filters-in-search-applications","title":"9.3. Combining Facets and Filters in Search Applications","text":"

In complex search interfaces, facets allow users to drill down through categories, while filters further refine their selections without recalculating scores, ensuring responsive user experiences.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#94-sample-implementations","title":"9.4. Sample Implementations","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#941-creating-facets-aggregations","title":"9.4.1. Creating Facets (Aggregations)","text":"

To implement facets, you\u2019ll define bucket aggregations to group and categorize data. For instance, creating a facet for \"price range\" and \"brand\" in a search for laptops:

GET /products/_search\n{\n  \"query\": {\n    \"match\": { \"description\": \"laptop\" }\n  },\n  \"aggs\": {\n    \"price_ranges\": {\n      \"range\": {\n        \"field\": \"price\",\n        \"ranges\": [\n          { \"to\": 500 },\n          { \"from\": 500, \"to\": 1000 },\n          { \"from\": 1000 }\n        ]\n      }\n    },\n    \"brands\": {\n      \"terms\": { \"field\": \"brand.keyword\" }\n    }\n  }\n}\n

This example provides a breakdown of price ranges and a count of each brand, creating flexible filters users can click on to refine results.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#942-using-filters-for-optimized-performance","title":"9.4.2. Using Filters for Optimized Performance","text":"

Filters improve performance by narrowing results without scoring. Here\u2019s an example of using a bool query with a filter clause:

GET /products/_search\n{\n  \"query\": {\n    \"bool\": {\n      \"must\": {\n        \"match\": { \"description\": \"laptop\" }\n      },\n      \"filter\": [\n        { \"term\": { \"in_stock\": true } },\n        { \"range\": { \"price\": { \"lt\": 1000 } } }\n      ]\n    }\n  }\n}\n

In this query, in_stock and price filters optimize search results without affecting scoring.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#95-performance-optimization-techniques","title":"9.5. Performance Optimization Techniques","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#951-caching-filters","title":"9.5.1. Caching Filters","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#952-minimize-full-text-search-in-filters","title":"9.5.2. Minimize Full-Text Search in Filters","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#953-selective-use-of-aggregations","title":"9.5.3. Selective Use of Aggregations","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#954-balancing-shard-count","title":"9.5.4. Balancing Shard Count","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#cluster-architecture","title":"Cluster Architecture","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#1-cluster-overview-and-node-responsibilities","title":"1. Cluster Overview and Node Responsibilities","text":"

Each node type in an Elasticsearch cluster has specialized roles that allow it to handle different aspects of indexing, searching, and managing data. Let's explore the node types in detail.
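Node roles are assigned per node in elasticsearch.yml; as a sketch, assuming the node.roles syntax introduced in Elasticsearch 7.9 (older versions use boolean flags such as node.master and node.data):

# One setting per node, each in that node's own elasticsearch.yml\nnode.roles: [ master ]        # dedicated master-eligible node\nnode.roles: [ data, ingest ]  # data node that also runs ingest pipelines\nnode.roles: [ ]               # empty list = coordinating-only node\n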

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#11-master-node","title":"1.1. Master Node","text":"

The master node is the cluster\u2019s brain, responsible for the overall management and health of the cluster.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#12-data-node","title":"1.2. Data Node","text":"

Data nodes are the primary nodes for storing data, processing indexing operations, and executing search and aggregation requests.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#13-coordinating-node","title":"1.3. Coordinating Node","text":"

Also known as the client node, the coordinating node acts as a router for client requests, managing query distribution and response aggregation.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#14-ingest-node","title":"1.4. Ingest Node","text":"

Ingest nodes preprocess data before it is indexed, often using ingest pipelines to transform and enrich data.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#2-cluster-hierarchy-overview","title":"2. Cluster Hierarchy Overview","text":"

Each level in the cluster architecture plays a role in organizing and distributing data efficiently across nodes.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#21-cluster","title":"2.1. Cluster","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#22-index","title":"2.2. Index","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#23-shards","title":"2.3. Shards","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#24-segments","title":"2.4. Segments","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#25-documents","title":"2.5. Documents","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#3-elasticsearch-request-flow-and-processing","title":"3. Elasticsearch Request Flow and Processing","text":"

To deeply understand how request flow and cluster operations work in Elasticsearch, let\u2019s walk through each stage of the process in detail:

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#31-receiving-a-request","title":"3.1. Receiving a Request","text":"

When a client sends a request to Elasticsearch, it can be either a query (search request) or an indexing (write) request. Here\u2019s how this begins:

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#32-breaking-down-the-request","title":"3.2. Breaking Down the Request","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#33-execution-at-shard-level","title":"3.3. Execution at Shard Level","text":"

At this stage, each shard executes the request locally. This process differs slightly between a search request and an indexing request.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#331-query-execution-search-requests","title":"3.3.1. Query Execution (Search Requests)","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#332-indexing-execution-write-requests","title":"3.3.2. Indexing Execution (Write Requests)","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#34-response-consolidation","title":"3.4. Response Consolidation","text":"

After the query or indexing operation completes, the coordinating node consolidates the response:

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#35-indexing-flow-details","title":"3.5. Indexing Flow Details","text":"

The indexing flow includes several key mechanisms that ensure data consistency and durability:

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#36-request-flow-and-cluster-operations-summary","title":"3.6. Request Flow and Cluster Operations Summary","text":"

To visualize this, here\u2019s a simplified flow of the entire process:

  1. Client Sends Request (Search or Index) \u2192 Coordinating Node Receives Request
  2. Coordinating Node Identifies Relevant Shards (and chooses primary or replica shards)
  3. Execution on Shards:
    • Query Phase (Search): the query is executed on the selected shards, and each shard returns the IDs and scores of matching documents.
    • Indexing Phase (Write): the document is written to the primary shard, and changes are forwarded to replica shards.
  4. Fetch Phase (Search): fetches the full documents for the top results.
  5. Consolidation and Response:
    • Coordinating node merges, ranks, and sorts results for search.
    • Coordinating node confirms the write operation on all replicas for indexing.
  6. Final Response Sent to Client
"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#37-additional-considerations","title":"3.7. Additional Considerations","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#4-lucene-segments-and-index-structures","title":"4. Lucene, Segments, and Index Structures","text":"

To thoroughly understand Elasticsearch's storage and retrieval mechanisms, let\u2019s go deep into Lucene's segments, inverted index, and advanced data structures like BKD trees. Lucene, at its core, powers Elasticsearch, giving it the ability to handle and query massive datasets with impressive speed and efficiency.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#41-lucene-and-segments","title":"4.1. Lucene and Segments","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#411-what-is-a-segment","title":"4.1.1. What is a Segment?","text":"

A segment in Lucene is a self-contained, immutable collection of documents that forms a subset of a shard. Each segment is essentially a mini-index with its own data structures, including inverted indexes, stored fields, and other data structures to facilitate efficient searching and retrieval.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#412-segment-merging","title":"4.1.2 Segment Merging","text":"

Example: - Imagine a shard with 100 small segments. Lucene might merge them into fewer, larger segments (say, 10 segments), consolidating their data and removing any \"marked as deleted\" documents.
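Merging happens automatically in the background, but it can also be triggered explicitly (typically only on indices that are no longer being written to) via the force merge API; a sketch with a hypothetical index name:

POST /my-index/_forcemerge?max_num_segments=1\n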

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#413-advantages-of-segments","title":"4.1.3. Advantages of Segments","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#42-lucenes-inverted-index","title":"4.2. Lucene\u2019s Inverted Index","text":"

The inverted index is Lucene\u2019s most fundamental data structure and is the backbone of Elasticsearch\u2019s fast full-text search capabilities.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#421-structure-of-an-inverted-index","title":"4.2.1. Structure of an Inverted Index","text":"

The inverted index allows quick lookups by mapping terms to postings lists (lists of documents containing each term).

Example: - Suppose you index the text \"Elasticsearch is scalable search\". The inverted index might look like this:

Term          Documents\n------------------------\n\"Elasticsearch\" [1]\n\"is\"            [1, 2]\n\"scalable\"      [1, 3]\n\"search\"        [1]\n

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#422-advantages-of-inverted-index","title":"4.2.2. Advantages of Inverted Index","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#423-additional-optimizations-in-inverted-index","title":"4.2.3. Additional Optimizations in Inverted Index","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#43-bkd-trees-and-doc-values","title":"4.3. BKD Trees and Doc Values","text":"

Apart from the inverted index, Lucene also uses specialized data structures to handle numeric and spatial data.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#431-bkd-trees","title":"4.3.1. BKD Trees","text":"

BKD Trees are used in Elasticsearch for indexing and querying numeric, date, and geospatial data, especially for high-cardinality fields (fields with a large number of unique values).

Example: - Suppose you have a field geo_point representing user locations. A BKD tree indexes these coordinates, allowing Elasticsearch to quickly retrieve points within a bounding box or radius without scanning all documents.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#structure-of-a-bkd-tree","title":"Structure of a BKD Tree","text":"

BKD trees are essentially a form of a k-d tree (k-dimensional tree), optimized for indexing and searching over multiple dimensions. Each dimension can represent a distinct numeric field (e.g., latitude, longitude, or timestamp).

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#using-bkd-trees-for-queries","title":"Using BKD Trees for Queries","text":"

BKD trees efficiently handle:

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#advantages-of-bkd-trees","title":"Advantages of BKD Trees","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#432-doc-values","title":"4.3.2. Doc Values","text":"

Doc values enable efficient retrieval of field values for sorting, aggregation, and faceting. Instead of retrieving data from inverted indexes (which are optimized for search), doc values provide a columnar storage format that is ideal for analytical tasks.

Example: - Sorting results by price in a large index of products: - Doc values store price in a single column, which Elasticsearch reads to quickly sort documents without scanning each document individually.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#structure-of-doc-values","title":"Structure of Doc Values","text":"Doc values store fields in column-oriented storage rather than row-oriented storage:"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#types-of-doc-values","title":"Types of Doc Values","text":"

Doc values are defined by the field type:

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#using-doc-values-in-queries","title":"Using Doc Values in Queries","text":"

Doc values are essential for operations such as:

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#advantages-of-doc-values","title":"Advantages of Doc Values","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#433-summary-of-lucene-data-structures-in-elasticsearch","title":"4.3.3. Summary of Lucene Data Structures in Elasticsearch","text":"Data Structure Purpose Use Cases Benefits Segments Immutable sub-indices within a shard All document storage Concurrent searches, immutability Inverted Index Maps terms to documents Full-text search Fast term lookups BKD Trees Indexes numeric and multidimensional data Geospatial, timestamp queries Efficient range queries Doc Values Columnar storage for fields Sorting, aggregations Optimized memory usage Point Data Types Indexes geographic points (latitude-longitude) Proximity, bounding box queries Fast geospatial indexing"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#44-advanced-data-structures-in-lucene","title":"4.4. Advanced Data Structures in Lucene","text":"

Let's dive into the point data types and spatial indexes used in Elasticsearch, especially focusing on how it handles geospatial data with geo_point fields. We\u2019ll look at how Quadtrees and R-trees work, their role in spatial indexing, and how they support geospatial queries such as bounding box and proximity searches.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#441-overview-of-spatial-data-in-elasticsearch","title":"4.4.1. Overview of Spatial Data in Elasticsearch","text":"

Elasticsearch supports geospatial data using the geo_point and geo_shape data types: - geo_point: Stores latitude-longitude pairs for points on a map and is primarily used for proximity searches (e.g., \u201cfind locations within 10km\u201d). - geo_shape: Used for more complex shapes, such as polygons or multipoints, and is suitable for defining geographical areas like cities or lakes.

Geospatial queries include: - Bounding Box Queries: Searches for documents within a specific rectangle defined by coordinates. - Distance Queries: Searches for documents within a specified radius from a point. - Polygon Queries: Searches for documents within or intersecting with a complex polygonal area.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#442-quadtrees-and-r-trees-in-spatial-indexing","title":"4.4.2. Quadtrees and R-trees in Spatial Indexing","text":"

Quadtrees and R-trees are tree-based data structures that organize spatial data by dividing the space into hierarchical grids or regions, allowing efficient geospatial query processing.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#quadtrees","title":"Quadtrees","text":"

Quadtrees are hierarchical, 2-dimensional spatial indexes that recursively partition space into four quadrants or nodes, making them highly suitable for spatial data like latitude and longitude pairs.

Example: Imagine we have a city map with thousands of restaurants, each represented as a point (latitude, longitude). - A quadtree organizes the map into quadrants based on restaurant density. Denser regions are divided further to create sub-quadrants. - To find restaurants within a specific neighborhood, the quadtree quickly filters out distant quadrants, only scanning nearby ones, significantly speeding up search.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#advantages-of-quadtrees","title":"Advantages of Quadtrees","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#r-trees","title":"R-trees","text":"

R-trees are another popular spatial data structure used to index multi-dimensional data (e.g., geographic shapes) by grouping nearby objects in bounding rectangles.

Example: Consider a map with various regions, like parks, lakes, and neighborhoods, each represented as a polygon. - An R-tree groups these polygons in bounding rectangles based on location. Polygons that are close to each other fall under the same rectangle. - When searching for parks within a 5km radius, the R-tree discards rectangles outside this range, only exploring relevant areas to find matching polygons.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#advantages-of-r-trees","title":"Advantages of R-trees","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#443-lucenes-internal-use-of-quadtrees-and-r-trees","title":"4.4.3. Lucene's Internal Use of Quadtrees and R-trees","text":"

While Elasticsearch doesn\u2019t directly expose Quadtrees and R-trees as configurations, Lucene, its underlying search library, utilizes versions of these structures to handle spatial indexing efficiently.

Lucene optimizes these data structures to fit within its segment-based storage, allowing them to scale across multiple indices and segments, handling both large-scale geospatial queries and basic point-based distance queries.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#444-spatial-query-processing-in-elasticsearch","title":"4.4.4. Spatial Query Processing in Elasticsearch","text":"

Using these data structures, Elasticsearch processes spatial queries as follows:

  1. Bounding Box Query: For a rectangular region, Elasticsearch leverages Quadtrees to restrict the search space to quadrants that intersect with the bounding box. Points or shapes within these quadrants are retrieved.

  2. Distance Query: For a proximity search (e.g., finding locations within 5km of a point), the Geo Distance Filter calculates distances from a central point and retrieves points from quadrants or nodes that fall within this radius.

  3. Polygon Query: For complex polygons (e.g., \u201cfind all parks within a specific neighborhood\u201d), Elasticsearch uses an R-tree structure to store polygonal shapes in bounding rectangles, allowing fast intersection tests with other regions.
"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#445-summary-table-of-spatial-data-structures-in-elasticsearch","title":"4.4.5. Summary Table of Spatial Data Structures in Elasticsearch","text":"Data Structure Purpose Use Cases Key Characteristics Quadtrees Efficient point indexing Bounding box, proximity searches Hierarchical grid of quadrants R-trees Complex shape and polygon indexing Intersection, overlap queries Bounding rectangles with hierarchical nodes BKD Trees Multi-dimensional numeric data Numeric and geo-distance filters Balanced k-d tree with blocks of data points"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#446-practical-applications-and-benefits","title":"4.4.6. Practical Applications and Benefits","text":"

These data structures optimize spatial queries in Elasticsearch, allowing it to handle diverse geospatial data efficiently. For example: - Bounding Box Queries are accelerated by Quadtrees, making them ideal for finding all points in a geographic area. - Distance Queries are optimized by both Quadtrees and BKD trees, allowing real-time retrieval of nearby points. - Polygon Queries are handled by R-trees, which efficiently manage irregular shapes and large polygons for accurate intersection checks.

By integrating these structures into Lucene, Elasticsearch supports powerful geospatial capabilities across various applications, including mapping services, logistics, and location-based searches.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#45-how-lucenes-structures-fit-into-elasticsearchs-query-flow","title":"4.5. How Lucene's Structures Fit into Elasticsearch\u2019s Query Flow","text":"
  1. Document Indexing: As documents are indexed, Lucene tokenizes text fields, stores terms in the inverted index, and creates doc values for fields that require sorting or aggregation.

  2. Segment Creation: Documents are grouped into segments, with each segment containing its own inverted index, BKD trees, and doc values.

  3. Query Execution:
    • Term-based Queries: The inverted index quickly retrieves documents containing specific terms.
    • Numeric or Geospatial Queries: BKD trees are used to retrieve documents within a certain numeric range or geographic area.
    • Sorting and Aggregation: Doc values facilitate sorting by loading field values column-by-column rather than document-by-document.

Lucene\u2019s well-designed structures\u2014segments, inverted indexes, and multidimensional BKD trees\u2014create the foundation for Elasticsearch\u2019s speed and scalability, enabling it to support complex queries and large datasets efficiently.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#5-visual-representation-of-cluster-architecture-hierarchy-flow","title":"5. Visual Representation of Cluster Architecture (Hierarchy Flow)","text":"
Elasticsearch Cluster\n\u2514\u2500\u2500 Nodes\n    \u251c\u2500\u2500 Master Node\n    \u2502   \u251c\u2500\u2500 Manages cluster state\n    \u2502   \u2514\u2500\u2500 Handles shard allocation and rebalancing\n    \u251c\u2500\u2500 Data Node\n    \u2502   \u251c\u2500\u2500 Stores data and handles indexing/searching\n    \u2502   \u251c\u2500\u2500 Manages primary and replica shards\n    \u2502   \u2514\u2500\u2500 Processes local queries and aggregations\n    \u251c\u2500\u2500 Coordinating Node\n    \u2502   \u251c\u2500\u2500 Routes client requests to data nodes\n    \u2502   \u251c\u2500\u2500 Aggregates responses from data nodes\n    \u2502   \u2514\u2500\u2500 Sends final response to the client\n    \u2514\u2500\u2500 Ingest Node\n        \u251c\u2500\u2500 Processes and transforms data before indexing\n        \u2514\u2500\u2500 Enriches data with pipelines\n\nIndex\n\u2514\u2500\u2500 Shards (Primary and Replica)\n    \u2514\u2500\u2500 Lucene Index (Each shard is a Lucene index)\n        \u251c\u2500\u2500 Segments (Immutable data units in a shard)\n        \u2502    \u251c\u2500\u2500 Inverted Index\n        \u2502    \u251c\u2500\u2500 Doc Values\n        \u2502    \u2514\u2500\u2500 BKD Trees (for numeric & geo fields)\n        \u2514\u2500\u2500 Documents (JSON objects representing data records)(within segments)\n
"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#6-summary-of-cluster-roles-and-data-flow","title":"6. Summary of Cluster Roles and Data Flow","text":"Component Description Responsibilities Cluster Top-level structure with multiple nodes Manages overall data distribution, availability, and search Master Node Brain of the cluster Handles cluster state, shard allocation, and fault tolerance Data Node Primary storage and processing node Stores data, handles indexing, querying, and replica management Coordinating Node Routes and aggregates client requests Routes requests to data nodes, aggregates responses, and sends back to clients Ingest Node Data transformation node Preprocesses data with pipelines, ideal for parsing and enrichment Index Logical grouping of documents Organizes data for efficient storage and querying Shard Distributed subset of index data Represents a Lucene index with primary and replica copies Segment Immutable unit in a shard Stores indexed data for fast read access Document Basic data unit in segments JSON object representing individual data records"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#thread-pools","title":"Thread Pools","text":"

Diving into Elasticsearch\u2019s search and other thread pools is crucial to understanding its performance and scalability. These pools are essential for managing various tasks like indexing, searching, and handling incoming requests. Let\u2019s go through these pools from end to end, covering configurations, management, and performance metrics to monitor.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#1-overview","title":"1. Overview","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#2-detailed-breakdown-of-key-pools","title":"2. Detailed Breakdown of Key Pools","text":"

Here\u2019s a closer look at each pool, along with common configurations and considerations:

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#3-configuring-and-tuning-thread-pools","title":"3. Configuring and Tuning Thread Pools","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#4-monitoring-metrics-for-thread-pools","title":"4. Monitoring Metrics for Thread Pools","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#5-example-scenarios-and-best-practices","title":"5. Example Scenarios and Best Practices","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#6-summary-table-of-pools","title":"6. Summary Table of Pools","text":"Thread Pool Purpose Default Threads Default Queue Size Key Metrics Tuning Tips Search Pool Processes search queries, aggregations # of processors * 3 1000 search_pool.active, search_pool.queue, search_pool.rejected Increase queue_size if many queries are queued; monitor memory usage to prevent OutOfMemory issues. Index Pool Handles indexing requests for documents # of processors 200 index_pool.active, index_pool.queue, index_pool.rejected For high indexing rates, increase queue size and thread count as necessary. Get Pool Retrieves individual documents # of processors * 2 1000 get_pool.active, get_pool.queue, get_pool.rejected Increase queue_size if retrieval requests are high; monitor latency and resource usage. Bulk Pool Processes bulk indexing operations # of processors * 2 50 bulk_pool.active, bulk_pool.queue, bulk_pool.rejected Keep queue_size modest to limit memory use; monitor latency during high bulk loads. Management Pool Manages maintenance tasks like merges 1\u20135 5 management_pool.active, management_pool.queue Generally low usage; monitor only if queue is frequently non-empty, indicating background task delays. Snapshot Pool Handles snapshot creation and restoration 1\u20132 5 snapshot_pool.active, snapshot_pool.queue Schedule snapshots during low-activity periods; adjust resources if snapshots interfere with other tasks.

These pools are essential to optimizing Elasticsearch\u2019s handling of diverse workloads. Monitoring and adjusting each pool based on your workload ensures better performance and resource management across the Elasticsearch cluster.
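
As a sketch of how to watch these metrics in practice, the node stats API exposes active, queue, and rejected counts per pool; the host and port below are assumptions:

import org.apache.http.HttpHost;
import org.apache.http.util.EntityUtils;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class ThreadPoolStats {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            // Per-node thread pool stats: inspect each pool's "active", "queue",
            // and "rejected" counters, and alert when rejections grow. Pool names
            // vary by Elasticsearch version (e.g., the index and bulk pools were
            // merged into a write pool in newer releases).
            Response response = client.performRequest(new Request("GET", "/_nodes/stats/thread_pool"));
            System.out.println(EntityUtils.toString(response.getEntity()));
        }
    }
}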

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#caching","title":"Caching","text":"

In Elasticsearch, caching plays a vital role in speeding up queries by storing frequently accessed data at various levels, minimizing I/O operations and improving response times. Here\u2019s a detailed, end-to-end look at caching in Elasticsearch, from the lowest level to the highest, covering each caching mechanism and its role.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#1-types-of-caches-in-elasticsearch","title":"1. Types of Caches in Elasticsearch","text":"

There are several caching mechanisms in Elasticsearch, each working at a different level:

Each of these caches serves a specific purpose and optimizes a different aspect of query processing.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#2-filesystem-cache-os-level","title":"2. Filesystem Cache (OS-level)","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#3-shard-level-cache","title":"3. Shard-level Cache","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#31-field-data-cache","title":"3.1. Field Data Cache","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#32-query-cache","title":"3.2. Query Cache","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#33-request-cache","title":"3.3. Request Cache","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#4-node-level-cache","title":"4. Node-level Cache","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#41-segment-cache","title":"4.1. Segment Cache","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#42-indices-cache","title":"4.2 Indices Cache","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#5-cache-tuning-and-best-practices","title":"5. Cache Tuning and Best Practices","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#6-summary-table-of-caching-mechanisms","title":"6. Summary Table of Caching Mechanisms","text":"Cache Type Level Purpose Eviction Policy Key Metrics Tuning Tips Filesystem Cache OS Stores files and segments in OS memory Managed by OS N/A Ensure ample OS memory for larger index caching Field Data Cache Shard-level Caches field data for aggregations and sorting LRU-based, configurable fielddata.memory_size, fielddata.evictions Increase size for high aggregation requirements Query Cache Shard-level Caches individual filter query results LRU-based, configurable query_cache.memory_size, query_cache.evictions, query_cache.hit_count, query_cache.miss_count Monitor hit/miss ratios to determine effectiveness Request Cache Shard-level Caches entire search request results LRU-based, per-index request_cache.memory_size, request_cache.evictions, request_cache.hit_count, request_cache.miss_count Best for aggregations on static data Segment Cache Node-level Caches Lucene index segments and postings Managed by Lucene segment.memory_in_bytes, segments.count Larger heap size improves cache efficiency Indices Cache Node-level Caches index metadata (mappings, settings) LRU-based indices.memory, indices.evictions Adjust based on frequency of metadata updates

Each caching layer works in tandem to optimize query speed and efficiency, and monitoring these caches is essential for fine-tuning Elasticsearch performance to meet your specific use cases.
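
As a rough illustration of that monitoring loop, the snippet below reads node-level cache statistics and then clears the shard-level caches of a single index; the index name my-index is an assumption:

import org.apache.http.HttpHost;
import org.apache.http.util.EntityUtils;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class CacheInspection {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            // Node stats include fielddata, query_cache, and request_cache sections
            // with memory size, evictions, and hit/miss counts.
            Response stats = client.performRequest(new Request("GET", "/_nodes/stats/indices"));
            System.out.println(EntityUtils.toString(stats.getEntity()));

            // Drop the caches of one index (e.g., after a large reindex) so stale
            // entries don't skew hit/miss ratios while tuning.
            Response clear = client.performRequest(new Request("POST", "/my-index/_cache/clear"));
            System.out.println(EntityUtils.toString(clear.getEntity()));
        }
    }
}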

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#index-life-management-ilm","title":"Index Life Management - ILM","text":"

Elasticsearch\u2019s Hot-Warm-Cold architecture (extended to Hot-Warm-Cold-Frozen) is part of its Index Lifecycle Management (ILM) system, designed to optimize storage and cost-efficiency for data whose usage patterns change over time. This architecture allows you to store data on different types of nodes (hot, warm, cold, and frozen) based on data retention needs and access frequency. Here\u2019s an in-depth look at each phase and how to manage data effectively with Elasticsearch\u2019s ILM:

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#1-purpose-of-hot-warm-cold-architecture","title":"1. Purpose of Hot-Warm-Cold Architecture","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#2-phases-in-ilm-index-lifecycle-management","title":"2. Phases in ILM (Index Lifecycle Management)","text":"

The ILM policy defines actions to transition data through different stages based on time or data access patterns:

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#3-hot-phase-high-performance-node-configuration","title":"3. Hot Phase (High-Performance Node Configuration)","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#4-warm-phase-mid-performance-node-configuration","title":"4. Warm Phase (Mid-Performance Node Configuration)","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#5-cold-phase-cost-effective-node-configuration","title":"5. Cold Phase (Cost-Effective Node Configuration)","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#6-frozen-phase-archival-node-configuration","title":"6. Frozen Phase (Archival Node Configuration)","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#7-delete-phase-optional-phase","title":"7. Delete Phase (Optional Phase)","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#8-setting-up-ilm-policies","title":"8. Setting Up ILM Policies","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#9-cluster-resource-optimization-with-hot-warm-cold-architecture","title":"9. Cluster Resource Optimization with Hot-Warm-Cold Architecture","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#10-example-ilm-policy","title":"10. Example ILM Policy","text":"

Here\u2019s an example ILM policy to illustrate phase-based transitions for a log analytics use case:

{\n  \"policy\": {\n    \"phases\": {\n      \"hot\": {\n        \"actions\": {\n          \"rollover\": {\n            \"max_age\": \"7d\",\n            \"max_size\": \"50gb\"\n          },\n          \"set_priority\": {\n            \"priority\": 100\n          }\n        }\n      },\n      \"warm\": {\n        \"min_age\": \"30d\",\n        \"actions\": {\n          \"allocate\": {\n            \"number_of_replicas\": 1\n          },\n          \"forcemerge\": {\n            \"max_num_segments\": 1\n          },\n          \"set_priority\": {\n            \"priority\": 50\n          }\n        }\n      },\n      \"cold\": {\n        \"min_age\": \"90d\",\n        \"actions\": {\n          \"allocate\": {\n            \"require\": {\n              \"data\": \"cold\"\n            }\n          },\n          \"freeze\": {},\n          \"set_priority\": {\n            \"priority\": 0\n          }\n        }\n      },\n      \"delete\": {\n        \"min_age\": \"365d\",\n        \"actions\": {\n          \"delete\": {}\n        }\n      }\n    }\n  }\n}\n
"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#11-advantages-of-hot-warm-cold-architecture","title":"11. Advantages of Hot-Warm-Cold Architecture","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#12-best-practices","title":"12. Best Practices","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#13-summary-table","title":"13. Summary Table","text":"Phase Data Usage Hardware ILM Actions Typical Use Cases Hot Active, high access High CPU, SSD, large RAM Rollover, force merge Real-time search, recent logs, app monitoring Warm Mid-range access Moderate CPU, SSD or HDD, RAM Shrink, force merge, reallocate Data analytics on recent history, dashboard views Cold Infrequent access Low CPU, HDD, minimal RAM Freeze, move to cold nodes Compliance storage, infrequent analysis Frozen Rarely accessed Minimal CPU/RAM, cloud storage Unfreeze on access, move to frozen node Long-term archival, compliance data Delete Expired data N/A Delete Data lifecycle cleanup

This Hot-Warm-Cold architecture in Elasticsearch enables you to balance cost, performance, and data accessibility across various hardware configurations, ensuring that data is always stored cost-effectively without compromising on necessary access patterns.

"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#list-of-rest-apis","title":"List of REST APIs","text":"API Endpoint Purpose Description Typical Latency Use Cases Index APIs POST /{index}/_doc Index a Document Adds or updates a document in the specified index. Low (~5-10ms for small docs) Real-time data ingestion, document updates. POST /{index}/_bulk Bulk Indexing Allows indexing, updating, or deleting multiple documents in a single request. Low to Medium (~10-50ms) High-volume data ingestion, ETL processes. POST /{index}/_update/{id} Update a Document Partially updates a document by ID in the specified index. Low to Medium (~10-30ms) Updating specific fields in documents. DELETE /{index}/_doc/{id} Delete a Document Deletes a document by ID from the specified index. Low (~5-10ms) Document removal based on unique IDs. Search APIs GET /{index}/_search Search Documents Executes a search query with optional filters, aggregations, and pagination. Medium to High (~10-100ms, based on complexity) Full-text search, structured queries, analytics. POST /{index}/_search/scroll Scroll Search Enables retrieving large datasets by scrolling through search results. Medium to High (~50-200ms) Pagination for large datasets, data exports. DELETE /_search/scroll Clear Scroll Context Clears scroll contexts to free up resources. Low (~5ms) Resource management after scroll search. POST /_msearch Multi-Search Allows execution of multiple search queries in a single request. Medium to High (~20-150ms) Batch querying, dashboard visualizations. POST /{index}/_count Count Documents Counts the documents matching a query without returning full results. Low (~5-20ms) Quick counts of filtered datasets. Aggregation APIs POST /{index}/_search Aggregation Queries Used with the search API to retrieve aggregated data (e.g., histograms, averages). Medium to High (~20-150ms) Analytics, reporting, data summarization. Cluster and Node APIs GET /_cluster/health Cluster Health Returns health information on the cluster, nodes, and indices. Low (~5ms) Monitoring cluster health and node status. GET /_cluster/stats Cluster Statistics Provides statistics on cluster status, node usage, and storage. Low to Medium (~5-20ms) Cluster-wide monitoring and performance analysis. POST /_cluster/reroute Cluster Reroute Manually reroutes shards in the cluster. Medium (~20-50ms) Shard management and rebalancing. GET /_nodes/stats Node Statistics Returns stats for nodes, including CPU, memory, and thread pools. Low to Medium (~5-20ms) Node health monitoring, resource usage analysis. GET /_cat/nodes List Nodes Provides a list of all nodes in the cluster in a human-readable format. Low (~5ms) Node overview, node status. GET /_cat/indices List Indices Lists all indices with metadata on size, health, and document count. Low (~5ms) Index management and monitoring. Index Management APIs PUT /{index} Create Index Creates a new index with specific settings and mappings. Low (~10-20ms) Index setup and schema definition. DELETE /{index} Delete Index Deletes the specified index. Low (~5-10ms) Index removal, data management. PUT /{index}/_settings Update Index Settings Updates settings (e.g., refresh interval) of an index. Low (~10ms) Dynamic adjustments of index settings. POST /{index}/_refresh Refresh Index Refreshes an index to make recent changes searchable. Medium (~10-30ms) Ensures data is available for search in near-real-time. POST /{index}/_forcemerge Force Merge Index Reduces the number of segments in an index to optimize storage. 
High (~100ms - 1s+) Optimize index storage, improve search speed. Cache and Memory Management POST /{index}/_cache/clear Clear Index Cache Clears the cache for the specified index. Low (~5ms) Cache management for performance tuning. POST /_flush Flush Index Writes all buffered changes to disk. Medium (~10-30ms) Data durability, cache clearing. POST /{index}/_refresh Refresh Makes recent changes to an index visible to search. Medium (~10-30ms) Near real-time updates in the search index. Snapshot and Backup APIs PUT /_snapshot/{repo}/{snapshot} Create Snapshot Creates a snapshot of indices for backup. High (dependent on index size) Data backup, disaster recovery. GET /_snapshot/{repo}/{snapshot} Get Snapshot Status Checks the status of an existing snapshot. Low (~5ms) Monitor snapshot progress, status checks. DELETE /_snapshot/{repo}/{snapshot} Delete Snapshot Deletes an existing snapshot. Medium (~10-20ms) Snapshot lifecycle management, freeing storage. Security and Role Management POST /_security/role/{role} Create/Update Role Creates or updates a security role with specific permissions. Low (~5-10ms) Access control, role-based permissions. POST /_security/user/{user} Create/Update User Creates or updates a user in Elasticsearch. Low (~5-10ms) User management, access permissions. GET /_security/_authenticate Authenticate User Authenticates the current user. Low (~5ms) Session management, authentication checks."},{"location":"techdives/DistrubutedSystems/ElasticSearch/#additional-notes","title":"Additional Notes","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#capacity-planning-cluster-design","title":"Capacity Planning & Cluster Design","text":"Parameter Formula / Best Practice Description Example Calculation Total Data Nodes $ \\text{Total Shards} \\times \\text{Shard Size} / \\text{Node Disk Capacity} $ Calculate required data nodes based on total shard size and node storage. 20 shards of 30GB each on 600GB nodes \u2192 1 data node Heap Memory Allocation $ \\text{Total Node RAM} \\times 0.5 $ (up to 32GB max) Allocate 50% of node memory to JVM heap, up to 32GB, to optimize memory usage. Node with 64GB RAM \u2192 32GB heap Storage Requirement $ \\text{Total Data Size} \\times (1 + \\text{Replication Factor}) \\times \\text{Retention Period (months)} $ Plan storage capacity based on expected data size and replication. 1TB data, 1 replica, 1-month retention \u2192 2TB Ideal Shard Size 20-50 GB per shard Recommended shard size for optimal performance and manageability. Aim for shards ~30GB each for balanced load Shards per Index $ \\text{Total Data Size} / \\text{Target Shard Size} $ Calculate shard count to avoid excessive shard management overhead. 500GB index, 25GB target shard size \u2192 20 shards Max Shard Count Avoid more than 20 shards per GB of heap Ensure shard count doesn\u2019t exceed a level that can cause memory strain. 32GB heap \u2192 max ~640 shards across all indices Master Nodes Minimum of 3 Dedicated master nodes ensure quorum-based fault tolerance and cluster stability. At least 3 master nodes for high availability Coordinating Nodes 1 per 10 data nodes in large clusters Handle query load without adding data nodes, especially with complex aggregation queries. 50 data nodes \u2192 5 coordinating nodes CPU Requirement 4-8 CPUs per data node Allocate enough CPUs to handle search and indexing operations without bottlenecks. 
4-8 CPUs per node for typical workloads Disk I/O Throughput Minimum of 300 MB/s (write-heavy) For write-intensive clusters, ensure sufficient I/O throughput for data nodes. SSDs or fast disks are recommended Disk Usage Threshold Keep below 75% disk usage per node Avoid exceeding 75% disk usage on data nodes to prevent performance degradation. Monitor for 75% threshold, Elasticsearch throttles at ~85% Index Throttle State Throttles at ~85% disk usage Elasticsearch throttles indexing if nodes exceed 85% disk usage. Configure alerts to prevent reaching throttle state Memory Usage - Field Data Cache indices.fielddata.cache.size based on available heap Field data cache size impacts sorting and aggregation speed, stored in memory. Set to 20-30% of available heap for high-aggregation workloads Query Cache Size 10% of heap by default (adjustable) Caches filter queries to speed up repeated search operations. Increase for clusters with frequent repetitive queries Request Cache Size Enabled per index, LRU-based Cache complete search requests, especially useful for aggregation-heavy queries on static data. Enable on indices with frequent aggregation queries Cluster Health GET /_cluster/health (monitor green, yellow, red status) Regularly monitor cluster health to identify potential issues in shard allocation and node status. Alerts for yellow or red statuses to detect unallocated shards Pending Tasks GET /_cluster/pending_tasks Track the number of pending tasks; delays may signal node or cluster overload. Monitor to ensure tasks are processed promptly CPU Usage per Node Track via node.cpu.percent Monitor CPU load on nodes, especially data and coordinating nodes handling heavy queries. Keep below 80% for balanced performance Search Latency Monitor search.fetch_time and search.query_time Search latency metrics indicate performance bottlenecks; high values can suggest tuning is needed. Target <100ms for interactive queries, <500ms for aggregations Indexing Latency Monitor indexing.index_time Tracks indexing speed; high values indicate indexing bottlenecks. Optimize disk I/O if consistently high GC Pause Time Track jvm.gc.collection_time Excessive GC pause time (>100ms) can degrade performance, especially on data nodes. Keep heap usage <75% to avoid frequent GC pauses Disk Utilization disk.used_percent Ensure disk usage remains within optimal limits to prevent resource contention. Monitor for high usage, keep below 75% Heap Usage per Node jvm.heap_used_percent Monitor heap usage across nodes; values near 100% can trigger frequent GC and degrade performance. Keep below 75% for stable performance Shard Count per Node Shards should not exceed 50-75 per data node Optimal shard count balances memory usage and search latency. Distribute shards evenly to avoid bottlenecks Index Rollover Frequency Based on data ingestion and retention policy Use index rollover for high-ingestion use cases to manage shard size and count. Time-based or size-based rollover (e.g., daily, 10GB) Snapshot Frequency Schedule during off-peak hours Regular snapshots for backup without affecting active workloads. Daily or weekly snapshots for disaster recovery Ingest Node CPU Requirement Optimize for transformation-heavy workloads Ingest nodes need higher CPU for ETL tasks before indexing. ~8 CPUs per ingest node for transformation-heavy clusters Write Thread Pool Size Controlled via thread_pool.write.size Configure thread pool size for write-heavy workloads. 
Default based on available processors; increase for high-write loads Bulk Thread Pool Size Set via thread_pool.bulk.size Bulk operations often have separate thread pools, useful for high-ingestion clusters. Default based on processors; increase if high bulk ingestion Query Throughput Measure search.thread_pool.queue The search queue size indicates if the search load is too high, leading to delays. Keep queue size low to avoid bottlenecks Bulk Queue Size Monitor bulk.thread_pool.queue Large bulk queue size indicates ingestion pressure; tune for high-ingestion needs. Increase queue size or add ingest nodes for bulk-heavy workloads Network Throughput Monitor network interface utilization High network usage can impact inter-node communication, especially during replication. Ensure sufficient network bandwidth for large clusters Network Latency Track round-trip time between nodes Low-latency network is critical for distributed search and replication. <1ms recommended within data centers, 1-5ms for cross-regions"},{"location":"techdives/DistrubutedSystems/ElasticSearch/#questions","title":"Questions","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#1-sql-or-nosql","title":"1. SQL or NoSQL:","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#2-main-feature-origin-and-design","title":"2. Main Feature, Origin, and Design:","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#3-olap-or-oltp","title":"3. OLAP or OLTP:","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#4-acid-or-base","title":"4. ACID or BASE:","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#5-cap-theorem","title":"5. CAP Theorem:","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#6-cluster-structure","title":"6. Cluster Structure:","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#7-fundamental-building-blocks","title":"7. Fundamental Building Blocks:","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#8-multi-master-support","title":"8. Multi-Master Support:","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#9-master-data-node-relation","title":"9. Master-Data Node Relation:","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#10-node-structures-in-cluster","title":"10. Node Structures in Cluster:","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#11-cluster-scaling-support","title":"11. Cluster Scaling Support:","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#12-high-availability","title":"12. High Availability:","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#13-fault-tolerance","title":"13. Fault Tolerance:","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#14-replication","title":"14. Replication:","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#15-partition","title":"15. Partition:","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#16-sharding","title":"16. Sharding:","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#17-caching-in-depth","title":"17. Caching in Depth:","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#18-storage-type","title":"18. Storage Type:","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#19-segmentpage-approach","title":"19. Segment/Page Approach:","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#20-trees-for-storage","title":"20. 
Trees for Storage:","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#21-indexing","title":"21. Indexing:","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#22-routing","title":"22. Routing:","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#23-latency","title":"23. Latency:","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#24-versioning","title":"24. Versioning:","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#25-locking-and-concurrency","title":"25. Locking and Concurrency:","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#26-write-ahead-log-wal","title":"26. Write-Ahead Log (WAL):","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#27-change-data-capture-cdc","title":"27. Change Data Capture (CDC):","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#28-query-type-and-query","title":"28. Query Type and Query:","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#29-query-optimizers","title":"29. Query Optimizers:","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#30-sql-support","title":"30. SQL Support:","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#31-circuit-breakers","title":"31. Circuit Breakers:","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#32-data-retention-lifecycle-management","title":"32. Data Retention / Lifecycle Management:","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#33-other-features","title":"33. Other Features:","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#34-modules-or-libraries","title":"34. Modules or Libraries:","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#35-optimization-and-tuning","title":"35. Optimization and Tuning:","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#36-backup-and-recovery","title":"36. Backup and Recovery:","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#37-security","title":"37. Security:","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#38-migration","title":"38. Migration:","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#39-recommended-cluster-setup","title":"39. Recommended Cluster Setup:","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#40-basic-cluster-setup","title":"40. Basic Cluster Setup:","text":""},{"location":"techdives/DistrubutedSystems/ElasticSearch/#references","title":"References","text":"
  1. https://www.elastic.co/guide/en/elasticsearch/reference/8.15/index.html
  2. https://www.hellointerview.com/learn/system-design/deep-dives/elasticsearch
  3. https://j.blaszyk.me/tech-blog/exploring-apache-lucene-index/
  4. https://medium.com/swlh/bkd-trees-used-in-elasticsearch-40e8afd2a1a4
  5. https://www.paradedb.com/blog/elasticsearch_vs_postgres
  6. https://nsvarun14.medium.com/capacity-planning-for-elasticsearch-cde3c0693add
  7. https://fdv.github.io/running-elasticsearch-fun-profit/004-cluster-design/004-cluster-design.html
  8. https://medium.com/@sureshkumar.pawar/sizing-your-elk-elasticsearch-logstash-kibana-cluster-for-high-performance-398fe6e591d4
  9. https://www.infoq.com/articles/similarity-scoring-elasticsearch/
  10. https://www.elastic.co/blog/practical-bm25-part-2-the-bm25-algorithm-and-its-variables
  11. https://medium.com/@niteshsaini/how-elasticsearch-calculates-its-relevance-score-e762c6274004
"},{"location":"techdives/DistrubutedSystems/Kafka/","title":"Kafka","text":""},{"location":"techdives/DistrubutedSystems/Kafka/#what-is-kafka","title":"What is Kafka?","text":"

Kafka is a distributed event streaming platform that allows applications to publish, store, and consume streams of data in real-time or batch mode. It is designed to handle continuous streams of events or records by functioning as a distributed commit log, where data is written sequentially and can be read independently by multiple consumers.

Kafka follows a publish-subscribe model where producers write data to topics divided into partitions, and consumers pull data from those partitions at their own pace. Kafka ensures that data flows reliably between producers and consumers, with fault-tolerant replication and durable storage to prevent data loss.

At its heart, Kafka provides three core functionalities: 1. Message Streaming: Enabling systems to send and receive continuous streams of data asynchronously. 2. Durable Storage: Persisting messages on disk, ensuring data is not lost even in case of failures. 3. Distributed Processing: Allowing data to be partitioned and processed across multiple servers for scalability and fault tolerance.
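
As a minimal sketch of this publish-subscribe flow using the Kafka Java clients (the broker address, topic name, and group id are placeholders):

import java.time.Duration;
import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.serialization.StringSerializer;

public class MinimalPubSub {
    public static void main(String[] args) {
        // Producer: appends an event to a topic partition.
        Properties p = new Properties();
        p.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        p.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        p.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        try (KafkaProducer<String, String> producer = new KafkaProducer<>(p)) {
            // The key (if present) determines the target partition.
            producer.send(new ProducerRecord<>("orders", "order-42", "created"));
        }

        // Consumer: pulls records at its own pace, tracking progress via offsets.
        Properties c = new Properties();
        c.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        c.put(ConsumerConfig.GROUP_ID_CONFIG, "order-processors");
        c.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        c.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        c.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(c)) {
            consumer.subscribe(List.of("orders"));
            for (ConsumerRecord<String, String> r : consumer.poll(Duration.ofSeconds(5))) {
                System.out.printf("partition=%d offset=%d key=%s value=%s%n",
                        r.partition(), r.offset(), r.key(), r.value());
            }
        }
    }
}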

"},{"location":"techdives/DistrubutedSystems/Kafka/#core-components-and-keywords","title":"Core Components and Keywords","text":"Component / Keyword Description Topic A logical channel for data, used to categorize messages. Topics are divided into partitions for parallelism. Partition A segment of a topic that stores messages in a log structure. It ensures parallel processing. Each partition contains messages with offsets to track their position. Producer A client or application that publishes messages to a Kafka topic. Producers can distribute messages across partitions. Consumer A client or application that subscribes to Kafka topics and reads messages from partitions at their own pace. Broker A Kafka server that stores messages, handles requests, and coordinates with other brokers in a Kafka cluster. Kafka Cluster A group of multiple Kafka brokers working together to provide scalability and fault tolerance. Zookeeper / KRaft Zookeeper is used for metadata management and leader election in older Kafka versions. Newer versions replace Zookeeper with KRaft (Kafka Raft) for native metadata management. Offset A unique identifier for each message within a partition, representing its position in the log. Consumers use offsets to track the messages they have processed. Replication Kafka duplicates partitions across multiple brokers to ensure fault tolerance and data availability. Leader Partition The primary replica of a partition that handles all reads and writes for that partition. Other replicas act as followers. Follower Partition A copy of the leader partition that replicates its data. If the leader fails, a follower can take over. Consumer Group A group of consumers sharing the same group ID, ensuring that each partition is consumed by only one member of the group at any given time. In-Sync Replica (ISR) A replica that is up-to-date with the leader partition. Kafka promotes an ISR as the new leader if the current leader fails. Acknowledgments (ACKs) A producer configuration that defines when a message is considered successfully sent (e.g., only after being replicated to all followers). Retention Policy A configuration that determines how long Kafka retains messages before deleting or compacting them. Messages can be removed based on time or size limits. Log Compaction A process that keeps only the latest version of a key within a topic, useful for data clean-up and long-term storage. Controller A designated broker responsible for managing partition leadership and cluster rebalancing. Kafka Streams A lightweight client library used for processing and analyzing data streams directly within Kafka. Kafka Connect A framework for integrating Kafka with external systems by providing connectors for data ingestion and extraction."},{"location":"techdives/DistrubutedSystems/Kafka/#how-kafka-achieves-high-scalability","title":"How Kafka Achieves High Scalability ?","text":"

Kafka\u2019s design is fundamentally scalable, allowing it to handle millions of events per second efficiently. It achieves this by leveraging distributed architecture, partitioning, horizontal scaling, and several load balancing strategies. Let\u2019s explore the mechanisms, architecture, and workflows that enable Kafka to scale end-to-end.

"},{"location":"techdives/DistrubutedSystems/Kafka/#1-partitioning-the-foundation-of-scalability","title":"1. Partitioning: The Foundation of Scalability","text":""},{"location":"techdives/DistrubutedSystems/Kafka/#partition-key-and-load-distribution","title":"Partition Key and Load Distribution","text":""},{"location":"techdives/DistrubutedSystems/Kafka/#impact-of-more-partitions","title":"Impact of More Partitions","text":""},{"location":"techdives/DistrubutedSystems/Kafka/#2-distributed-broker-architecture","title":"2. Distributed Broker Architecture","text":""},{"location":"techdives/DistrubutedSystems/Kafka/#partition-replication-across-brokers","title":"Partition Replication Across Brokers","text":""},{"location":"techdives/DistrubutedSystems/Kafka/#3-horizontal-scaling-add-more-brokers-or-partitions","title":"3. Horizontal Scaling: Add More Brokers or Partitions","text":""},{"location":"techdives/DistrubutedSystems/Kafka/#scaling-the-producer-and-consumer-layers","title":"Scaling the Producer and Consumer Layers","text":""},{"location":"techdives/DistrubutedSystems/Kafka/#4-producer-scalability-load-balancing-and-batch-processing","title":"4. Producer Scalability: Load Balancing and Batch Processing","text":""},{"location":"techdives/DistrubutedSystems/Kafka/#5-consumer-scalability-consumer-groups-and-load-sharing","title":"5. Consumer Scalability: Consumer Groups and Load Sharing","text":""},{"location":"techdives/DistrubutedSystems/Kafka/#rebalancing-consumers","title":"Rebalancing Consumers","text":""},{"location":"techdives/DistrubutedSystems/Kafka/#6-efficient-load-balancing-and-rebalancing-mechanisms","title":"6. Efficient Load Balancing and Rebalancing Mechanisms","text":""},{"location":"techdives/DistrubutedSystems/Kafka/#sticky-partitioning-strategy","title":"Sticky Partitioning Strategy","text":""},{"location":"techdives/DistrubutedSystems/Kafka/#7-network-optimization-and-zero-copy-technology","title":"7. Network Optimization and Zero-Copy Technology","text":""},{"location":"techdives/DistrubutedSystems/Kafka/#8-broker-level-optimizations-and-compression","title":"8. Broker-Level Optimizations and Compression","text":""},{"location":"techdives/DistrubutedSystems/Kafka/#9-kafka-controller-and-partition-leadership","title":"9. Kafka Controller and Partition Leadership","text":""},{"location":"techdives/DistrubutedSystems/Kafka/#10-handling-backpressure-for-scalability","title":"10. Handling Backpressure for Scalability","text":""},{"location":"techdives/DistrubutedSystems/Kafka/#11-configuring-for-high-scalability-tuning-parameters","title":"11. Configuring for High Scalability: Tuning Parameters","text":""},{"location":"techdives/DistrubutedSystems/Kafka/#summary-of-kafkas-high-scalability-mechanisms","title":"Summary of Kafka\u2019s High Scalability Mechanisms","text":"Aspect How Kafka Scales Partitioning Divides topics into partitions for parallelism. Distributed Brokers Workload spread across multiple brokers. Horizontal Scaling New brokers or partitions can be added dynamically. Producer Parallelism Producers write to multiple partitions concurrently. Consumer Groups Consumers share partitions for parallel processing. Rebalancing Redistributes workload when brokers/consumers change. Batching & Compression Reduces I/O and network overhead. 
Zero-Copy Technology Efficient data transfer with low CPU usage."},{"location":"techdives/DistrubutedSystems/Kafka/#how-kafka-achieves-high-availability-and-fault-tolerance","title":"How Kafka Achieves High Availability and Fault Tolerance ?","text":"

Kafka\u2019s design for high availability and fault tolerance centers on replication, leader election, distributed brokers, and self-healing mechanisms. Together, these mechanisms ensure Kafka can handle hardware, software, and network failures, while maintaining data integrity, durability, and continuity.

"},{"location":"techdives/DistrubutedSystems/Kafka/#1-partition-replication-foundation-of-ha-and-ft","title":"1. Partition Replication \u2013 Foundation of HA and FT","text":""},{"location":"techdives/DistrubutedSystems/Kafka/#how-replication-ensures-ha-and-ft","title":"How Replication Ensures HA and FT:","text":"
  1. Leader-follower model: Only the leader replica handles read and write requests. Followers passively replicate the leader\u2019s data.
  2. Automatic failover: If the leader broker fails, one of the followers (from the in-sync replica set, or ISR) is promoted as the new leader, ensuring the partition remains available.

This setup ensures continuous data availability and minimal downtime when individual brokers fail.
"},{"location":"techdives/DistrubutedSystems/Kafka/#2-leader-election-and-failover-mechanism","title":"2. Leader Election and Failover Mechanism","text":""},{"location":"techdives/DistrubutedSystems/Kafka/#leader-election-process-during-failures","title":"Leader Election Process During Failures:","text":"
  1. Detection: Zookeeper (or KRaft) detects a failed broker.
  2. Election: The controller broker initiates a leader election for partitions on the failed broker.
  3. Promotion: An in-sync follower (replica that\u2019s fully up-to-date) is promoted to leader.
  4. Metadata update: Kafka updates cluster metadata to reflect the new leader, ensuring clients redirect requests to the new leader.

This automated and rapid leader election ensures Kafka remains operational with minimal interruption.

"},{"location":"techdives/DistrubutedSystems/Kafka/#3-in-sync-replicas-isr-ensuring-data-integrity","title":"3. In-Sync Replicas (ISR) \u2013 Ensuring Data Integrity","text":""},{"location":"techdives/DistrubutedSystems/Kafka/#role-of-isr-in-minimizing-data-loss","title":"Role of ISR in Minimizing Data Loss:","text":""},{"location":"techdives/DistrubutedSystems/Kafka/#4-controller-broker-managing-ha-and-ft-orchestration","title":"4. Controller Broker \u2013 Managing HA and FT Orchestration","text":""},{"location":"techdives/DistrubutedSystems/Kafka/#5-distributed-brokers-avoiding-single-points-of-failure","title":"5. Distributed Brokers \u2013 Avoiding Single Points of Failure","text":""},{"location":"techdives/DistrubutedSystems/Kafka/#how-broker-distribution-improves-ha-and-ft","title":"How Broker Distribution Improves HA and FT:","text":""},{"location":"techdives/DistrubutedSystems/Kafka/#6-acknowledgment-policies-acks-ensuring-data-durability","title":"6. Acknowledgment Policies (ACKs) \u2013 Ensuring Data Durability","text":"

Kafka provides acknowledgment modes for tuning data durability against performance. These settings control when a message is considered \u201csuccessfully written\u201d:
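
As a sketch, the three standard values of the producer acks setting trade durability against latency (the surrounding class is illustrative scaffolding):

import java.util.Properties;
import org.apache.kafka.clients.producer.ProducerConfig;

public class AckModes {
    public static void main(String[] args) {
        Properties props = new Properties();
        // acks=0  : fire-and-forget, no broker acknowledgment (fastest, least safe).
        // acks=1  : the leader acknowledges after writing locally (balanced).
        // acks=all: all in-sync replicas must acknowledge (slowest, most durable).
        props.put(ProducerConfig.ACKS_CONFIG, "all");
        // With acks=all, the broker-side min.insync.replicas setting controls how
        // many replicas must confirm before the write succeeds.
        System.out.println("acks=" + props.get(ProducerConfig.ACKS_CONFIG));
    }
}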

"},{"location":"techdives/DistrubutedSystems/Kafka/#role-of-acks-in-fault-tolerance","title":"Role of ACKs in Fault Tolerance:","text":""},{"location":"techdives/DistrubutedSystems/Kafka/#7-log-compaction-and-retention-for-data-availability","title":"7. Log Compaction and Retention for Data Availability","text":"

Kafka employs log compaction and retention to maintain long-term data availability:

  1. Time-based retention: Kafka retains messages for a configurable period (e.g., 7 days).
  2. Size-based retention: Kafka deletes old messages once partition logs reach a certain size.
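
For illustration, a compacted topic can be created with the Kafka AdminClient; the topic name, partition count, and replication factor below are assumptions:

import java.util.List;
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.NewTopic;
import org.apache.kafka.common.config.TopicConfig;

public class CompactedTopic {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        try (AdminClient admin = AdminClient.create(props)) {
            // cleanup.policy=compact keeps only the latest record per key,
            // instead of deleting records purely by age or log size.
            NewTopic topic = new NewTopic("user-profiles", 6, (short) 3)
                    .configs(Map.of(TopicConfig.CLEANUP_POLICY_CONFIG,
                                    TopicConfig.CLEANUP_POLICY_COMPACT));
            admin.createTopics(List.of(topic)).all().get();
        }
    }
}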
"},{"location":"techdives/DistrubutedSystems/Kafka/#log-compaction-for-data-recovery","title":"Log Compaction for Data Recovery:","text":""},{"location":"techdives/DistrubutedSystems/Kafka/#8-handling-split-brain-scenarios-consistency-through-quorums","title":"8. Handling Split-Brain Scenarios \u2013 Consistency Through Quorums","text":"

A split-brain scenario happens when a broker loses connectivity with others, risking data inconsistency.

"},{"location":"techdives/DistrubutedSystems/Kafka/#kafkas-approach-to-preventing-split-brain","title":"Kafka\u2019s Approach to Preventing Split-Brain:","text":"

This quorum-based replication prevents data corruption during network partitions.

"},{"location":"techdives/DistrubutedSystems/Kafka/#9-self-healing-and-automated-recovery","title":"9. Self-Healing and Automated Recovery","text":"

Kafka\u2019s self-healing mechanisms enable it to quickly recover from broker or replica failures:

These self-healing features maintain availability and data consistency without requiring manual intervention.

"},{"location":"techdives/DistrubutedSystems/Kafka/#10-multi-datacenter-replication-for-disaster-recovery","title":"10. Multi-Datacenter Replication for Disaster Recovery","text":"

Kafka supports multi-datacenter replication for cross-region fault tolerance using tools like Kafka MirrorMaker.

"},{"location":"techdives/DistrubutedSystems/Kafka/#how-multi-cluster-replication-ensures-availability-and-fault-tolerance","title":"How Multi-Cluster Replication Ensures Availability and Fault Tolerance:","text":""},{"location":"techdives/DistrubutedSystems/Kafka/#11-client-side-handling-of-failures-for-ha-and-ft","title":"11. Client-Side Handling of Failures for HA and FT","text":"

Kafka\u2019s producers and consumers have built-in resilience to handle failures gracefully:

"},{"location":"techdives/DistrubutedSystems/Kafka/#summary-of-kafkas-high-availability-and-fault-tolerance-mechanisms","title":"Summary of Kafka\u2019s High Availability and Fault Tolerance Mechanisms","text":"Mechanism How It Ensures HA and FT Partition Replication Multiple copies of data across brokers ensure availability even during broker failures. Leader Election Automatically promotes a follower to leader when the leader broker fails. In-Sync Replicas (ISR) Only fully synchronized replicas can be promoted to leader to prevent data loss. Controller Broker Manages partition leadership and rebalancing operations, ensuring consistent cluster state. Distributed Brokers Spreads data across brokers to avoid single points of failure. Dynamic Rebalancing Adjusts workload across brokers and consumers to handle changes or failures smoothly. Acknowledgment Policies (ACKs) Ensures data is safely replicated before acknowledging writes, reducing data loss risk. Log Compaction Maintains the latest data state for recovery during consumer or application failures. Client-Side Recovery Producers and consumers handle broker failures with retries and automatic rebalancing. Network Partition Handling Uses Zookeeper/KRaft to prevent split-brain scenarios and ensure data consistency. Multi-Datacenter Replication Provides disaster recovery and redundancy across regions."},{"location":"techdives/DistrubutedSystems/Kafka/#kafka-features-impacts-on-ha-and-ft","title":"Kafka features Impacts on HA and FT","text":"Feature/Configuration Configuration Details Impact on HA Impact on FT Explanation Partition Replication Set a high replication factor (e.g., 3 or 5) High High Ensures multiple copies of data across brokers; if the leader fails, a follower can be promoted, maintaining data availability and durability. In-Sync Replicas (ISR) Use acks=all to ensure ISR sync before acknowledgments Moderate High Guarantees data consistency by ensuring messages are replicated to all ISR replicas before confirming writes, reducing data loss. Leader Election Mechanism Managed by Zookeeper or KRaft for automatic failover High Moderate Automatically promotes a follower to leader when the current leader fails, minimizing downtime. Controller Broker Redundancy provided by re-election if the current controller fails High Moderate Ensures the orchestrator of metadata and rebalancing has a backup, maintaining consistent cluster operations. Distributed Broker Placement Spread partitions across brokers; no two replicas on the same broker High High Reduces the risk of data unavailability and loss by preventing single points of failure. Rebalancing Strategy Configure partition reassignment for balanced broker load High Low Prevents overload on individual brokers, enhancing availability; this has less impact on data durability. Acknowledgment Policy (ACKs) Set acks=all for highest data durability Low High Ensures writes are only confirmed after replication to all ISR replicas, reducing the risk of data loss. Log Compaction Enable for compacted topics to retain latest state Moderate Moderate Retains the latest state of each key, useful for stateful applications; supports recovery but doesn\u2019t guarantee availability. Retention Policies Configure time-based or size-based retention High Low Maintains historical data for consumer recovery, contributing to high availability if consumers fall behind. 
Client Retry Mechanisms Configure producer and consumer retries High Low Enables producers and consumers to handle temporary broker unavailability, ensuring continuous operation. Consumer Group Rebalancing Set rebalancing policies to avoid bottlenecks High Low Ensures efficient load distribution among consumers, enhancing availability but minimally impacting data durability. Multi-Datacenter Replication Enable with Kafka MirrorMaker or similar tools High High Provides cross-region redundancy for both availability and fault tolerance, especially critical for disaster recovery. Backpressure Handling Use offset tracking and monitoring Moderate High Allows consumers to fall behind producers without causing data loss, enhancing fault tolerance by protecting against backpressure failures. Split-Brain Handling Managed by Zookeeper/KRaft to avoid conflicting writes Low High Prevents data inconsistency by ensuring only one leader exists per partition, critical for consistency in partitioned network conditions. Log Recovery Enable brokers to rebuild from log segments on restart Moderate High Ensures brokers can recover their state after a crash, minimizing data loss and ensuring continuity post-restart.

Kafka\u2019s architecture for high availability and fault tolerance ensures the system remains resilient under various failure scenarios. Through partition replication, leader election, dynamic rebalancing, and multi-datacenter replication, Kafka provides a robust infrastructure with no single points of failure and near-zero downtime, making it reliable for critical real-time data streaming and processing applications.

"},{"location":"techdives/DistrubutedSystems/Kafka/#what-makes-kafka-unique","title":"What Makes Kafka Unique ?","text":"

Append-only log-based architecture and high-throughput, low-latency design are two of Kafka\u2019s core features that make it unique.

"},{"location":"techdives/DistrubutedSystems/Kafka/#1-log-based-architecture-the-foundation-of-kafkas-data-model","title":"1. Log-Based Architecture: The Foundation of Kafka\u2019s Data Model","text":"

Kafka\u2019s log-based architecture is what makes it fundamentally different from traditional messaging systems. It\u2019s built on the concept of a distributed, partitioned, and immutable log, allowing Kafka to scale, preserve data ordering, and enable consumers to replay data as needed. Here\u2019s a deep dive into what this architecture entails and why it\u2019s special.

"},{"location":"techdives/DistrubutedSystems/Kafka/#how-log-based-architecture-works","title":"How Log-Based Architecture Works","text":"
  1. Topics and Partitions as Logs:
     - In Kafka, each topic is split into partitions, and each partition acts as an independent log.
     - Within a partition, messages are appended sequentially in an ordered, immutable fashion, with each message assigned an offset (a unique, incremental ID).
     - Offsets serve as pointers to each message\u2019s position within the log, making it easy for consumers to track their progress.

  2. Immutable Log Storage:
     - Messages in each partition are stored as an append-only log\u2014once written, messages cannot be modified or deleted (unless retention policies specify otherwise).
     - This immutability provides consistency and durability, as every message has a fixed position within the partition log.

  3. Replayable and Persistent Data:
     - Kafka\u2019s log structure allows consumers to replay messages from any offset within the retention period. Consumers can reread data for recovery, reprocessing, or analytics without impacting other consumers or producers.
     - Since Kafka retains messages based on time or size-based retention policies, consumers can pick up data where they left off or reprocess older data without affecting ongoing data flows.
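
This replayability is visible in the consumer API, which can rewind to any retained offset. A minimal sketch, where the topic name and partition number are placeholders:

import java.time.Duration;
import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;

public class ReplayFromOffset {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "replay-demo");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            TopicPartition partition = new TopicPartition("orders", 0);
            consumer.assign(List.of(partition)); // manual assignment, no group rebalance
            consumer.seek(partition, 0L);        // rewind to the start of the retained log
            for (ConsumerRecord<String, String> r : consumer.poll(Duration.ofSeconds(5))) {
                System.out.println(r.offset() + " -> " + r.value());
            }
        }
    }
}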
"},{"location":"techdives/DistrubutedSystems/Kafka/#why-log-based-architecture-makes-kafka-unique","title":"Why Log-Based Architecture Makes Kafka Unique","text":""},{"location":"techdives/DistrubutedSystems/Kafka/#kafkas-log-based-architecture-in-practice","title":"Kafka\u2019s Log-Based Architecture in Practice","text":""},{"location":"techdives/DistrubutedSystems/Kafka/#2-high-throughput-with-low-latency-design","title":"2. High-Throughput with Low-Latency Design","text":"

Kafka\u2019s design is optimized for handling millions of events per second with minimal delay, even under heavy load. This high throughput and low latency are achieved through a combination of disk I/O optimizations, data compression, and efficient network handling. Let\u2019s explore these in detail.

"},{"location":"techdives/DistrubutedSystems/Kafka/#key-components-of-kafkas-high-throughput-low-latency-design","title":"Key Components of Kafka\u2019s High-Throughput, Low-Latency Design","text":"
  1. Sequential Disk I/O:
     - Kafka writes messages to disk sequentially rather than performing random writes. This significantly reduces seek time, as the disk head doesn\u2019t need to jump around to write or read data.
     - Sequential writes take advantage of modern disks\u2019 ability to handle high-speed sequential I/O, especially in SSDs, allowing Kafka to process large volumes of data quickly.

  2. Page Cache Usage:
     - Kafka leverages the OS\u2019s page cache to keep frequently accessed data in memory. By utilizing page cache, Kafka avoids direct disk reads for recently accessed data, reducing read latency.
     - For producers writing data, Kafka batches messages in memory before flushing them to disk, improving throughput by reducing the number of disk write operations.

  3. Zero-Copy Data Transfer:
     - Kafka uses zero-copy technology, specifically the sendfile() system call, to transfer data directly from disk to network sockets without additional memory copies.
     - This allows Kafka to handle network I/O with minimal CPU usage, reducing latency and increasing throughput, especially for large messages.

  4. Batching and Compression:
     - Kafka batches multiple messages into a single disk write or network packet, minimizing the number of I/O operations required.
     - Batching not only increases efficiency but also improves compression rates by reducing network overhead. Kafka supports Gzip, Snappy, and LZ4 compression, reducing data size and thus speeding up data transfer.

  5. Network Optimization for Low Latency:
     - Kafka\u2019s network layer is designed to handle high-speed data transfer between producers, brokers, and consumers. Kafka supports asynchronous data processing, allowing producers to continue sending messages without waiting for each acknowledgment, which further reduces latency.
     - Kafka brokers are stateful, meaning they store partition offsets and metadata locally, reducing the need to rely on external systems for routing information. This minimizes delays in routing messages to the correct consumers.
"},{"location":"techdives/DistrubutedSystems/Kafka/#why-kafkas-throughput-and-latency-make-it-special","title":"Why Kafka\u2019s Throughput and Latency Make It Special","text":""},{"location":"techdives/DistrubutedSystems/Kafka/#kafkas-high-throughput-low-latency-design-in-practice","title":"Kafka\u2019s High-Throughput, Low-Latency Design in Practice","text":""},{"location":"techdives/DistrubutedSystems/Kafka/#summary-of-kafkas-unique-log-based-architecture-and-high-throughput-design","title":"Summary of Kafka\u2019s Unique Log-Based Architecture and High-Throughput Design","text":"Aspect Log-Based Architecture High-Throughput with Low-Latency Design Core Principle Immutable, append-only logs for each partition. Optimized for sequential writes, memory management, and efficient data transfer. Data Storage Messages are stored as ordered, append-only logs in each partition, ensuring data immutability and ordering. Batching, compression, and zero-copy transfer ensure efficient storage and minimal latency in data handling. Fault Tolerance Replication across brokers enables resilience against broker failures, as partitions can be replayed and failover to available replicas. Brokers use page cache and zero-copy I/O to keep data transfer reliable under heavy load, even in the case of high data volumes. Parallelism and Scalability Partitions allow Kafka to scale horizontally, providing parallel processing across producers and consumers, with strict ordering within each partition. Sequential I/O and batching enable Kafka to handle high-throughput workloads, supporting massive parallelism across clients. Use Cases Event sourcing, state recovery, and audit logs where ordered, immutable data is essential. Real-time monitoring, analytics, and streaming data pipelines that require fast ingestion and minimal latency."},{"location":"techdives/DistrubutedSystems/Kafka/#recommended-kafka-settings","title":"Recommended Kafka Settings","text":"

The recommended settings are organized below into individual tables by section. Each table summarizes the key settings along with recommended values and explanations.

"},{"location":"techdives/DistrubutedSystems/Kafka/#1-broker-level-settings","title":"1. Broker-Level Settings","text":"Setting Recommended Value Purpose replication.factor 3 Ensures redundancy and fault tolerance, allowing partition recovery if a broker fails. min.insync.replicas 2 Reduces data loss risk by ensuring at least two replicas acknowledge writes. num.partitions 6 (or based on load) Balances throughput and scalability; more partitions allow greater parallel processing. log.retention.hours 168 (7 days) Controls how long messages are retained; suitable for standard processing and replay needs. log.segment.bytes 1 GB Manages the segment size for optimal disk usage and performance in log rolling. log.cleanup.policy delete or compact delete for default; compact for retaining only the latest version of each key. compression.type producer, snappy, or lz4 Saves bandwidth and improves throughput, especially under high data volume conditions."},{"location":"techdives/DistrubutedSystems/Kafka/#2-producer-level-settings","title":"2. Producer-Level Settings","text":"Setting Recommended Value Purpose acks all Ensures that all in-sync replicas acknowledge writes, increasing reliability. retries Integer.MAX_VALUE Handles transient network issues by allowing indefinite retries, preventing message loss. retry.backoff.ms 100 Introduces a pause between retries, avoiding retry flooding and improving stability. enable.idempotence true Prevents duplicate messages by ensuring exactly-once semantics in data delivery. batch.size 32 KB Enhances throughput by accumulating records into batches before sending them. linger.ms 5 Small linger time allows batches to fill, reducing network overhead without delaying sends. compression.type snappy or lz4 Compresses data to reduce payload size, saving bandwidth and reducing transfer time."},{"location":"techdives/DistrubutedSystems/Kafka/#3-consumer-level-settings","title":"3. Consumer-Level Settings","text":"Setting Recommended Value Purpose auto.offset.reset earliest Ensures consumers start reading from the beginning if no offset is committed. max.poll.records 500 Controls the batch size per poll, balancing throughput and processing time. session.timeout.ms 30,000 Provides enough time for consumers to process data without triggering unnecessary rebalances. heartbeat.interval.ms 10,000 Sets the interval for heartbeat checks within the session timeout, reducing rebalance triggers. fetch.min.bytes 1 MB Improves fetch efficiency by waiting for a minimum data size before retrieving. fetch.max.bytes 50 MB Enables large batch sizes for high-throughput consumers, reducing network calls. enable.auto.commit false Disables automatic offset commits, allowing applications to commit only after processing."},{"location":"techdives/DistrubutedSystems/Kafka/#4-cluster-level-settings-for-high-availability-and-fault-tolerance","title":"4. Cluster-Level Settings for High Availability and Fault Tolerance","text":"Setting Recommended Value Purpose min.insync.replicas 2 Ensures at least two replicas must be in sync, providing better durability. controlled.shutdown.enable true Enables controlled shutdown, allowing brokers to gracefully transition leadership. unclean.leader.election.enable false Prevents out-of-sync replicas from being elected as leaders, protecting data consistency. num.network.threads 8 Increases concurrency for network traffic, supporting high-throughput applications. num.io.threads 8 Increases I/O concurrency, allowing efficient data transfer under heavy load. 
num.replica.fetchers 4 Enhances replication speed by allowing multiple fetcher threads for synchronizing replicas."},{"location":"techdives/DistrubutedSystems/Kafka/#5-zookeeper-settings","title":"5. Zookeeper Settings","text":"Setting Recommended Value Purpose zookeeper.session.timeout.ms 18,000 Prevents frequent Zookeeper disconnections during high loads, stabilizing metadata handling. zookeeper.connection.timeout.ms 6,000 Ensures reliable connections to Zookeeper, reducing the likelihood of leader election issues."},{"location":"techdives/DistrubutedSystems/Kafka/#6-kraft-kafka-raft-settings","title":"6. KRaft (Kafka Raft) Settings","text":"Setting Recommended Value Purpose process.roles broker,controller Defines the roles of Kafka nodes. A KRaft cluster typically has combined broker and controller roles, but can be split if desired. controller.quorum.voters List of controllers Specifies the list of controller nodes in the form nodeID@hostname:port, where each entry represents a voter in the Raft consensus group. controller.listener.names CONTROLLER Designates the listener name for inter-controller communication in the Raft quorum. controller.heartbeat.interval.ms 2,000 Sets the interval between heartbeats for controller nodes, ensuring they stay connected and responsive within the Raft quorum. controller.metrics.sample.window.ms 30,000 Configures the window size for collecting metrics, helping to monitor Raft performance over time. controller.log.dirs /path/to/controller/logs Specifies the directory where controller logs are stored. It\u2019s best to use a dedicated disk for controller logs to avoid I/O contention with brokers. metadata.log.segment.bytes 1 GB Controls the segment size for metadata logs, managing disk usage and log rolling frequency for metadata in KRaft mode. metadata.log.retention.bytes -1 (unlimited) Configures metadata log retention based on disk space, allowing infinite retention by default. Adjust based on available storage. metadata.log.retention.ms 604,800,000 (7 days) Retains metadata for a set duration; typically configured for a week to enable rollback in case of issues. controller.socket.timeout.ms 30,000 Sets the timeout for controller-to-controller connections, ensuring stability during network issues. leader.imbalance.check.interval.seconds 300 (5 minutes) Defines the interval at which the controller checks for leader imbalance, helping to maintain even load distribution across brokers.
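To make the producer-level settings above concrete, here is a minimal sketch using the kafka-python client (the client choice, broker address, and topic name are assumptions; any client exposing these settings works the same way). enable.idempotence appears in the table but is omitted here because support for it varies across client versions.

from kafka import KafkaProducer  # pip install kafka-python (assumed client)\n\nproducer = KafkaProducer(\n    bootstrap_servers=\"localhost:9092\",  # assumed broker address\n    acks=\"all\",                # all in-sync replicas must acknowledge\n    retries=2147483647,         # effectively Integer.MAX_VALUE for transient failures\n    retry_backoff_ms=100,       # pause between retries\n    batch_size=32 * 1024,       # 32 KB batches\n    linger_ms=5,                # let batches fill briefly before sending\n    compression_type=\"lz4\",   # compress payloads to save bandwidth\n)\nproducer.send(\"events\", b\"payload\")  # topic name assumed\nproducer.flush()\n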

Reference Links:

https://www.hellointerview.com/learn/system-design/deep-dives/kafka

"},{"location":"techdives/DistrubutedSystems/Redis/","title":"Redis","text":"

Redis is an open-source, in-memory data structure store that serves as a database, cache, message broker, and streaming engine. Its versatility and high performance make it a popular choice for various applications. This comprehensive guide delves into Redis's architecture, data structures, commands, deployment strategies, and best practices.

"},{"location":"techdives/DistrubutedSystems/Redis/#1-introduction-to-redis","title":"1. Introduction to Redis","text":"

Redis, short for Remote Dictionary Server, is renowned for its speed and flexibility. Operating primarily in memory, it supports various data structures and offers features like replication, persistence, and high availability.

"},{"location":"techdives/DistrubutedSystems/Redis/#2-core-data-structures","title":"2. Core Data Structures","text":""},{"location":"techdives/DistrubutedSystems/Redis/#21-strings","title":"2.1. Strings","text":"

Definition: A string in Redis is a binary-safe sequence of bytes. It's the most basic data type in Redis, with a maximum size of 512 MB.

Examples: - Set and get a simple string:

SET key \"Hello, World!\"\nGET key\n
- Increment a numerical value:
SET counter 10\nINCR counter      // Result: 11\nINCRBY counter 5  // Result: 16\n

Underlying Data Structure: Dynamic String (SDS - Simple Dynamic String)

Time Complexity: - SET key value: O(1) - GET key: O(1) - INCR key: O(1)

Best Use Cases: - Caching: Store frequently accessed values. - Counters: Track counts for metrics or events. - Session Data: Store serialized JSON for user sessions.

"},{"location":"techdives/DistrubutedSystems/Redis/#22-hashes","title":"2.2. Hashes","text":"

Definition: A hash in Redis is a collection of field-value pairs, ideal for representing objects (e.g., user profiles).

Examples: - Creating and managing user data in a hash:

HSET user:1001 name \"Alice\" age 30 city \"New York\"\nHGET user:1001 name           // Returns \"Alice\"\nHGETALL user:1001             // Returns all key-value pairs in hash\n

Underlying Data Structure: Hash Table or ZipList (for small hashes)

Time Complexity: - HSET key field value: O(1) (amortized) - HGET key field: O(1) - HGETALL key: O(N) (N being the number of fields in the hash)

Best Use Cases: - Storing Objects: Represent complex entities. - Configuration Settings: Store configurations as a set of key-value pairs.

"},{"location":"techdives/DistrubutedSystems/Redis/#23-lists","title":"2.3. Lists","text":"

Definition: A list is an ordered collection of strings, allowing elements to be added at either end, functioning much like a linked list.

Examples: - Using a list to store recent activity:

LPUSH recent_activity \"login\" \"view_profile\" \"logout\"\nLRANGE recent_activity 0 -1   // Fetches all elements in the list\n

Underlying Data Structure: Linked List or QuickList (optimized for performance and memory usage)

Time Complexity: - LPUSH key value: O(1) - LRANGE key start stop: O(S+N) (S being the starting offset and N the number of elements retrieved)

Best Use Cases: - Activity Streams: Store recent actions or logs. - Task Queues: Implement FIFO or LIFO queues.

"},{"location":"techdives/DistrubutedSystems/Redis/#24-sets","title":"2.4. Sets","text":"

Definition: Sets are unordered collections of unique strings, ideal for performing set operations.

Examples: - Managing unique tags:

SADD tags \"redis\" \"database\" \"in-memory\"\nSMEMBERS tags                 // Returns all unique tags\n

Underlying Data Structure: Hash Table or IntSet (for small sets of integers)

Time Complexity: - SADD key value: O(1) - SMEMBERS key: O(N) (N being the number of elements in the set) - SINTER key1 key2 ... keyN: O(N*M) (N being the number of sets and M the smallest set)

Best Use Cases: - Unique Values: Track unique items like IP addresses. - Social Networks: Represent social relationships (e.g., friends, followers).

"},{"location":"techdives/DistrubutedSystems/Redis/#25-sorted-sets","title":"2.5. Sorted Sets","text":"

Definition: Similar to sets, but with an associated score that allows elements to be sorted by score.

Examples: - Storing leaderboard data:

ZADD leaderboard 100 \"Alice\" 200 \"Bob\"\nZRANGE leaderboard 0 -1 WITHSCORES\n

Underlying Data Structure: Skip List and Hash Table (for fast access and sorted ordering)

Time Complexity: - ZADD key score member: O(log(N)) - ZRANGE key start stop: O(log(N)+M) (M being the number of elements returned)

Best Use Cases: - Leaderboards: Rank users based on scores. - Event Prioritization: Sort items by priority or timestamp.

"},{"location":"techdives/DistrubutedSystems/Redis/#26-bitmaps","title":"2.6. Bitmaps","text":"

Definition: Bitmaps use strings to store and manipulate individual bits, offering efficient binary storage.

Examples: - Tracking user flags:

SETBIT user_flags 5 1         // Sets the 6th bit to 1\nGETBIT user_flags 5           // Returns 1\n

Underlying Data Structure: String (each bit is set or retrieved from the byte representation)

Time Complexity: - SETBIT key offset value: O(1) - GETBIT key offset: O(1) - BITCOUNT key: O(N) (N being the length of the string)

Best Use Cases: - Feature Flags: Toggle features for users. - Activity Tracking: Record binary states like presence or attendance.

"},{"location":"techdives/DistrubutedSystems/Redis/#27-hyperloglogs","title":"2.7. HyperLogLogs","text":"

Definition: HyperLogLog is a probabilistic structure for approximating unique element counts.

Examples: - Counting unique visitors:

PFADD visitors \"user1\" \"user2\"\nPFCOUNT visitors             // Returns approximate unique count\n

Underlying Data Structure: Sparse and Dense Data Representations (optimized for low memory usage)

Time Complexity: - PFADD key element: O(1) - PFCOUNT key: O(1)

Best Use Cases: - Unique Counting: Approximate counts of unique views or visitors. - Low-Memory Use: Ideal for large datasets with memory constraints.

"},{"location":"techdives/DistrubutedSystems/Redis/#28-streams","title":"2.8. Streams","text":"

Definition: A stream is a log-like data structure for managing continuous data flows, supporting consumer groups.

Examples: - Tracking event streams:

XADD mystream * name \"Alice\" action \"login\"\nXREAD COUNT 2 STREAMS mystream 0\n

Underlying Data Structure: Radix Tree (used for efficient storage and traversal of stream entries)

Time Complexity: - XADD key * field value: O(log(N)) (N being the number of items in the stream) - XREAD key start stop: O(log(N)+M) (M being the number of items returned)

Best Use Cases: - Event Sourcing: Track ordered events or logs. - Message Queues: Reliable message distribution with consumer groups.

"},{"location":"techdives/DistrubutedSystems/Redis/#29-geospatial-indexes","title":"2.9. Geospatial Indexes","text":"

Definition: Redis provides commands for storing and querying location data with latitude and longitude.

Examples: - Adding and querying locations:

GEOADD cities 13.361389 38.115556 \"Palermo\"\nGEORADIUS cities 15 37 200 km\n

Underlying Data Structure: Geohash with Sorted Sets (uses sorted sets for indexing)

Time Complexity: - GEOADD key longitude latitude member: O(log(N)) - GEORADIUS key longitude latitude radius: O(log(N)+M) (M being the number of results)

Best Use Cases: - Location-Based Services: Search and display nearby locations. - Geofencing: Detect whether users enter specific geographic zones.

"},{"location":"techdives/DistrubutedSystems/Redis/#3-commands-table","title":"3. Commands Table","text":"Data Structure Definition Example Commands Best Use Cases Time Complexity Strings Binary-safe sequences of bytes for text or binary data. SET key value, GET key, INCR key, DECR key, APPEND key value, STRLEN key Caching, counters, session data SET: O(1), GET: O(1), INCR: O(1), APPEND: O(N) Hashes Collection of key-value pairs, suitable for objects. HSET key field value, HGET key field, HGETALL key, HDEL key field, HLEN key Storing objects, configuration settings HSET: O(1), HGET: O(1), HGETALL: O(N), HLEN: O(1) Lists Ordered collection of strings, acts like a linked list. LPUSH key value, RPUSH key value, LPOP key, RPOP key, LRANGE key start stop, LLEN key Activity streams, task queues LPUSH: O(1), LRANGE: O(S+N), LLEN: O(1) Sets Unordered collections of unique strings, optimized for sets. SADD key value, SREM key value, SMEMBERS key, SISMEMBER key value, SUNION key1 key2, SINTER key1 key2 Unique values, social relationships SADD: O(1), SMEMBERS: O(N), SINTER: O(N*M) Sorted Sets Sets with scores, allowing elements to be sorted by score. ZADD key score member, ZRANGE key start stop WITHSCORES, ZREM key member, ZSCORE key member, ZREVRANGE key start stop, ZCOUNT key min max Leaderboards, event prioritization ZADD: O(log(N)), ZRANGE: O(log(N)+M), ZSCORE: O(1) Bitmaps Stores and manipulates bits in a binary-safe string. SETBIT key offset value, GETBIT key offset, BITCOUNT key, BITOP operation destkey key1 key2 Feature flags, activity tracking SETBIT: O(1), GETBIT: O(1), BITCOUNT: O(N) HyperLogLogs Probabilistic structure for approximate unique counts. PFADD key element, PFCOUNT key, PFMERGE destkey sourcekey1 sourcekey2 Unique counting, low-memory usage PFADD: O(1), PFCOUNT: O(1), PFMERGE: O(N) Streams Log-like structure for managing continuous data flows. XADD key * field value, XREAD COUNT n STREAMS key, XGROUP CREATE key group consumer_id, XACK key group message_id, XDEL key message_id, XINFO key, XLEN key, XTRIM key MAXLEN ~ count Event sourcing, message queues XADD: O(log(N)), XREAD: O(log(N)+M), XGROUP: O(1), XACK: O(1) Geospatial Indexes Stores and queries location data with latitude and longitude. GEOADD key longitude latitude member, GEODIST key member1 member2, GEORADIUS key longitude latitude radius m km, GEORADIUSBYMEMBER key member radius m km, GEOHASH key member Location-based services, geofencing GEOADD: O(log(N)), GEORADIUS: O(log(N)+M)"},{"location":"techdives/DistrubutedSystems/Redis/#4-persistence-and-durability","title":"4. Persistence and Durability","text":"

Redis operates primarily as an in-memory database, prioritizing speed and low-latency operations. However, it provides two main persistence mechanisms to ensure data durability:

RDB (snapshotting) Advantages: Lower I/O overhead, compact file size.

RDB Disadvantages: Risk of data loss between snapshots if Redis crashes.

AOF (Append-Only File) Advantages: Better durability, logs every operation for more frequent data persistence.

AOF Disadvantages: Larger file sizes, higher I/O usage.

Choosing Between RDB and AOF: You can use either method alone or both in combination, based on your application's needs. For example, using both allows rapid recovery (RDB) with high durability (AOF).
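As a minimal illustration, both mechanisms can be enabled at runtime (the values below are illustrative, not prescriptive):

CONFIG SET appendonly yes           // Enable AOF for durability\nCONFIG SET appendfsync everysec     // fsync roughly once per second (balanced)\nCONFIG SET save \"900 1 300 10\"    // RDB snapshots after 900s/1 change or 300s/10 changes\n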

"},{"location":"techdives/DistrubutedSystems/Redis/#5-replication-and-high-availability","title":"5. Replication and High Availability","text":"

Redis supports replication to create replicas of the primary (master) instance, enabling multiple read replicas and providing redundancy.

Best for: - Applications requiring high availability with automatic failover. - Scenarios where read-heavy workloads benefit from scaling reads across multiple replicas.

"},{"location":"techdives/DistrubutedSystems/Redis/#6-clustering-and-scalability","title":"6. Clustering and Scalability","text":"

Redis supports sharding through Redis Cluster, which enables data partitioning across multiple nodes, allowing horizontal scalability and distributed storage. Redis Cluster uses hash slots to determine data distribution across nodes, ensuring no single node contains the entire dataset.

Considerations: - Redis Cluster supports most single-key commands, but multi-key operations are restricted unless all keys map to the same slot. - Clustering can increase complexity in handling data operations across nodes but is essential for large datasets needing horizontal scalability.
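Because multi-key operations require all keys to map to the same slot, Redis supports hash tags: when a key contains a {...} section, only that substring is hashed, so keys sharing a tag land in the same slot. A small illustration (key names are assumptions):

SET {user:1001}:profile \"...\"\nSET {user:1001}:cart \"...\"\nMGET {user:1001}:profile {user:1001}:cart   // Same slot, so this multi-key read is allowed\n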

"},{"location":"techdives/DistrubutedSystems/Redis/#7-security-considerations","title":"7. Security Considerations","text":"

While Redis is often deployed in secure, private networks, security remains essential, especially for production environments:

"},{"location":"techdives/DistrubutedSystems/Redis/#8-client-libraries-and-tools","title":"8. Client Libraries and Tools","text":"

Redis has a robust ecosystem with libraries for popular programming languages, making it easy to integrate Redis across platforms:

Additionally, Redis CLI and Redis Insight are commonly used tools for managing and monitoring Redis instances.

"},{"location":"techdives/DistrubutedSystems/Redis/#9-rediss-single-threaded-nature-and-atomic-operations","title":"9. Redis\u2019s Single-Threaded Nature and Atomic Operations","text":"

Redis uses a single-threaded event loop to handle client requests, which keeps its operations simple and efficient. This single-threaded model has specific implications:

Impact on Use Cases: - In scenarios where atomicity is essential, such as counters or distributed locks, Redis's single-threaded nature provides strong consistency guarantees. - For CPU-bound workloads, Redis may be limited by its single-threaded design. However, since Redis is primarily I/O-bound, it scales well for read-heavy or network-intensive applications.

Benefits of Single-Threaded Execution: - Simplicity in design and implementation, eliminating race conditions. - Predictable performance with guaranteed atomicity.

Drawbacks: - Limited to single-core processing for request handling. For CPU-bound tasks, Redis's single-threading may become a bottleneck, but horizontal scaling (e.g., Redis Cluster) can help distribute the load.
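To make the atomicity point concrete, here is a minimal fixed-window rate limiter sketch using the redis-py client (the client, key naming, and limits are assumptions for illustration):

import redis\n\nr = redis.Redis()  # assumed local instance\n\ndef allow_request(user_id, limit=100, window=60):\n    key = f\"rate:{user_id}\"\n    count = r.incr(key)        # atomic even with many concurrent clients\n    if count == 1:\n        r.expire(key, window)  # start the window on the first hit\n    return count <= limit\n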

"},{"location":"techdives/DistrubutedSystems/Redis/#10-approximate-requests-per-second-rps","title":"10. Approximate Requests Per Second (RPS)","text":"Operation Type Description Approximate RPS per Instance Notes Simple Reads (GET) Basic read operation for retrieving a single value 100,000 - 150,000 RPS Higher performance achievable on optimized hardware Simple Writes (SET) Basic write operation for setting a single key-value pair 100,000 - 150,000 RPS Slightly reduced if using AOF with always persistence Complex Reads (e.g., ZRANGE) Reads on complex data structures like sorted sets 50,000 - 80,000 RPS Lower due to additional computation and memory access Complex Writes (e.g., ZADD) Writes on complex data structures like sorted sets 50,000 - 80,000 RPS Additional processing to maintain sorted order impacts performance With AOF (Append-Only File) Writes with always mode persistence (AOF) 60,000 - 80,000 RPS Slightly reduced due to disk I/O overhead Snapshotting (RDB) Writes with periodic snapshots (RDB) 80,000 - 100,000 RPS Minimal impact on RPS except during snapshotting periods when CPU/I/O load is higher With Redis Cluster Distributed across multiple nodes Millions of RPS (scales with nodes) Redis Cluster allows horizontal scaling, increasing RPS proportionally with additional nodes"},{"location":"techdives/DistrubutedSystems/Redis/#notes","title":"Notes:","text":""},{"location":"techdives/DistrubutedSystems/Redis/#11-use-cases-we-can-use-redis-in","title":"11. Use Cases We Can Use Redis In","text":""},{"location":"techdives/DistrubutedSystems/Redis/#111-caching","title":"11.1. Caching","text":""},{"location":"techdives/DistrubutedSystems/Redis/#overview","title":"Overview","text":"

Redis is highly effective as a caching layer, providing extremely low-latency data retrieval that reduces the load on backend databases and improves application performance.

"},{"location":"techdives/DistrubutedSystems/Redis/#how-it-works","title":"How It Works","text":"
  1. Cache Common Data: Redis is commonly used to cache data that is expensive to compute or retrieve, such as API responses, frequently queried database results, and configuration settings (a cache-aside sketch follows this list).

  2. Expiration and Eviction: Redis supports configurable expiration for keys, which allows cached data to expire after a specific time. It also supports eviction policies (like LRU or LFU) to automatically remove older or less-used items when memory limits are reached.
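A minimal cache-aside sketch with redis-py (the client, the TTL, and the fetch_from_database helper are assumptions for illustration):

import json\n\nimport redis\n\nr = redis.Redis()\n\ndef get_user(user_id, ttl=300):\n    key = f\"user:{user_id}\"\n    cached = r.get(key)\n    if cached is not None:\n        return json.loads(cached)          # cache hit\n    user = fetch_from_database(user_id)    # hypothetical backend call\n    r.setex(key, ttl, json.dumps(user))    # cache with expiration\n    return user\n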

"},{"location":"techdives/DistrubutedSystems/Redis/#implementation-steps","title":"Implementation Steps","text":""},{"location":"techdives/DistrubutedSystems/Redis/#benefits","title":"Benefits","text":""},{"location":"techdives/DistrubutedSystems/Redis/#112-session-management","title":"11.2. Session Management","text":""},{"location":"techdives/DistrubutedSystems/Redis/#overview_1","title":"Overview","text":"

Redis is commonly used as a session store for web applications, especially in distributed environments where sharing session data across multiple servers is critical.

"},{"location":"techdives/DistrubutedSystems/Redis/#how-it-works_1","title":"How It Works","text":"
  1. Store Session Data: Redis stores session data, often as a hash, using a unique session identifier as the key.
  2. Session Expiry: Redis supports setting time-to-live (TTL) for session keys, allowing automatic expiration of inactive sessions.
  3. Distributed Access: Applications running on multiple servers can access the same session data via Redis, providing a centralized session store (a write sketch follows this list).
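A minimal session-write sketch with redis-py (the session id, fields, and TTL are assumed values; the mapping argument needs redis-py 3.5+):

import redis\n\nr = redis.Redis()\n\nsession_id = \"session:abc123\"  # assumed identifier\nr.hset(session_id, mapping={\"user_id\": 1001, \"role\": \"admin\"})\nr.expire(session_id, 1800)       # drop the session after 30 minutes of inactivity\n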
"},{"location":"techdives/DistrubutedSystems/Redis/#implementation-steps_1","title":"Implementation Steps","text":""},{"location":"techdives/DistrubutedSystems/Redis/#benefits_1","title":"Benefits","text":""},{"location":"techdives/DistrubutedSystems/Redis/#113-real-time-analytics","title":"11.3. Real-Time Analytics","text":""},{"location":"techdives/DistrubutedSystems/Redis/#overview_2","title":"Overview","text":"

Redis\u2019s support for data structures like HyperLogLogs, sorted sets, and streams enables it to handle real-time analytics, tracking metrics, counts, and trends without requiring a traditional database.

"},{"location":"techdives/DistrubutedSystems/Redis/#how-it-works_2","title":"How It Works","text":"
  1. HyperLogLog for Unique Counts: Track unique visitors, page views, and other metrics using HyperLogLog, which approximates the count of unique items.
  2. Sorted Sets for Ranking: Track and rank items based on scores, useful for leaderboards or tracking user activity levels.
  3. Streams for Event Data: Redis streams can capture continuous event data, making it possible to analyze data in real time or replay it later (a sketch follows this list).
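A small sketch combining these structures with redis-py (metric and key names are illustrative):

import redis\n\nr = redis.Redis()\n\nr.pfadd(\"visitors:2024-12-31\", \"user1\", \"user2\")   # approximate unique visitors\nunique = r.pfcount(\"visitors:2024-12-31\")\n\nr.zincrby(\"leaderboard\", 10, \"Alice\")                # bump Alice's score by 10\ntop10 = r.zrevrange(\"leaderboard\", 0, 9, withscores=True)\n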
"},{"location":"techdives/DistrubutedSystems/Redis/#implementation-steps_2","title":"Implementation Steps","text":""},{"location":"techdives/DistrubutedSystems/Redis/#benefits_2","title":"Benefits","text":""},{"location":"techdives/DistrubutedSystems/Redis/#114-message-brokering","title":"11.4. Message Brokering","text":""},{"location":"techdives/DistrubutedSystems/Redis/#overview_3","title":"Overview","text":"

Redis\u2019s publish/subscribe (pub/sub) feature allows it to act as a lightweight message broker, facilitating real-time communication between distributed applications.

"},{"location":"techdives/DistrubutedSystems/Redis/#how-it-works_3","title":"How It Works","text":"
  1. Publisher: A service or application publishes messages to a specific channel.
  2. Subscriber: Other services or applications subscribe to that channel to receive messages.
  3. Message Delivery: Messages are delivered to all active subscribers listening to the channel at the time of publication (a sketch follows this list).
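A minimal pub/sub sketch with redis-py (the channel name is an assumption):

import redis\n\nr = redis.Redis()\n\n# Subscriber side\np = r.pubsub()\np.subscribe(\"notifications\")\n\n# Publisher side (typically another process or connection)\nr.publish(\"notifications\", \"order 42 shipped\")\n\nfor message in p.listen():  # yields a subscribe confirmation first, then messages\n    if message[\"type\"] == \"message\":\n        print(message[\"data\"])\n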
"},{"location":"techdives/DistrubutedSystems/Redis/#implementation-steps_3","title":"Implementation Steps","text":""},{"location":"techdives/DistrubutedSystems/Redis/#example-commands","title":"Example Commands","text":""},{"location":"techdives/DistrubutedSystems/Redis/#benefits_3","title":"Benefits","text":""},{"location":"techdives/DistrubutedSystems/Redis/#115-geospatial-applications","title":"11.5. Geospatial Applications","text":""},{"location":"techdives/DistrubutedSystems/Redis/#overview_4","title":"Overview","text":"

Redis provides geospatial commands that make it suitable for applications requiring location-based searches and geofencing, such as ride-sharing or delivery tracking.

Redis handles geospatial data with a geohashing-like approach combined with sorted sets: latitude and longitude coordinates are converted into a geohash-like value, which is then stored as the score in a sorted set. This encoding stores location data compactly and enables efficient proximity queries.

"},{"location":"techdives/DistrubutedSystems/Redis/#how-it-works_4","title":"How It Works","text":"
  1. Store Location Data: Use GEOADD to add locations with latitude, longitude, and an associated member (e.g., user ID or landmark).
  2. Location-Based Queries: Redis allows querying locations within a specified radius and finding distances between locations.
"},{"location":"techdives/DistrubutedSystems/Redis/#implementation-steps_4","title":"Implementation Steps","text":""},{"location":"techdives/DistrubutedSystems/Redis/#example-commands_1","title":"Example Commands","text":""},{"location":"techdives/DistrubutedSystems/Redis/#benefits_4","title":"Benefits","text":""},{"location":"techdives/DistrubutedSystems/Redis/#116-summary-table","title":"11.6 Summary Table","text":"Use Case Description Key Commands Benefits Caching Store frequently accessed data for faster retrieval. SETEX, GET, DEL Reduces latency, lowers database load Session Management Store user sessions for distributed web applications. HSET, HGETALL, EXPIRE Fast, centralized session access across servers Real-Time Analytics Track metrics, counts, and trends in real time. PFADD, PFCOUNT, ZADD, XADD, XREAD Provides instant insights, reduces need for dedicated platforms Message Brokering Facilitate real-time communication between services. PUBLISH, SUBSCRIBE Real-time updates, lightweight message broker Geospatial Apps Perform location-based searches and calculations. GEOADD, GEORADIUS, GEORADIUSBYMEMBER Efficient geospatial operations for location-based services"},{"location":"techdives/DistrubutedSystems/Redis/#12-redis-issues","title":"12. Redis Issues","text":"

Let's dive deep into some key challenges, such as hot key issues, cache avalanche, cache penetration, cache stampede, and their corresponding solutions.

"},{"location":"techdives/DistrubutedSystems/Redis/#121-hot-key-issue","title":"12.1. Hot Key Issue","text":""},{"location":"techdives/DistrubutedSystems/Redis/#description","title":"Description","text":"

A hot key issue occurs when a single key in Redis is accessed extremely frequently, causing uneven load distribution. This can happen in applications where certain data (e.g., a trending topic or popular product) is heavily requested. A hot key can overwhelm the Redis server or specific nodes in a Redis Cluster, leading to latency spikes and reduced performance.

"},{"location":"techdives/DistrubutedSystems/Redis/#causes","title":"Causes","text":""},{"location":"techdives/DistrubutedSystems/Redis/#solutions","title":"Solutions","text":"
  1. Replicate the Key: Store multiple copies of the hot key in Redis (e.g., hotkey_1, hotkey_2, hotkey_3), then use application logic to randomly pick a replica each time the key is accessed. This distributes the load across multiple keys (a sketch follows this list).

  2. Use Redis Cluster: In a Redis Cluster, distribute the load by sharding hot keys across nodes. This may not completely eliminate the issue, but it can help mitigate its impact by spreading access across the cluster.

  3. Client-Side Caching: Implement a local cache on the client side or within the application servers to reduce the frequency of requests to Redis. This technique works well when the data is static or changes infrequently.

  4. Use a Load-Balancing Proxy: Use a Redis proxy (like Twemproxy or Codis) to balance requests to the hot key across multiple Redis instances.
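A sketch of the key-replication idea from solution 1 with redis-py (the replica count and key names are assumptions):

import random\n\nimport redis\n\nr = redis.Redis()\n\nREPLICAS = 3  # hotkey_1 .. hotkey_3 are written by the producer side\n\ndef read_hot_key(base_key=\"hotkey\"):\n    # Picking a replica at random spreads reads across keys (and cluster slots)\n    return r.get(f\"{base_key}_{random.randint(1, REPLICAS)}\")\n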
"},{"location":"techdives/DistrubutedSystems/Redis/#122-cache-avalanche","title":"12.2. Cache Avalanche","text":""},{"location":"techdives/DistrubutedSystems/Redis/#description_1","title":"Description","text":"

A cache avalanche occurs when many cache keys expire at once, leading to a sudden flood of requests to the backend database as the cache misses accumulate. This can overwhelm the database, causing latency spikes or even outages.

"},{"location":"techdives/DistrubutedSystems/Redis/#causes_1","title":"Causes","text":""},{"location":"techdives/DistrubutedSystems/Redis/#solutions_1","title":"Solutions","text":"
  1. Add Randomized Expiry Times: Set expiration times with a randomized offset (e.g., add a few seconds or minutes randomly) to avoid simultaneous expiry. For example:

    import random\nttl = 3600 + random.randint(-300, 300)  # 3600 seconds +/- 5 minutes\n

  2. Cache Pre-Warming: Preload critical data into Redis before it expires. You can use background jobs to check key expiration and refresh data periodically.

  3. Lazy Loading with Synchronized Locking: Use a distributed locking mechanism to ensure that only one thread refreshes the data in Redis while others wait. This prevents multiple processes from overloading the backend database.

  4. Fallback / Graceful Degradation: Implement a mechanism that provides stale or default data temporarily if the database is overwhelmed. This approach buys time until the cache is repopulated.
"},{"location":"techdives/DistrubutedSystems/Redis/#123-cache-penetration","title":"12.3. Cache Penetration","text":""},{"location":"techdives/DistrubutedSystems/Redis/#description_2","title":"Description","text":"

Cache penetration happens when requests for non-existent keys repeatedly bypass the cache and go to the backend database. Since these keys don\u2019t exist, they are never cached, resulting in continuous database requests, increasing the load on the database.

"},{"location":"techdives/DistrubutedSystems/Redis/#causes_2","title":"Causes","text":""},{"location":"techdives/DistrubutedSystems/Redis/#solutions_2","title":"Solutions","text":"
  1. Cache Null Values: When a request results in a database miss, store a placeholder value in Redis with a short TTL (e.g., 5 minutes). Future requests for the same key will hit Redis instead of the database. Example:

    if not redis.exists(\"non_existent_key\"):\n    data = fetch_from_database(\"non_existent_key\")\n    if data is None:\n        redis.setex(\"non_existent_key\", 300, \"\")  # Cache an empty marker for 5 minutes\n

  2. Input Validation: Filter out clearly invalid requests before querying Redis or the backend. For instance, if certain key patterns are obviously invalid, ignore them early in the request flow.

  3. Bloom Filter: Implement a Bloom filter at the cache layer to quickly determine whether a key likely exists in the database. This reduces unnecessary database calls by discarding requests for non-existent keys without hitting Redis or the backend.
"},{"location":"techdives/DistrubutedSystems/Redis/#124-cache-stampede","title":"12.4. Cache Stampede","text":""},{"location":"techdives/DistrubutedSystems/Redis/#description_3","title":"Description","text":"

A cache stampede occurs when multiple threads or clients attempt to update an expired cache key simultaneously, causing a burst of requests to the backend database. This is similar to a cache avalanche but occurs at the key level rather than across all keys.

"},{"location":"techdives/DistrubutedSystems/Redis/#causes_3","title":"Causes","text":""},{"location":"techdives/DistrubutedSystems/Redis/#solutions_3","title":"Solutions","text":"
  1. Mutex Locking: Use a distributed lock (e.g., Redlock) to ensure that only one client refreshes the cache while others wait. This reduces the load on the database:

    # Pseudocode for acquiring a lock; NX plus an expiry avoids a stuck lock if this client crashes\nif redis.set(\"lock:key\", 1, nx=True, ex=30):\n    try:\n        # Fetch and cache the data\n        data = fetch_from_database(\"key\")\n        redis.setex(\"key\", 3600, data)\n    finally:\n        redis.delete(\"lock:key\")  # Release the lock\n

  2. Early Re-Caching (Soft Expiration): Implement soft expiration by setting a short expiration on frequently requested keys and refreshing them asynchronously before they expire. This keeps the data fresh in Redis and avoids a stampede.

  3. Leverage Stale Data: Allow clients to use slightly stale data by extending the expiration time if a refresh is already in progress. This minimizes the load on the backend.
"},{"location":"techdives/DistrubutedSystems/Redis/#125-memory-and-eviction-issues","title":"12.5. Memory and Eviction Issues","text":""},{"location":"techdives/DistrubutedSystems/Redis/#description_4","title":"Description","text":"

Redis operates in memory, so it has a limited capacity. When Redis reaches its memory limit, it must evict keys to free up space, potentially removing critical data. Improper eviction policies can lead to cache churn and data inconsistency.

"},{"location":"techdives/DistrubutedSystems/Redis/#causes_4","title":"Causes","text":""},{"location":"techdives/DistrubutedSystems/Redis/#solutions_4","title":"Solutions","text":"
  1. Choose an Appropriate Eviction Policy: Redis offers multiple eviction policies (noeviction, allkeys-lru, volatile-lru, allkeys-lfu, etc.). Choose one that matches your data access patterns (a configuration example follows this list). For instance:

    • LRU (Least Recently Used): Removes least recently accessed keys, suitable for caching.
    • LFU (Least Frequently Used): Removes keys that are less frequently accessed.

  2. Optimize Data Size: Reduce the memory footprint by optimizing data storage, such as using shorter key names or serializing data efficiently (e.g., storing integers directly rather than as strings).

  3. Monitor and Scale: Continuously monitor Redis memory usage with tools like Redis CLI or Redis Insight. If memory usage grows, consider horizontal scaling with Redis Cluster.

  4. Use Redis as a Pure Cache: Configure Redis as a pure cache by setting appropriate TTLs on keys and using an eviction policy that maintains the most valuable data.
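For example, a memory cap and LRU eviction can be set at runtime (the values are illustrative):

CONFIG SET maxmemory 2gb\nCONFIG SET maxmemory-policy allkeys-lru   // Evict least recently used keys across the whole keyspace\n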
"},{"location":"techdives/DistrubutedSystems/Redis/#126-slow-queries-and-latency-issues","title":"12.6. Slow Queries and Latency Issues","text":""},{"location":"techdives/DistrubutedSystems/Redis/#description_5","title":"Description","text":"

Redis is designed for fast access, but certain operations can cause high latency, especially when handling large datasets or complex commands like ZRANGE on large sorted sets.

"},{"location":"techdives/DistrubutedSystems/Redis/#causes_5","title":"Causes","text":""},{"location":"techdives/DistrubutedSystems/Redis/#solutions_5","title":"Solutions","text":"
  1. Optimize Commands: Avoid commands that can block or are computationally expensive. For example, break large list processing into smaller ranges instead of processing the entire list.

  2. Monitor Slow Queries: Use the Redis Slow Log to identify and optimize slow commands. Redis provides insights into commands that exceed a specified execution time threshold (example commands follow this list).

  3. Use Sharding or Clustering: Split large datasets across multiple nodes in a Redis Cluster to balance the load and reduce the impact of slow commands on any single node.
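The Slow Log from solution 2 can be configured and inspected like this (the threshold value is illustrative):

CONFIG SET slowlog-log-slower-than 10000   // Log commands slower than 10,000 microseconds (10 ms)\nSLOWLOG GET 10                             // Show the 10 most recent slow commands\nSLOWLOG RESET                              // Clear the log after review\n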
"},{"location":"techdives/DistrubutedSystems/Redis/#13-tuning-redis","title":"13. Tuning Redis","text":"Metric Description Tuning Recommendations Commands/Tools Memory Usage Measures the memory consumed by Redis, including all data stored in memory. - Monitor and limit data size per key. - Use appropriate eviction policies (allkeys-lru, allkeys-lfu, etc.). - Compress data (e.g., shorter key names). - Use Redis MEMORY USAGE to check memory footprint of specific keys. INFO memory, MEMORY USAGE CPU Utilization CPU load on the Redis server, indicating overall processing load. - Reduce CPU-intensive commands (ZRANGE on large sets, large LRANGE operations). - Offload tasks to background or batch processing if possible. - Use pipelining for batch operations. System tools (e.g., top), INFO cpu Cache Hit Ratio The ratio of cache hits to total cache requests (ideally close to 1). - Identify hot keys and cache them effectively. - Increase Redis memory if hit ratio is low due to evictions. - Ensure sufficient TTL to avoid frequent cache misses. INFO stats Evicted Keys Number of keys evicted due to memory limits. - Increase available memory if eviction is high. - Choose an appropriate eviction policy (allkeys-lru, volatile-ttl, etc.). - Adjust key TTLs to prevent frequent eviction of important data. INFO memory Connected Clients Number of clients connected to Redis at any given time. - Increase the maxclients configuration if reaching limits. - Use client-side caching to reduce load on Redis. INFO clients, CLIENT LIST Latency (Command Time) Measures the average response time per command in milliseconds. - Avoid using blocking or heavy commands on large data sets. - Distribute large data across a Redis Cluster. - Monitor slow log for commands that exceed expected time. SLOWLOG GET, INFO commandstats Command Rate Rate of commands per second, which affects overall performance. - Spread load across multiple Redis instances if command rate is high. - Use pipelining to reduce round-trips. - Optimize or reduce the frequency of unnecessary commands. INFO stats Key Expirations Number of keys that expire per second. - Add randomized TTLs to prevent cache avalanches. - Pre-warm critical keys to avoid sudden cache misses. - Monitor TTL settings to ensure balanced expiration. INFO stats Replication Lag Delay in data synchronization between master and replica nodes. - Tune repl-backlog-size for better sync reliability. - Monitor network latency and throughput between master and replica. - Use Redis Sentinel for reliable failover. INFO replication, REPLCONF Data Persistence Durability How frequently Redis saves data to disk (AOF/RDB). - Use RDB for infrequent snapshots; use AOF for higher durability. - Tune AOF rewrite frequency (auto-aof-rewrite-percentage). - Adjust RDB save intervals based on data criticality. CONFIG SET save, CONFIG SET appendonly Keyspace Misses Number of attempts to access non-existent keys. - Cache null values temporarily for non-existent keys to reduce misses. - Add input validation to filter invalid requests. - Use Bloom filters for non-existent keys in high-traffic systems. INFO stats, MEMORY USAGE Redis Slow Log Logs slow-running commands that exceed a threshold. - Use SLOWLOG to monitor commands that exceed time limits. - Adjust commands and optimize keys based on slow log findings. - Tune slowlog-log-slower-than to track performance bottlenecks. SLOWLOG GET, SLOWLOG RESET Network Bandwidth Measures bandwidth usage, impacting latency and speed. 
- Use Redis clustering to reduce network load on a single instance. - Enable pipelining and compression where possible. - Monitor and minimize network latency for high-frequency queries. System tools (e.g., ifconfig), INFO Eviction Policy Determines which keys Redis evicts first when memory limit is reached. - Choose policies based on use case (allkeys-lru, allkeys-lfu for caching, volatile-ttl for expiring keys first). - Regularly review and adjust TTLs for key eviction optimization. CONFIG SET maxmemory-policy, INFO memory Persistence Overhead Memory and CPU impact due to persistence settings (RDB or AOF). - Adjust save intervals or AOF rewriting to reduce persistence load. - Use a combination of AOF and RDB if the application requires high durability with performance. INFO persistence, CONFIG SET save Cluster Slot Utilization Measures how well data is balanced across Redis Cluster slots. - Rebalance slots if certain nodes handle disproportionate load. - Use Redis Cluster sharding to ensure balanced key distribution. - Regularly monitor slots and reshard as needed. CLUSTER INFO, CLUSTER NODES, CLUSTER REBALANCE"},{"location":"techdives/DistrubutedSystems/Redis/#14-best-practices","title":"14. Best Practices","text":"

To make the most of Redis:

"},{"location":"techdives/DistrubutedSystems/Redis/#15-questions","title":"15. Questions","text":"

Here\u2019s a structured Q&A-style deep dive into Redis to address all of these technical aspects.

"},{"location":"techdives/DistrubutedSystems/Redis/#1-sql-or-nosql-if-nosql-what-type-of-nosql","title":"1. SQL or NoSQL? If NoSQL, what type of NoSQL?","text":"

Q: Is Redis an SQL or NoSQL database?

A: Redis is a NoSQL database. Specifically, it is a key-value store that supports various data structures (e.g., strings, hashes, lists, sets, sorted sets, streams, bitmaps, and geospatial indexes).

"},{"location":"techdives/DistrubutedSystems/Redis/#2-type-of-db-supports-polymorphic","title":"2. Type of DB \u2026 Supports Polymorphic?","text":"

Q: What type of NoSQL database is Redis, and does it support polymorphism?

A: Redis is a key-value in-memory data store with support for a variety of data structures. Redis does not natively support polymorphic types in the way that document-based NoSQL databases do, but you can achieve some level of polymorphism by encoding data in a structured way (e.g., JSON or hash maps).

"},{"location":"techdives/DistrubutedSystems/Redis/#3-main-feature-db-built-for-and-who-built-it-and-on-what","title":"3. Main Feature, DB Built For, and Who Built It and on What","text":"

Q: What was Redis built for, who built it, and what are its main features?

A: Redis was initially created by Salvatore Sanfilippo as a high-performance in-memory database for use cases requiring low-latency, real-time data processing. Redis is written in C, and its main features include in-memory storage, data persistence, flexible data structures, and capabilities for caching, messaging, and real-time analytics.

"},{"location":"techdives/DistrubutedSystems/Redis/#4-olap-or-oltp-does-it-support-acid-or-base","title":"4. OLAP or OLTP? Does it support ACID or BASE?","text":"

Q: Is Redis OLAP or OLTP, and does it adhere to ACID or BASE properties?

A: Redis is generally used in OLTP (Online Transaction Processing) scenarios due to its low-latency and high-throughput design. Redis does not natively support full ACID properties but can achieve atomic operations within individual commands due to its single-threaded nature. It follows the BASE (Basically Available, Soft state, Eventual consistency) model.

"},{"location":"techdives/DistrubutedSystems/Redis/#5-cap-theorem-where-does-redis-fall","title":"5. CAP Theorem \u2013 Where does Redis fall?","text":"

Q: How does Redis align with the CAP theorem, and what does each part (Consistency, Availability, Partition Tolerance) mean?

A: Redis, especially in a clustered setup, adheres to the CP (Consistency and Partition Tolerance) model of the CAP theorem. In a non-clustered single-instance setup, Redis is highly consistent. However, in a clustered setup, it sacrifices some availability for consistency.

Stronger consistency in Redis can be achieved with strict persistence settings and synchronous replication.

"},{"location":"techdives/DistrubutedSystems/Redis/#6-cluster-structure-from-cluster-to-records-the-whole-path","title":"6. Cluster Structure \u2013 From Cluster to Records, the Whole Path","text":"

Q: What is the structure of a Redis cluster from clusters down to individual records?

A: A Redis cluster is organized as follows: - Cluster: Composed of multiple nodes. - Nodes: Each node is responsible for a subset of the keyspace, organized into hash slots (16,384 in total). - Shards: Each node represents a shard of the data and can replicate across replicas. - Keys/Records: Each key is hashed to a specific slot, determining the node responsible for storing it.

"},{"location":"techdives/DistrubutedSystems/Redis/#7-the-fundamentals-of-a-cluster-all-building-blocks-from-cluster-to-records","title":"7. The Fundamentals of a Cluster \u2013 All Building Blocks from Cluster to Records","text":"

Q: What are the core building blocks of a Redis cluster?

A: Core components include: - Nodes: Independent Redis instances in a cluster. - Hash Slots: Redis divides keys into 16,384 slots for distribution across nodes. - Replication: Each primary node can have replicas to ensure data redundancy. - Partitions (Shards): Each node holds a partition of data for horizontal scalability.

"},{"location":"techdives/DistrubutedSystems/Redis/#8-multi-master-support","title":"8. Multi-Master Support","text":"

Q: Does Redis support multi-master configurations?

A: Redis does not support multi-master configurations in its native setup. It uses a single-master architecture per shard to ensure consistency.

"},{"location":"techdives/DistrubutedSystems/Redis/#9-master-slave-relationship-in-data-nodes","title":"9. Master-Slave Relationship in Data Nodes","text":"

Q: Does Redis follow a master-slave structure between data nodes?

A: Yes, in a Redis cluster, each data shard has a single master with one or more replicas (slaves) for redundancy. The slaves serve as read-only replicas unless promoted during failover.

"},{"location":"techdives/DistrubutedSystems/Redis/#10-node-structures-in-cluster","title":"10. Node Structures in Cluster","text":"

Q: What are the structures of nodes in a Redis cluster?

A: In a Redis cluster, each node is responsible for a subset of hash slots, with a master node serving write requests and one or more replicas serving as failover or read-only instances.

"},{"location":"techdives/DistrubutedSystems/Redis/#11-cluster-scaling-horizontal-and-vertical","title":"11. Cluster Scaling \u2013 Horizontal and Vertical","text":"

Q: Does Redis support horizontal and vertical scaling, and which is preferred?

A: Redis supports horizontal scaling (adding more nodes) via sharding in a cluster, which is generally preferred. Vertical scaling (adding more memory/CPU) is also possible but limited by hardware.

"},{"location":"techdives/DistrubutedSystems/Redis/#12-high-availability-explanation","title":"12. High Availability \u2013 Explanation","text":"

Q: How does Redis provide high availability?

A: Redis achieves high availability through replication and Redis Sentinel for monitoring and automatic failover. Redis Cluster further enhances availability by automatically promoting replicas if a primary node fails.

"},{"location":"techdives/DistrubutedSystems/Redis/#13-fault-tolerance-explanation","title":"13. Fault Tolerance \u2013 Explanation","text":"

Q: What mechanisms does Redis have for fault tolerance?

A: Redis ensures fault tolerance through data replication across replicas, and Sentinel monitors the master nodes to trigger failover in case of node failure.

"},{"location":"techdives/DistrubutedSystems/Redis/#14-replication","title":"14. Replication","text":"

Q: How does replication work in Redis?

A: Redis replication is asynchronous by default, with each master node replicating data to one or more replicas. In the event of a master failure, a replica is promoted to master status.

"},{"location":"techdives/DistrubutedSystems/Redis/#15-partitioning-and-sharding","title":"15. Partitioning and Sharding","text":"

Q: How does Redis handle partitioning and sharding?

A: Redis uses hash-based partitioning with 16,384 hash slots to distribute data across nodes. Each key is assigned a hash slot, which maps it to a specific node.
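The slot for any key can be checked directly; the mapping is CRC16(key) mod 16384, and hash tags ({...}) restrict hashing to the tagged substring:

CLUSTER KEYSLOT user:1001          // Returns an integer slot in 0-16383\nCLUSTER KEYSLOT {user:1001}:cart   // Hash tag: only \"user:1001\" is hashed, same slot as above\n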

"},{"location":"techdives/DistrubutedSystems/Redis/#16-caching-in-depth","title":"16. Caching in Depth","text":"

Q: How does Redis perform caching?

A: Redis is an in-memory cache, providing low-latency access with various caching strategies (e.g., TTL, eviction policies like LRU and LFU). It supports key expiration and eviction for memory management.

"},{"location":"techdives/DistrubutedSystems/Redis/#17-storage-type-trees-used-for-storage","title":"17. Storage Type \u2013 Trees Used for Storage","text":"

Q: What storage type and structures does Redis use?

A: Redis stores data in memory using simple data structures and does not use B-trees or similar structures. Data is kept in-memory and optionally persisted to disk (AOF/RDB).

"},{"location":"techdives/DistrubutedSystems/Redis/#18-segments-or-page-approach","title":"18. Segments or Page Approach?","text":"

Q: Does Redis use a segments approach, page approach, or something else?

A: Redis does not use segments or page-based storage. Data is stored in-memory and is managed directly by the Redis process.

"},{"location":"techdives/DistrubutedSystems/Redis/#19-indexing-how-does-it-work","title":"19. Indexing \u2013 How Does It Work?","text":"

Q: How does Redis handle indexing?

A: Redis does not use traditional indexing. Instead, it directly maps keys to hash slots in the cluster, providing O(1) access time to each key.

"},{"location":"techdives/DistrubutedSystems/Redis/#20-routing","title":"20. Routing","text":"

Q: How does Redis route requests to the correct node in a cluster?

A: Redis routes requests based on key hashing. The key is hashed to determine its slot, which maps it to a specific node.

"},{"location":"techdives/DistrubutedSystems/Redis/#21-latency-including-write-read-indexing-and-replication-latency","title":"21. Latency \u2013 Including Write, Read, Indexing, and Replication Latency","text":"

Q: What are Redis\u2019s latency characteristics?

A: Redis provides sub-millisecond read/write latency under normal conditions. Replication latency is generally low, though network overhead may add some delay.

"},{"location":"techdives/DistrubutedSystems/Redis/#22-versioning","title":"22. Versioning","text":"

Q: Does Redis support versioning?

A: Redis does not natively support versioning. Application logic may be required to manage version control if needed.

"},{"location":"techdives/DistrubutedSystems/Redis/#23-locking-and-concurrency","title":"23. Locking and Concurrency","text":"

Q: How does Redis handle locking and concurrency?

A: Redis supports distributed locking through the Redlock algorithm for ensuring safe concurrent access across clients.

"},{"location":"techdives/DistrubutedSystems/Redis/#24-write-ahead-logging-wal","title":"24. Write-Ahead Logging (WAL)","text":"

Q: Does Redis support WAL?

A: Redis does not use WAL directly. However, the Append-Only File (AOF) is similar, logging each write operation to ensure persistence.

"},{"location":"techdives/DistrubutedSystems/Redis/#25-change-data-capture-cdc-support","title":"25. Change Data Capture (CDC) Support","text":"

Q: Does Redis support CDC?

A: Redis does not natively support Change Data Capture. External tools may be needed for real-time data change tracking.

"},{"location":"techdives/DistrubutedSystems/Redis/#26-query-type-and-query-in-depth","title":"26. Query Type and Query in Depth","text":"

Q: What types of queries does Redis support?

A: Redis is key-based and supports simple read/write commands without complex query languages. Operations include GET, SET, HGET, ZADD, etc.

"},{"location":"techdives/DistrubutedSystems/Redis/#27-query-optimizers","title":"27. Query Optimizers","text":"

Q: Does Redis have query optimizers?

A: Redis does not have traditional query optimizers, as it operates in O(1) for most key-based lookups.

"},{"location":"techdives/DistrubutedSystems/Redis/#28-sql-support","title":"28. SQL Support","text":"

Q: Does Redis support SQL?

A: Redis does not natively support SQL. However, RedisJSON or other libraries can provide SQL-like querying capabilities.

"},{"location":"techdives/DistrubutedSystems/Redis/#29-circuit-breakers","title":"29. Circuit Breakers","text":"

Q: Does Redis have built-in circuit breaker support?

A: Redis itself does not implement circuit breakers. This is typically handled at the application or middleware layer.

"},{"location":"techdives/DistrubutedSystems/Redis/#30-data-retention-and-lifecycle-management","title":"30. Data Retention and Lifecycle Management","text":"

Q: How does Redis handle data lifecycle and retention?

A: Redis supports TTL on keys, and policies like Least Recently Used (LRU) enable retention management. Redis doesn\u2019t support multi-tier storage.

"},{"location":"techdives/DistrubutedSystems/Redis/#31-other-features","title":"31. Other Features","text":"

Q: What other features does Redis offer?

A: Redis supports data structures like streams for event logging, pub/sub for messaging, and geospatial indexing for location-based queries.

"},{"location":"techdives/DistrubutedSystems/Redis/#32-additional-modules","title":"32. Additional Modules","text":"

Q: What modules or libraries can be added to Redis?

A: Redis offers modules like RedisJSON (for JSON handling), RedisGraph (for graph data), and RedisBloom (for probabilistic data structures).

"},{"location":"techdives/DistrubutedSystems/Redis/#33-optimization-and-tuning-of-clusters","title":"33. Optimization and Tuning of Clusters","text":"

Q: How do you optimize and tune Redis clusters?

A: Key optimizations include appropriate partitioning, replication settings, eviction policies, and monitoring memory/CPU usage.

"},{"location":"techdives/DistrubutedSystems/Redis/#34-backup-and-recovery","title":"34. Backup and Recovery","text":"

Q: How does Redis handle backup and recovery?

A: Redis supports RDB snapshots and AOF for persistence. Backups are easily managed via AOF or manual RDB dumps.

"},{"location":"techdives/DistrubutedSystems/Redis/#35-security","title":"35. Security","text":"

Q: What are Redis\u2019s security features?

A: Redis supports authentication (AUTH command), SSL/TLS encryption, IP whitelisting, and role-based access control.

"},{"location":"techdives/DistrubutedSystems/Redis/#36-migration","title":"36. Migration","text":"

Q: Does Redis support migration tools?

A: Redis offers tools like redis-cli for basic migration, and Redis Enterprise provides more advanced migration capabilities.

"},{"location":"techdives/DistrubutedSystems/Redis/#37-recommended-cluster-setup","title":"37. Recommended Cluster Setup","text":"

Q: What\u2019s the recommended Redis cluster setup?

A: Typically, a Redis Cluster setup starts with 3 master nodes (for redundancy) and 3 replicas for high availability, totaling 6 nodes.

"},{"location":"techdives/DistrubutedSystems/Redis/#38-basic-cluster-setup-with-node-numbers-in-distributed-mode","title":"38. Basic Cluster Setup with Node Numbers in Distributed Mode","text":"

Q: How does a basic Redis cluster setup look in distributed mode?

A: A minimal Redis Cluster in distributed mode consists of 3 master nodes (handling 5,461 slots each) with 1 replica per master for redundancy.

"},{"location":"techdives/DistrubutedSystems/Redis/#39-segments-approach-or-page-approach-or-others","title":"39. Segments Approach or Page Approach or others","text":"

Q: Does Redis use a segments approach, page approach, or another storage approach?

A: Redis does not use a segments or page-based approach as it is an in-memory database. Data is stored directly in memory with no fixed segment or page structure, allowing for rapid access to keys. Redis is optimized for speed, relying on data structures like hash tables and direct in-memory allocation rather than traditional on-disk segment or page methods common in disk-based databases.

"},{"location":"techdives/DistrubutedSystems/S3/","title":"Amazon S3 (Simple Storage Service)","text":""},{"location":"techdives/DistrubutedSystems/S3/#1-introduction","title":"1. Introduction","text":"

Amazon S3 is a scalable object storage service offered by Amazon Web Services (AWS). It is designed to store and retrieve any amount of data from anywhere on the web, making it suitable for various use cases, including data backup, archiving, big data analytics, and hosting static websites.

"},{"location":"techdives/DistrubutedSystems/S3/#2-architecture-and-fundamentals","title":"2. Architecture and Fundamentals","text":""},{"location":"techdives/DistrubutedSystems/S3/#3-storage-classes","title":"3. Storage Classes","text":"

S3 offers a variety of storage classes optimized for different use cases, balancing cost and performance:

"},{"location":"techdives/DistrubutedSystems/S3/#data-retrieval-options","title":"Data Retrieval Options","text":""},{"location":"techdives/DistrubutedSystems/S3/#4-durability-availability-and-redundancy","title":"4. Durability, Availability and Redundancy","text":""},{"location":"techdives/DistrubutedSystems/S3/#5-security-features","title":"5. Security Features","text":""},{"location":"techdives/DistrubutedSystems/S3/#6-data-management-and-lifecycle-policies","title":"6. Data Management and Lifecycle Policies","text":""},{"location":"techdives/DistrubutedSystems/S3/#7-performance-and-optimization","title":"7. Performance and Optimization","text":""},{"location":"techdives/DistrubutedSystems/S3/#8-use-cases","title":"8. Use Cases","text":""},{"location":"techdives/DistrubutedSystems/S3/#9-best-practices","title":"9. Best Practices","text":""},{"location":"techdives/GeneralConcepts/git/","title":"Git","text":"

Version control is the cornerstone of modern software development, and Git stands as the most widely used version control system in the world. Whether you're a beginner or an experienced developer, understanding Git is crucial for collaborative and individual projects alike. In this article, we'll take a deep dive into Git, covering everything from its basics to its advanced features, and finish with a cheat sheet to equip you with the knowledge to master it.

"},{"location":"techdives/GeneralConcepts/git/#what-is-git","title":"What is Git ?","text":"

Git is a distributed version control system that tracks changes in files, enabling multiple developers to collaborate on a project effectively. Created in 2005 by Linus Torvalds, Git was initially designed for managing the Linux kernel's development. Today, it powers everything from small personal projects to massive enterprise software systems.

Key features

"},{"location":"techdives/GeneralConcepts/git/#installing-git","title":"Installing Git","text":"

Getting started with Git begins with installing it on your system. Here's how you can set it up based on your operating system:

Mac: Use Homebrew to install Git
brew install git\n

Windows: Download Git for Windows from git-scm.com and follow the installer instructions.

Linux: Install Git using your distribution's package manager
sudo apt install git  # For Debian/Ubuntu\nsudo yum install git  # For CentOS/Red Hat\n
"},{"location":"techdives/GeneralConcepts/git/#verify-installation","title":"Verify Installation","text":"To confirm Git is installed correctly, run
git --version\n
"},{"location":"techdives/GeneralConcepts/git/#initial-configuration","title":"Initial Configuration","text":"After installation, configure Git with your name, email, and preferred editor
git config --global user.name \"Your Name\"\ngit config --global user.email \"your.email@example.com\"\ngit config --global core.editor \"code\"  # Use VSCode or any editor\n
"},{"location":"techdives/GeneralConcepts/git/#getting-started-with-git","title":"Getting Started with Git","text":""},{"location":"techdives/GeneralConcepts/git/#creating-a-new-repository","title":"Creating a New Repository","text":"

To start tracking changes in a project, initialize a repository

git init\n

"},{"location":"techdives/GeneralConcepts/git/#clone-an-existing-repository","title":"Clone an Existing Repository","text":"

To work on an existing project, clone its repository

git clone <repository-url>\n

"},{"location":"techdives/GeneralConcepts/git/#tracking-changes","title":"Tracking Changes","text":"

Stage Changes: Add files to the staging area

git add <file>\n

Commit Changes: Save changes to the repository

git commit -m \"<Write a proper commit message>\"\n

"},{"location":"techdives/GeneralConcepts/git/#checking-repository-status","title":"Checking Repository Status","text":"

View the status of your working directory and staged files

git status\n

"},{"location":"techdives/GeneralConcepts/git/#viewing-commit-history","title":"Viewing Commit History","text":"

Review the project's history with

git log\ngit log --oneline  # Concise view\n

"},{"location":"techdives/GeneralConcepts/git/#working-with-branches","title":"Working with Branches","text":"

Git's branching system is one of its most powerful features. Branches allow you to work on different features or bug fixes without affecting the main codebase.

"},{"location":"techdives/GeneralConcepts/git/#creating-switching-branches","title":"Creating Switching Branches","text":"

Create a new branch

git branch <branch-name>\n
Switch to the branch
git checkout <branch-name>\ngit switch <branch-name>  # Modern alternative\n
Create and switch to the branch in one step
git checkout -b <branch-name>\n

"},{"location":"techdives/GeneralConcepts/git/#merging-branches","title":"Merging Branches","text":"

To integrate changes from one branch into another

git checkout main  # replace main with your target branch\ngit merge <branch-name>\n

"},{"location":"techdives/GeneralConcepts/git/#handling-merge-conflicts","title":"Handling Merge Conflicts","text":"

If Git detects conflicting changes, resolve them manually by editing the affected files. Then

git add <file>\ngit commit\n
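
For reference, inside a conflicted file Git marks the competing versions with conflict markers; the placeholders below are illustrative:

<<<<<<< HEAD\n<your version from the current branch>\n=======\n<incoming version from the other branch>\n>>>>>>> <branch-name>\n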

"},{"location":"techdives/GeneralConcepts/git/#remote-repositories","title":"Remote Repositories","text":"

Remote repositories allow teams to collaborate effectively.

"},{"location":"techdives/GeneralConcepts/git/#adding-a-remote","title":"Adding a Remote","text":"

Link your local repository to a remote

git remote add origin <repository-url>\n

"},{"location":"techdives/GeneralConcepts/git/#pushing-changes","title":"Pushing Changes","text":"

Send your commits to the remote repository

git push origin <branch-name>\n
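
Tip: on the first push of a new branch, add -u (short for --set-upstream) so the local branch tracks the remote one and later git push / git pull need no arguments:

git push -u origin <branch-name>\n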

"},{"location":"techdives/GeneralConcepts/git/#pulling-updates","title":"Pulling Updates","text":"

Fetch and integrate changes from the remote repository

git pull\n
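
By default git pull is simply a fetch followed by a merge, so the equivalent two-step form is:

git fetch origin\ngit merge origin/<branch-name>\n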

"},{"location":"techdives/GeneralConcepts/git/#removing-a-remote","title":"Removing a Remote","text":"

If needed, you can remove a remote

git remote remove origin\n

"},{"location":"techdives/GeneralConcepts/git/#advanced-git","title":"Advanced Git","text":""},{"location":"techdives/GeneralConcepts/git/#stashing-changes","title":"Stashing Changes","text":"

Temporarily save changes without committing

git stash\n
Retrieve them later with
git stash apply\n
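
Note that git stash apply leaves the entry in the stash list; git stash pop applies it and removes it in one step:

git stash pop\n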

"},{"location":"techdives/GeneralConcepts/git/#cherry-picking","title":"Cherry-Picking","text":"

Apply a specific commit from another branch

git cherry-pick <commit-hash>\n
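
If the picked commit conflicts with your branch, resolve the affected files and continue, or abort to return to the pre-pick state:

git cherry-pick --continue\ngit cherry-pick --abort\n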

"},{"location":"techdives/GeneralConcepts/git/#rebasing","title":"Rebasing","text":"

Rebase your branch onto another

git rebase <branch-name>\n
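
Rebasing replays your commits on top of the target branch, keeping history linear. A common variant is an interactive rebase, for example over the last three commits, to reorder, squash, or reword them:

git rebase -i HEAD~3\n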

"},{"location":"techdives/GeneralConcepts/git/#amending-commits","title":"Amending Commits","text":"

Fix the last commit message or contents

git commit --amend\n
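
To change only the contents while keeping the existing message, stage the fix and pass --no-edit:

git add <file>\ngit commit --amend --no-edit\n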

"},{"location":"techdives/GeneralConcepts/git/#understanding-git-internals","title":"Understanding Git Internals","text":"

Git operates by storing snapshots of your project at each commit, not deltas (differences). The key components of Git's internal storage include the object database (blobs for file contents, trees for directory listings, commits, and annotated tags), refs (branch and tag pointers to commits), HEAD (the currently checked-out ref), and the index (the staging area).

All data is stored in the .git directory.
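
You can inspect these objects directly with git cat-file, for example on the commit that HEAD points to:

git cat-file -t HEAD   # prints the object type (commit)\ngit cat-file -p HEAD   # pretty-prints the commit: tree, parent, author, message\n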

"},{"location":"techdives/GeneralConcepts/git/#collaboration-workflows","title":"Collaboration Workflows","text":"

Teams often adopt workflows to streamline collaboration. Popular ones include:

"},{"location":"techdives/GeneralConcepts/git/#common-issues","title":"Common Issues","text":""},{"location":"techdives/GeneralConcepts/git/#best-practices","title":"Best Practices","text":""},{"location":"techdives/GeneralConcepts/git/#git-cheat-sheet","title":"Git Cheat Sheet","text":"

Below are all the essential Git commands.

Git Cheat Sheet

# Git Commands Cheat Sheet\n\n# Configuration\ngit config --global user.name \"Your Name\"       # Set user name\ngit config --global user.email \"your.email@example.com\" # Set user email\ngit config --global core.editor \"code\"         # Set default editor\ngit config --list                              # View current configuration\n\n# Repository Management\ngit init                                       # Initialize a new repository\ngit clone <repository-url>                     # Clone an existing repository\ngit remote add origin <url>                    # Add remote repository\ngit remote -v                                  # View remote repositories\ngit remote remove <name>                       # Remove a remote\n\n# Staging and Committing\ngit add <file>                                 # Stage specific file\ngit add .                                      # Stage all files\ngit status                                     # Check status of repository\ngit commit -m \"Message\"                        # Commit with message\ngit commit --amend                             # Amend the last commit\n\n# Branching\ngit branch                                     # List branches\ngit branch <branch-name>                       # Create a new branch\ngit checkout <branch-name>                     # Switch to a branch\ngit checkout -b <branch-name>                  # Create and switch to a branch\ngit switch <branch-name>                       # Modern way to switch branches\ngit branch -d <branch-name>                    # Delete a branch\ngit branch -D <branch-name>                    # Force delete a branch\n\n# Merging\ngit merge <branch-name>                        # Merge a branch into the current branch\n\n# Pulling and Pushing\ngit pull                                       # Fetch and merge from remote repository\ngit pull origin <branch-name>                  # Pull specific branch\ngit push origin <branch-name>                  # Push to remote repository\ngit push --all                                 # Push all branches\ngit push --tags                                # Push tags to remote\n\n# Logs and History\ngit log                                        # View commit history\ngit log --oneline                              # View concise commit history\ngit log --graph                                # View graphical commit history\n\n# Undo Changes\ngit reset HEAD <file>                          # Unstage a file\ngit checkout -- <file>                         # Discard changes in working directory\ngit revert <commit-hash>                       # Undo a specific commit (safe)\ngit reset <commit-hash>                        # Reset to a specific commit (dangerous)\n\n# Stashing\ngit stash                                      # Stash changes\ngit stash list                                 # List stashes\ngit stash apply                                # Apply the last stash\ngit stash drop                                 # Remove the last stash\ngit stash clear                                # Clear all stashes\n\n# Rebasing\ngit rebase <branch-name>                       # Rebase current branch onto another\ngit rebase -i <commit-hash>                    # Interactive rebase\n\n# Tags\ngit tag <tag-name>                             # Create a tag\ngit tag -a <tag-name> -m \"Message\"             # Create an annotated tag\ngit tag -d <tag-name>                          # Delete a tag locally\ngit push origin <tag-name>                     # Push a specific tag\ngit push --tags                                # Push all tags\n\n# Collaboration\ngit fetch                                      # Fetch updates from remote\ngit pull                                       # Fetch and merge updates\ngit pull origin <branch-name>                  # Pull specific branch\ngit push                                       # Push changes to remote\ngit push origin <branch-name>                  # Push specific branch\n\n# Ignoring Files\necho \"filename\" >> .gitignore                  # Add file to .gitignore\ngit rm --cached <file>                         # Stop tracking a file\n\n# Viewing Changes\ngit diff                                       # View unstaged changes\ngit diff --staged                              # View staged changes\ngit diff <commit-hash1> <commit-hash2>         # Compare two commits\n\n# Cherry-Picking\ngit cherry-pick <commit-hash>                  # Apply a specific commit to the current branch\n\n# Aliases\ngit config --global alias.co checkout          # Alias for checkout\ngit config --global alias.br branch            # Alias for branch\ngit config --global alias.cm commit            # Alias for commit\n
"}]} \ No newline at end of file diff --git a/sitemap.xml b/sitemap.xml index e7dcafe..5205fe1 100644 --- a/sitemap.xml +++ b/sitemap.xml @@ -2,242 +2,242 @@ https://luci-mg.github.io/Under-the-Hood/ - 2024-12-30 + 2024-12-31 https://luci-mg.github.io/Under-the-Hood/blog/ - 2024-12-30 + 2024-12-31 https://luci-mg.github.io/Under-the-Hood/changelog/ - 2024-12-30 + 2024-12-31 https://luci-mg.github.io/Under-the-Hood/fundamentaldives/DesignPatterns/AbstractFactory/ - 2024-12-30 + 2024-12-31 https://luci-mg.github.io/Under-the-Hood/fundamentaldives/DesignPatterns/Adapter/ - 2024-12-30 + 2024-12-31 https://luci-mg.github.io/Under-the-Hood/fundamentaldives/DesignPatterns/Bridge/ - 2024-12-30 + 2024-12-31 https://luci-mg.github.io/Under-the-Hood/fundamentaldives/DesignPatterns/Builder/ - 2024-12-30 + 2024-12-31 https://luci-mg.github.io/Under-the-Hood/fundamentaldives/DesignPatterns/CircuitBreakers/ - 2024-12-30 + 2024-12-31 https://luci-mg.github.io/Under-the-Hood/fundamentaldives/DesignPatterns/Composite/ - 2024-12-30 + 2024-12-31 https://luci-mg.github.io/Under-the-Hood/fundamentaldives/DesignPatterns/Decorator/ - 2024-12-30 + 2024-12-31 https://luci-mg.github.io/Under-the-Hood/fundamentaldives/DesignPatterns/Facade/ - 2024-12-30 + 2024-12-31 https://luci-mg.github.io/Under-the-Hood/fundamentaldives/DesignPatterns/FactoryMethod/ - 2024-12-30 + 2024-12-31 https://luci-mg.github.io/Under-the-Hood/fundamentaldives/DesignPatterns/Iterator/ - 2024-12-30 + 2024-12-31 https://luci-mg.github.io/Under-the-Hood/fundamentaldives/DesignPatterns/Prototype/ - 2024-12-30 + 2024-12-31 https://luci-mg.github.io/Under-the-Hood/fundamentaldives/DesignPatterns/Singleton/ - 2024-12-30 + 2024-12-31 https://luci-mg.github.io/Under-the-Hood/fundamentaldives/DesignPatterns/Strategy/ - 2024-12-30 + 2024-12-31 https://luci-mg.github.io/Under-the-Hood/fundamentaldives/FundamentalConcepts/ConcurrencyParallelism/ - 2024-12-30 + 2024-12-31 https://luci-mg.github.io/Under-the-Hood/fundamentaldives/FundamentalPrinciples/DRY/ - 2024-12-30 + 2024-12-31 https://luci-mg.github.io/Under-the-Hood/fundamentaldives/FundamentalPrinciples/KISS/ - 2024-12-30 + 2024-12-31 https://luci-mg.github.io/Under-the-Hood/fundamentaldives/FundamentalPrinciples/SOLID/ - 2024-12-30 + 2024-12-31 https://luci-mg.github.io/Under-the-Hood/fundamentaldives/FundamentalPrinciples/YAGNI/ - 2024-12-30 + 2024-12-31 https://luci-mg.github.io/Under-the-Hood/langdives/Java/4Pillars/ - 2024-12-30 + 2024-12-31 https://luci-mg.github.io/Under-the-Hood/langdives/Java/AccessModifPPPPP/ - 2024-12-30 + 2024-12-31 https://luci-mg.github.io/Under-the-Hood/langdives/Java/Collections-JCF/ - 2024-12-30 + 2024-12-31 https://luci-mg.github.io/Under-the-Hood/langdives/Java/GarbageCollection/ - 2024-12-30 + 2024-12-31 https://luci-mg.github.io/Under-the-Hood/langdives/Java/Gradle/ - 2024-12-30 + 2024-12-31 https://luci-mg.github.io/Under-the-Hood/langdives/Java/JDK-JRE-JVM/ - 2024-12-30 + 2024-12-31 https://luci-mg.github.io/Under-the-Hood/langdives/Java/Java8vs11vs17vs21/ - 2024-12-30 + 2024-12-31 https://luci-mg.github.io/Under-the-Hood/langdives/Java/JavaPassBy/ - 2024-12-30 + 2024-12-31 https://luci-mg.github.io/Under-the-Hood/langdives/Java/KeyWordsTerminolgies/ - 2024-12-30 + 2024-12-31 https://luci-mg.github.io/Under-the-Hood/langdives/Java/Locking-Intrinsic/ - 2024-12-30 + 2024-12-31 https://luci-mg.github.io/Under-the-Hood/langdives/Java/Locking-Issues-DeadLock/ - 2024-12-30 + 2024-12-31 https://luci-mg.github.io/Under-the-Hood/langdives/Java/Locking-Issues-LiveLock/ - 
2024-12-30 + 2024-12-31 https://luci-mg.github.io/Under-the-Hood/langdives/Java/Locking-Issues-Others/ - 2024-12-30 + 2024-12-31 https://luci-mg.github.io/Under-the-Hood/langdives/Java/Locking-Issues-Starvation/ - 2024-12-30 + 2024-12-31 https://luci-mg.github.io/Under-the-Hood/langdives/Java/Locking-Reentrant/ - 2024-12-30 + 2024-12-31 https://luci-mg.github.io/Under-the-Hood/langdives/Java/Locking-ReentrantReadWrite/ - 2024-12-30 + 2024-12-31 https://luci-mg.github.io/Under-the-Hood/langdives/Java/LockingIntrinsicReentrant/ - 2024-12-30 + 2024-12-31 https://luci-mg.github.io/Under-the-Hood/langdives/Java/Maven/ - 2024-12-30 + 2024-12-31 https://luci-mg.github.io/Under-the-Hood/langdives/Java/MavenVsGradle/ - 2024-12-30 + 2024-12-31 https://luci-mg.github.io/Under-the-Hood/langdives/Java/MemoryModel/ - 2024-12-30 + 2024-12-31 https://luci-mg.github.io/Under-the-Hood/langdives/Java/PrimitiveReferenceTypes/ - 2024-12-30 + 2024-12-31 https://luci-mg.github.io/Under-the-Hood/langdives/Java/ReferenceTypesInDepth/ - 2024-12-30 + 2024-12-31 https://luci-mg.github.io/Under-the-Hood/langdives/Java/StreamsLambdas/ - 2024-12-30 + 2024-12-31 https://luci-mg.github.io/Under-the-Hood/langdives/Java/ThreadPoolTuning/ - 2024-12-30 + 2024-12-31 https://luci-mg.github.io/Under-the-Hood/langdives/Java/ThreadPools/ - 2024-12-30 + 2024-12-31 https://luci-mg.github.io/Under-the-Hood/langdives/Java/Threads-Atomicity/ - 2024-12-30 + 2024-12-31 https://luci-mg.github.io/Under-the-Hood/langdives/Java/Threads/ - 2024-12-30 + 2024-12-31 https://luci-mg.github.io/Under-the-Hood/langdives/Java/Spring/ - 2024-12-30 + 2024-12-31 https://luci-mg.github.io/Under-the-Hood/langdives/Java/Spring/SpringAnnotations/ - 2024-12-30 + 2024-12-31 https://luci-mg.github.io/Under-the-Hood/langdives/Java/Spring/SpringBoot/ - 2024-12-30 + 2024-12-31 https://luci-mg.github.io/Under-the-Hood/langdives/Java/Spring/SpringCoreFramework/ - 2024-12-30 + 2024-12-31 https://luci-mg.github.io/Under-the-Hood/langdives/Java/Spring/SpringFrameworkVsSpringBoot/ - 2024-12-30 + 2024-12-31 https://luci-mg.github.io/Under-the-Hood/techdives/DistrubutedConcepts/HighAvailabilityFaultTolerance/ - 2024-12-30 + 2024-12-31 https://luci-mg.github.io/Under-the-Hood/techdives/DistrubutedSystems/DockerAndK8s/ - 2024-12-30 + 2024-12-31 https://luci-mg.github.io/Under-the-Hood/techdives/DistrubutedSystems/ElasticSearch/ - 2024-12-30 + 2024-12-31 https://luci-mg.github.io/Under-the-Hood/techdives/DistrubutedSystems/Kafka/ - 2024-12-30 + 2024-12-31 https://luci-mg.github.io/Under-the-Hood/techdives/DistrubutedSystems/Redis/ - 2024-12-30 + 2024-12-31 https://luci-mg.github.io/Under-the-Hood/techdives/DistrubutedSystems/S3/ - 2024-12-30 + 2024-12-31 https://luci-mg.github.io/Under-the-Hood/techdives/GeneralConcepts/git/ - 2024-12-30 + 2024-12-31 \ No newline at end of file diff --git a/sitemap.xml.gz b/sitemap.xml.gz index 9dcaa798851eaf7505656dd5c029a511f3c3b5d4..6af27a921edaa3d7a6000afc4ddbb73ceec69e03 100644 GIT binary patch delta 617 zcmV-v0+#)&2CD`KABzYG05o%v2hV>Fq{?I2Lv#64d;it)*2nUI&6^5BiH9}Zw827+ zfx%n1L&_&et_&++#uO+YZHW^Yp2`~2UbECfBYTR47s=TYH3M7ct?DiuyJT9&7YX53 zqMBxDtee2tw227?WEF*2AoCCNzlD@foydPlWV^(whwT;sd$UVqh(T+E6> zzq0srHJj*XejQzYoE?zqWv1}po5K$BS1ac|95ny=O*AqFiIy_6yjgXN9@#ibTrn&Y zV9Ms>sUFxRq%D;A>{CQbU_F28@ysWSY`%V<&99e=Q1WrQqrpJd*E>2H=%THF=wh$O zw7~tBm@I8D)h%r2V+mZ(q8D5Md!NZACzc13D|SMrN$;3uh9vhkf(%XX8_FQP_=3*kMq{xh8Fi$P$0+?p;r`==Gpv zo>DuO?`yolZZ?gZeWPh`{na%K6Pf*2dUypVgqp6P-da%9ZJSD5b`_)Fv_Zm$MwzfhQrb8soF=I8E~ZVOO?m6hvxF9_WrBot&iman>Q7N5)W&*X@i9t 
z1B172hm=o{Tp3otj44n)+7c%)Je4)3y=JL}M)nj7FOst*Y6iB>Th(1ScFDAkFA~D7 zL^aLQSU0hYl!}cLG%6ye$kjmyv%fiKvQd7mS1EGjeWQQ*auQ!>VxW;$gz5c2nxtJA& zer56LYBtf&{5rb)I6ENI%S_?HH-{bMuU5``IB5R!n`mSV5-nwBd9&&kJ+g6>xMEl) zz?99$Q$4UtNLwiJ*{6t>z8C zkanPJK=?siqhv-20xPDi(d$9U zJf(Il-`9A9-E0~+`$p5?`m1XeCNlf4^zaH!2sK?ny|tjI+cuTB>?%gTX@i6hjWTuJ zMcIgB?Tq8SI+~(ezDcTXs7KGkW)wo?`vi!T3NK=0rL=KOI89LFT}-!1jZeO7QqiA` zrh0sl?ub*nZ5i#;Tk2U4uB=8oa2~eKOIVpg>vxVZ(bxHVd*_VN}lRmd(e$fNrFuag||@2>j|ArHR;Wy2#WTO|Mh DWa%yc diff --git a/techdives/DistrubutedConcepts/HighAvailabilityFaultTolerance/index.html b/techdives/DistrubutedConcepts/HighAvailabilityFaultTolerance/index.html index ffab2af..9f5fc8e 100644 --- a/techdives/DistrubutedConcepts/HighAvailabilityFaultTolerance/index.html +++ b/techdives/DistrubutedConcepts/HighAvailabilityFaultTolerance/index.html @@ -1387,7 +1387,7 @@ - Primitives References + Primitives & References diff --git a/techdives/DistrubutedSystems/DockerAndK8s/index.html b/techdives/DistrubutedSystems/DockerAndK8s/index.html index 98abb57..1a32417 100644 --- a/techdives/DistrubutedSystems/DockerAndK8s/index.html +++ b/techdives/DistrubutedSystems/DockerAndK8s/index.html @@ -1387,7 +1387,7 @@ - Primitives References + Primitives & References diff --git a/techdives/DistrubutedSystems/ElasticSearch/index.html b/techdives/DistrubutedSystems/ElasticSearch/index.html index c192edb..7e5056b 100644 --- a/techdives/DistrubutedSystems/ElasticSearch/index.html +++ b/techdives/DistrubutedSystems/ElasticSearch/index.html @@ -1387,7 +1387,7 @@ - Primitives References + Primitives & References diff --git a/techdives/DistrubutedSystems/Kafka/index.html b/techdives/DistrubutedSystems/Kafka/index.html index 3ab515c..88c4637 100644 --- a/techdives/DistrubutedSystems/Kafka/index.html +++ b/techdives/DistrubutedSystems/Kafka/index.html @@ -1387,7 +1387,7 @@ - Primitives References + Primitives & References diff --git a/techdives/DistrubutedSystems/Redis/index.html b/techdives/DistrubutedSystems/Redis/index.html index c9a4f48..e855dc6 100644 --- a/techdives/DistrubutedSystems/Redis/index.html +++ b/techdives/DistrubutedSystems/Redis/index.html @@ -1387,7 +1387,7 @@ - Primitives References + Primitives & References diff --git a/techdives/DistrubutedSystems/S3/index.html b/techdives/DistrubutedSystems/S3/index.html index 2d9ae7d..20f0244 100644 --- a/techdives/DistrubutedSystems/S3/index.html +++ b/techdives/DistrubutedSystems/S3/index.html @@ -1387,7 +1387,7 @@ - Primitives References + Primitives & References diff --git a/techdives/GeneralConcepts/git/index.html b/techdives/GeneralConcepts/git/index.html index e2db4ff..534d786 100644 --- a/techdives/GeneralConcepts/git/index.html +++ b/techdives/GeneralConcepts/git/index.html @@ -1387,7 +1387,7 @@ - Primitives References + Primitives & References