Skip to content

Commit

Permalink
Merge pull request #11 from MGTheTrain/feature/secure-file-storage
Browse files Browse the repository at this point in the history
Feature/secure file storage
  • Loading branch information
MGTheTrain authored Nov 18, 2024
2 parents 53870a0 + 978a717 commit 76b3f5d
Show file tree
Hide file tree
Showing 14 changed files with 350 additions and 36 deletions.
7 changes: 6 additions & 1 deletion .github/workflows/dev.yml
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,9 @@ jobs:
with:
go-version: '1.21.x'

- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3

- name: Grant execute permissions
run: chmod +x *
working-directory: ./scripts
Expand All @@ -24,7 +27,9 @@ jobs:
run: ./run-test.sh -u
working-directory: ./scripts

# Spin up integration environment (docker-compose or public hyper scaler infrastructure)
- name: Spin up external storage services
run: |
docker compose up -d postgres azure-blob-storage
- name: Install apt dependencies for integration test
run: |
Expand Down
7 changes: 6 additions & 1 deletion .github/workflows/pre-release.yml
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,9 @@ jobs:
with:
go-version: '1.21.x'

- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3

- name: Grant execute permissions
run: chmod +x *
working-directory: ./scripts
Expand All @@ -24,7 +27,9 @@ jobs:
run: ./run-test.sh -u
working-directory: ./scripts

# Spin up integration environment (docker-compose or public hyper scaler infrastructure)
- name: Spin up external storage services
run: |
docker compose up -d postgres azure-blob-storage
- name: Install apt dependencies for integration test
run: |
Expand Down
7 changes: 6 additions & 1 deletion .github/workflows/release.yml
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,9 @@ jobs:
with:
go-version: '1.21.x'

- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3

- name: Grant execute permissions
run: chmod +x *
working-directory: ./scripts
Expand All @@ -25,7 +28,9 @@ jobs:
run: ./run-test.sh -u
working-directory: ./scripts

# Spin up integration environment (docker-compose or public hyper scaler infrastructure)
- name: Spin up external storage services
run: |
docker compose up -d postgres azure-blob-storage
- name: Install apt dependencies for integration test
run: |
Expand Down
19 changes: 17 additions & 2 deletions Makefile
Original file line number Diff line number Diff line change
@@ -1,7 +1,10 @@
SCRIPT_DIR = "scripts"

.PHONY: format-and-lint run-unit-tests run-integration-tests

.PHONY: format-and-lint run-unit-tests run-integration-tests \
spin-up-integration-test-docker-containers \
shut-down-integration-test-docker-containers \
spin-up-docker-containers shut-down-docker-containers

format-and-lint:
@cd $(SCRIPT_DIR) && ./format-and-lint.sh

Expand All @@ -10,3 +13,15 @@ run-unit-tests:

run-integration-tests:
@cd $(SCRIPT_DIR) && ./run-test.sh -i

spin-up-integration-test-docker-containers:
docker-compose up -d postgres azure-blob-storage

shut-down-integration-test-docker-containers:
docker-compose down -v postgres azure-blob-storage

spin-up-docker-containers:
docker-compose up -d --build

shut-down-docker-containers:
docker-compose down -v
20 changes: 4 additions & 16 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -75,30 +75,18 @@ make format-and-lint

### Run Tests

To run `unit` tests on Unix systems either execute

```sh
cd scripts
./run-test.sh -u
```

or
To run `unit tests` on Unix systems execute

```sh
make run-unit-tests
```

To run `integration` tests on Unix systems either execute

```sh
cd scripts
./run-test.sh -i
```

or
To run `integration tests` on Unix systems execute

```sh
make spin-up-integration-test-docker-containers
make run-integration-tests
make shut-down-integration-test-docker-containers # Optionally clear docker resources
```

### Applications
Expand Down
2 changes: 2 additions & 0 deletions crypto-vault-service.env
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
AZURE_BLOB_CONNECTOR_SETTINGS_CONNECTION_STRING="DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;BlobEndpoint=http://azure-blob-storage:10000/devstoreaccount1;QueueEndpoint=http://azure-blob-storage:10001/devstoreaccount1;TableEndpoint=http://azure-blob-storage:10002/devstoreaccount1;"
CONNECTION_STRINGS__PSQL_DATABASE="Server=postgres;Port=5432;Database=meta;UserName=postgres;Password=postgres;Sslmode=Prefer"
1 change: 1 addition & 0 deletions docker-compose.yml
Original file line number Diff line number Diff line change
Expand Up @@ -38,6 +38,7 @@ services:
- "10002:10002"
volumes:
- azurite-data:/data
command: ["azurite", "--skipApiVersionCheck", "--blobHost", "0.0.0.0"]
restart: on-failure

volumes:
Expand Down
11 changes: 5 additions & 6 deletions internal/domain/contracts/blob_management.go
Original file line number Diff line number Diff line change
Expand Up @@ -2,18 +2,17 @@ package contracts

import (
"crypto_vault_service/internal/domain/model"
"mime/multipart"
)

// BlobManagement defines methods for managing blob operations.
type BlobManagement interface {
// Upload handles the upload of a blob from a multipart form.
// Returns the created Blob metadata and any error encountered.
Upload(form *multipart.Form) (*model.Blob, error)
// Upload handles the upload of blobs from file paths.
// Returns the created Blobs metadata and any error encountered.
Upload(filePath []string) ([]*model.Blob, error)

// DownloadByID retrieves a blob by its ID, returning the metadata and file data.
// Download retrieves a blob by its ID and name, returning the metadata and file data.
// Returns the Blob metadata, file data as a byte slice, and any error.
DownloadByID(blobId string) (*model.Blob, []byte, error)
Download(blobId, blobName string) (*model.Blob, []byte, error)

// DeleteByID removes a blob by its ID.
// Returns any error encountered.
Expand Down
21 changes: 15 additions & 6 deletions internal/domain/contracts/key_management.go
Original file line number Diff line number Diff line change
Expand Up @@ -2,18 +2,27 @@ package contracts

import (
"crypto_vault_service/internal/domain/model"
"mime/multipart"
)

// KeyType identifies the category of a cryptographic key (custom int-based enum).
type KeyType int

// Enum-like KeyType values using iota: public/private halves of an
// asymmetric key pair, and symmetric (secret) keys.
const (
	AsymmetricPublic KeyType = iota
	AsymmetricPrivate
	Symmetric
)

// KeyManagement defines methods for managing cryptographic key operations.
type KeyManagement interface {
// Upload handles the upload of a cryptographic key from a multipart form.
// Returns the created key metadata and any error encountered.
Upload(form *multipart.Form) (*model.CryptographicKey, error)
// Upload handles the upload of blobs from file paths.
// Returns the created Blobs metadata and any error encountered.
Upload(filePath []string) ([]*model.CryptographicKey, error)

// DownloadByID retrieves a cryptographic key by its ID, returning the metadata and key data.
// Download retrieves a cryptographic key by its ID and key type, returning the metadata and key data.
// Returns the key metadata, key data as a byte slice, and any error.
DownloadByID(keyId string) (*model.CryptographicKey, []byte, error)
Download(keyId string, keyType KeyType) (*model.CryptographicKey, []byte, error)

// DeleteByID removes a cryptographic key by its ID.
// Returns any error encountered.
Expand Down
175 changes: 175 additions & 0 deletions internal/infrastructure/connector/az_blob.go
Original file line number Diff line number Diff line change
@@ -1 +1,176 @@
package connector

import (
"bytes"
"context"
"crypto_vault_service/internal/domain/model"
"fmt"
"log"
"os"
"path/filepath"

"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
"github.com/google/uuid"
)

// AzureBlobConnector is an interface for interacting with Azure Blob storage.
// Blobs are addressed by the pair (blobId, blobName); implementations store
// them under the path "<blobId>/<blobName>" inside a single container.
type AzureBlobConnector interface {
	// Upload uploads multiple files to Azure Blob Storage and returns their metadata.
	Upload(filePaths []string) ([]*model.Blob, error)
	// Download retrieves a blob's content by its ID and name, and returns the data as a stream.
	Download(blobId, blobName string) (*bytes.Buffer, error)
	// Delete deletes a blob from Azure Blob Storage by its ID and Name, and returns any error encountered.
	Delete(blobId, blobName string) error
}

// AzureBlobConnectorImpl is a struct that holds the Azure Blob storage client.
// It is the concrete implementation of the AzureBlobConnector interface.
type AzureBlobConnectorImpl struct {
	// Client is the Azure SDK blob service client created from a connection string.
	Client *azblob.Client
	// ContainerName is the single container all blobs are stored in.
	ContainerName string
}

// NewAzureBlobConnector creates a new AzureBlobConnectorImpl instance using a
// connection string and ensures the given container exists.
// It returns the connector and any error encountered during client creation.
func NewAzureBlobConnector(connectionString string, containerName string) (*AzureBlobConnectorImpl, error) {
	client, err := azblob.NewClientFromConnectionString(connectionString, nil)
	if err != nil {
		return nil, fmt.Errorf("failed to create Azure Blob client: %w", err)
	}

	// Best-effort container creation: the container may already exist, in
	// which case the SDK returns an error that is safe to ignore here.
	// Log it (consistently with the rest of this file, via log rather than
	// fmt.Printf) instead of failing initialization.
	if _, err = client.CreateContainer(context.Background(), containerName, nil); err != nil {
		log.Printf("Could not create container '%s' (it may already exist): %v", containerName, err)
	}

	return &AzureBlobConnectorImpl{
		Client:        client,
		ContainerName: containerName,
	}, nil
}

// Upload uploads multiple files to Azure Blob Storage and returns their metadata.
// All files of one call share a single generated blob ID, which acts as a
// folder prefix ("<id>/<name>") inside the container. On any failure, blobs
// already uploaded by this call are rolled back (deleted) before returning
// the error.
func (abc *AzureBlobConnectorImpl) Upload(filePaths []string) ([]*model.Blob, error) {
	var blobs []*model.Blob
	blobId := uuid.New().String()

	for _, filePath := range filePaths {
		// os.ReadFile opens, reads, and closes the file in one call. This
		// fixes the original defer-file.Close()-in-a-loop pitfall: deferred
		// closes only run at function return, so every file stayed open until
		// the whole batch finished.
		data, err := os.ReadFile(filePath)
		if err != nil {
			abc.rollbackUploadedBlobs(blobs)
			return nil, fmt.Errorf("failed to read file '%s': %w", filePath, err)
		}

		fileName := filepath.Base(filePath)

		// Create the Blob metadata object for this file.
		blob := &model.Blob{
			ID:   blobId,
			Name: fileName,
			Size: int64(len(data)),
			Type: filepath.Ext(fileName), // file extension, e.g. ".txt", ".jpg"
		}

		// Combine ID and name to form the full blob path, normalized to
		// forward slashes so the container layout is identical on all hosts.
		fullBlobName := filepath.ToSlash(fmt.Sprintf("%s/%s", blob.ID, blob.Name))

		// Upload the blob content to Azure.
		if _, err = abc.Client.UploadBuffer(context.Background(), abc.ContainerName, fullBlobName, data, nil); err != nil {
			abc.rollbackUploadedBlobs(blobs)
			return nil, fmt.Errorf("failed to upload blob '%s': %w", fullBlobName, err)
		}

		log.Printf("Blob '%s' uploaded successfully.\n", blob.Name)

		// Track the successfully uploaded blob for a potential rollback.
		blobs = append(blobs, blob)
	}

	// Return the list of blobs after successful upload.
	return blobs, nil
}

// rollbackUploadedBlobs best-effort deletes every blob that was uploaded
// before an error occurred. Individual delete failures are logged but do
// not abort the rollback of the remaining blobs.
func (abc *AzureBlobConnectorImpl) rollbackUploadedBlobs(blobs []*model.Blob) {
	for _, b := range blobs {
		if err := abc.Delete(b.ID, b.Name); err != nil {
			log.Printf("Failed to delete blob '%s' during rollback: %v", b.Name, err)
			continue
		}
		log.Printf("Blob '%s' deleted during rollback.\n", b.Name)
	}
}

// Download retrieves a blob's content by its ID and name, and returns the
// data as a buffer. Returns any error encountered while downloading or
// reading the stream.
func (abc *AzureBlobConnectorImpl) Download(blobId, blobName string) (*bytes.Buffer, error) {
	ctx := context.Background()

	// Blobs are stored under "<id>/<name>" (see Upload).
	fullBlobName := fmt.Sprintf("%s/%s", blobId, blobName)

	// Download the blob as a stream.
	get, err := abc.Client.DownloadStream(ctx, abc.ContainerName, fullBlobName, nil)
	if err != nil {
		return nil, fmt.Errorf("failed to download blob '%s': %w", fullBlobName, err)
	}

	// A retry reader transparently resumes reads on transient network failures.
	retryReader := get.NewRetryReader(ctx, &azblob.RetryReaderOptions{})
	// Close via defer so the stream is released on every path. The original
	// only closed on success, leaking the response body when ReadFrom failed.
	defer func() {
		if cerr := retryReader.Close(); cerr != nil {
			log.Printf("failed to close retryReader for blob '%s': %v", fullBlobName, cerr)
		}
	}()

	// Read the whole stream into memory.
	var downloadedData bytes.Buffer
	if _, err = downloadedData.ReadFrom(retryReader); err != nil {
		return nil, fmt.Errorf("failed to read data from blob '%s': %w", fullBlobName, err)
	}

	// Return the buffer containing the downloaded data.
	return &downloadedData, nil
}

// Delete deletes a single blob ("<blobId>/<blobName>") from Azure Blob
// Storage and returns any error encountered.
func (abc *AzureBlobConnectorImpl) Delete(blobId, blobName string) error {
	ctx := context.Background()

	// Blobs are stored under "<id>/<name>" (see Upload).
	fullBlobName := fmt.Sprintf("%s/%s", blobId, blobName)

	// Delete the blob. Wrap the underlying error (the original discarded it)
	// and name the single blob that failed rather than claiming "all blobs".
	if _, err := abc.Client.DeleteBlob(ctx, abc.ContainerName, fullBlobName, nil); err != nil {
		return fmt.Errorf("failed to delete blob '%s': %w", fullBlobName, err)
	}
	log.Printf("Deleted blob '%s'", fullBlobName)
	return nil
}
Loading

0 comments on commit 76b3f5d

Please sign in to comment.