diff --git a/pom.xml b/pom.xml
index 00fdd7e02..c3cfb9403 100644
--- a/pom.xml
+++ b/pom.xml
@@ -5,7 +5,7 @@
StoRM Backend server
org.italiangrid.storm
storm-backend-server
- 1.11.19
+ 1.12.0
@@ -18,13 +18,13 @@
3.5.0.Final
3.3.0
1.1
- 1.2.1
+ 2.8.0
2.0.1
0.5.2
3.0
1.1
20080701
- 4.8.1
+ 4.13.1
1.0
0.2
3.1
@@ -35,7 +35,7 @@
1.4.6
2.3.3
- 5.1.12
+ 8.0.16
18.0
3.1.0
@@ -376,8 +376,8 @@
- commons-dbcp
- commons-dbcp
+ org.apache.commons
+ commons-dbcp2
${commonsDbcpVersion}
diff --git a/src/main/java/it/grid/storm/Constants.java b/src/main/java/it/grid/storm/Constants.java
index ca61cbf93..448378341 100644
--- a/src/main/java/it/grid/storm/Constants.java
+++ b/src/main/java/it/grid/storm/Constants.java
@@ -35,7 +35,6 @@ public class Constants {
private static final Logger log = LoggerFactory.getLogger(Constants.class);
public static final Entry BE_VERSION;
- public static final Entry NAMESPACE_VERSION;
public static final Entry BE_OS_DISTRIBUTION;
public static final Entry BE_OS_PLATFORM;
public static final Entry BE_OS_KERNEL_RELEASE;
@@ -49,7 +48,6 @@ private Constants() {}
static {
BE_VERSION = new Entry("BE-Version", Constants.class.getPackage().getImplementationVersion());
- NAMESPACE_VERSION = new Entry("Namespace-version", "1.5.0");
BE_OS_DISTRIBUTION = new Entry("BE-OS-Distribution", getDistribution());
Map map = getPlatformKernel();
BE_OS_PLATFORM = new Entry(BE_OS_PLATFORM_KEY, map.get(BE_OS_PLATFORM_KEY));
diff --git a/src/main/java/it/grid/storm/ShutdownHook.java b/src/main/java/it/grid/storm/ShutdownHook.java
index 09cb21c22..2dffea96f 100644
--- a/src/main/java/it/grid/storm/ShutdownHook.java
+++ b/src/main/java/it/grid/storm/ShutdownHook.java
@@ -28,6 +28,7 @@ public void run() {
storm.stopSpaceGC();
storm.stopExpiredAgent();
storm.stopDiskUsageService();
+ storm.stopRequestGarbageCollector();
GPFSQuotaManager.INSTANCE.shutdown();
log.info("StoRM: Backend successfully stopped.");
diff --git a/src/main/java/it/grid/storm/StoRM.java b/src/main/java/it/grid/storm/StoRM.java
index 2c4852fd3..c18b63a51 100644
--- a/src/main/java/it/grid/storm/StoRM.java
+++ b/src/main/java/it/grid/storm/StoRM.java
@@ -30,8 +30,8 @@
import it.grid.storm.asynch.AdvancedPicker;
import it.grid.storm.catalogs.ReservedSpaceCatalog;
-import it.grid.storm.catalogs.StoRMDataSource;
-import it.grid.storm.catalogs.timertasks.ExpiredPutRequestsAgent;
+import it.grid.storm.catalogs.executors.RequestFinalizerService;
+import it.grid.storm.catalogs.timertasks.RequestsGarbageCollector;
import it.grid.storm.check.CheckManager;
import it.grid.storm.check.CheckResponse;
import it.grid.storm.check.CheckStatus;
@@ -72,13 +72,16 @@ public class StoRM {
private boolean isSpaceGCRunning = false;
/*
- * Timer object in charge of transit expired put requests from SRM_SPACE_AVAILABLE to
- * SRM_FILE_LIFETIME_EXPIRED and from SRM_REQUEST_INPROGRESS to SRM_FAILURE
+ * Agent in charge of transit expired ptg/ptp/bol requests to final statuses
*/
- private final Timer transiter = new Timer();
- private TimerTask expiredAgent;
+ private RequestFinalizerService expiredAgent;
private boolean isExpiredAgentRunning = false;
+ /* Requests Garbage Collector */
+ private final Timer rgc = new Timer();
+ private TimerTask rgcTask;
+ private boolean isRequestGCRunning = false;
+
private boolean isDiskUsageServiceEnabled = false;
private DiskUsageService duService;
@@ -96,7 +99,7 @@ public StoRM() {
config = Configuration.getInstance();
picker = new AdvancedPicker();
- spaceCatalog = new ReservedSpaceCatalog();
+ spaceCatalog = ReservedSpaceCatalog.getInstance();
}
@@ -108,8 +111,6 @@ public void init() throws BootstrapException {
configureMetricsReporting();
- configureStoRMDataSource();
-
loadNamespaceConfiguration();
HealthDirector.initializeDirector(false);
@@ -216,11 +217,6 @@ private void performSanityChecks() throws BootstrapException {
}
- private void configureStoRMDataSource() {
-
- StoRMDataSource.init();
- }
-
/**
* Method used to start the picker.
*/
@@ -247,14 +243,6 @@ public synchronized void stopPicker() {
isPickerRunning = false;
}
- /**
- * @return
- */
- public synchronized boolean pickerIsRunning() {
-
- return isPickerRunning;
- }
-
/**
* Method used to start xmlrpcServer.
*
@@ -382,14 +370,6 @@ public synchronized void stopSpaceGC() {
isSpaceGCRunning = false;
}
- /**
- * @return
- */
- public synchronized boolean spaceGCIsRunning() {
-
- return isSpaceGCRunning;
- }
-
/**
* Starts the internal timer needed to periodically check and transit requests whose pinLifetime
* has expired and are in SRM_SPACE_AVAILABLE, to SRM_FILE_LIFETIME_EXPIRED. Moreover, the
@@ -405,16 +385,8 @@ public synchronized void startExpiredAgent() {
return;
}
- /* Delay time before starting cleaning thread! Set to 1 minute */
- final long delay = config.getTransitInitialDelay() * 1000L;
- /* Period of execution of cleaning! Set to 1 hour */
- final long period = config.getTransitTimeInterval() * 1000L;
- /* Expiration time before starting move in-progress requests to failure */
- final long inProgressExpirationTime = config.getInProgressPutRequestExpirationTime();
-
log.debug("Starting Expired Agent.");
- expiredAgent = new ExpiredPutRequestsAgent(inProgressExpirationTime);
- transiter.scheduleAtFixedRate(expiredAgent, delay, period);
+ expiredAgent = new RequestFinalizerService(config);
isExpiredAgentRunning = true;
log.debug("Expired Agent started.");
}
@@ -428,7 +400,7 @@ public synchronized void stopExpiredAgent() {
log.debug("Stopping Expired Agent.");
if (expiredAgent != null) {
- expiredAgent.cancel();
+ expiredAgent.stop();
}
log.debug("Expired Agent stopped.");
isExpiredAgentRunning = false;
@@ -498,6 +470,40 @@ public synchronized void stopDiskUsageService() {
}
}
+ public synchronized void startRequestGarbageCollector() {
+
+ if (isRequestGCRunning) {
+ log.debug("Requests Garbage Collector is already running.");
+ return;
+ }
+
+ /* Delay time before starting cleaning thread */
+ final long delay = config.getRequestPurgerDelay() * 1000L;
+ /* Period of execution of cleaning */
+ final long period = config.getRequestPurgerPeriod() * 1000L;
+
+ log.debug("Starting Requests Garbage Collector .");
+ rgcTask = new RequestsGarbageCollector(rgc, period);
+ rgc.schedule(rgcTask, delay);
+ isRequestGCRunning = true;
+ log.debug("Requests Garbage Collector started.");
+ }
+
+ public synchronized void stopRequestGarbageCollector() {
+
+ if (!isRequestGCRunning) {
+ log.debug("Requests Garbage Collector is not running.");
+ return;
+ }
+
+ log.debug("Stopping Requests Garbage Collector.");
+ if (rgcTask != null) {
+ rgcTask.cancel();
+ }
+ log.debug("Requests Garbage Collector stopped.");
+ isRequestGCRunning = false;
+ }
+
public void startServices() throws Exception {
startPicker();
@@ -505,6 +511,7 @@ public void startServices() throws Exception {
startRestServer();
startSpaceGC();
startExpiredAgent();
+ startRequestGarbageCollector();
startDiskUsageService();
}
@@ -515,6 +522,7 @@ public void stopServices() {
stopRestServer();
stopSpaceGC();
stopExpiredAgent();
+ stopRequestGarbageCollector();
stopDiskUsageService();
GPFSQuotaManager.INSTANCE.shutdown();
diff --git a/src/main/java/it/grid/storm/acl/AclManager.java b/src/main/java/it/grid/storm/acl/AclManager.java
index 5a9f03de9..5eba59c74 100644
--- a/src/main/java/it/grid/storm/acl/AclManager.java
+++ b/src/main/java/it/grid/storm/acl/AclManager.java
@@ -114,60 +114,4 @@ FilesystemPermission setGroupPermission(LocalFile localFile, LocalUser localUser
FilesystemPermission setUserPermission(LocalFile localFile, LocalUser localUser,
FilesystemPermission permission) throws IllegalArgumentException;
- /**
- * @param localFile an existent file
- * @throws IllegalArgumentException if received null parameters or the LocalFile object refers to
- * a not existent file
- */
- void removeHttpsPermissions(LocalFile localFile) throws IllegalArgumentException;
-
- /**
- * @param localFile an existent file
- * @param localUser
- * @param permission
- * @throws IllegalArgumentException if received null parameters or the LocalFile object refers to
- * a not existent file
- */
- void grantHttpsUserPermission(LocalFile localFile, LocalUser localUser,
- FilesystemPermission permission) throws IllegalArgumentException;
-
- /**
- * @param localFile an existent file
- * @param localUser
- * @param permission
- * @throws IllegalArgumentException if received null parameters or the LocalFile object refers to
- * a not existent file
- */
- void grantHttpsServiceGroupPermission(LocalFile localFile, FilesystemPermission permission)
- throws IllegalArgumentException;
-
- /**
- * @param localFile an existent file
- * @param localUser
- * @param permission
- * @throws IllegalArgumentException if received null parameters or the LocalFile object refers to
- * a not existent file
- */
- void grantHttpsGroupPermission(LocalFile localFile, LocalUser localUser,
- FilesystemPermission permission) throws IllegalArgumentException;
-
- /**
- * @param localFile an existent file
- * @param localUser
- * @param permission
- * @throws IllegalArgumentException if received null parameters or the LocalFile object refers to
- * a not existent file
- */
- void grantHttpsServiceUserPermission(LocalFile localFile, FilesystemPermission permission)
- throws IllegalArgumentException;
-
- /**
- * @param oldLocalFile an existent source file
- * @param newLocalFile an existent destination file
- * @throws IllegalArgumentException if received null parameters or the LocalFile objects refers to
- * not existent files
- */
- void moveHttpsPermissions(LocalFile oldLocalFile, LocalFile newLocalFile)
- throws IllegalArgumentException;
-
}
diff --git a/src/main/java/it/grid/storm/acl/AclManagerFS.java b/src/main/java/it/grid/storm/acl/AclManagerFS.java
index 52f813bea..028c441d0 100644
--- a/src/main/java/it/grid/storm/acl/AclManagerFS.java
+++ b/src/main/java/it/grid/storm/acl/AclManagerFS.java
@@ -27,12 +27,6 @@ public static AclManager getInstance() {
return instance;
}
- /*
- * (non-Javadoc)
- *
- * @see it.grid.storm.acl.AclManager#grantGroupPermission(it.grid.storm.griduser .LocalUser,
- * it.grid.storm.filesystem.FilesystemPermission)
- */
@Override
public FilesystemPermission grantGroupPermission(LocalFile localFile, LocalUser localUser,
FilesystemPermission permission) throws IllegalArgumentException {
@@ -50,12 +44,6 @@ public FilesystemPermission grantGroupPermission(LocalFile localFile, LocalUser
return newPermission;
}
- /*
- * (non-Javadoc)
- *
- * @see it.grid.storm.acl.AclManager#grantUserPermission(it.grid.storm.filesystem .LocalFile,
- * it.grid.storm.griduser.LocalUser, it.grid.storm.filesystem.FilesystemPermission)
- */
@Override
public FilesystemPermission grantUserPermission(LocalFile localFile, LocalUser localUser,
FilesystemPermission permission) throws IllegalArgumentException {
@@ -175,68 +163,4 @@ public FilesystemPermission setUserPermission(LocalFile localFile, LocalUser loc
return newPermission;
}
- @Override
- public void removeHttpsPermissions(LocalFile localFile) throws IllegalArgumentException {
-
- if (localFile == null) {
- throw new IllegalArgumentException(
- "Unable to perform the operation. The received file parameter is null");
- }
- }
-
- @Override
- public void grantHttpsUserPermission(LocalFile localFile, LocalUser localUser,
- FilesystemPermission permission) throws IllegalArgumentException {
-
- if (localFile == null || localUser == null || permission == null) {
- throw new IllegalArgumentException(
- "Unable to perform the operation. The received null parameters: localFile=" + localFile
- + " localUser=" + localUser + " permission=" + permission);
- }
- }
-
- @Override
- public void grantHttpsServiceUserPermission(LocalFile localFile, FilesystemPermission permission)
- throws IllegalArgumentException {
-
- if (localFile == null || permission == null) {
- throw new IllegalArgumentException(
- "Unable to perform the operation. The received null parameters: localFile=" + localFile
- + " permission=" + permission);
- }
- }
-
- @Override
- public void grantHttpsGroupPermission(LocalFile localFile, LocalUser localUser,
- FilesystemPermission permission) throws IllegalArgumentException {
-
- if (localFile == null || localUser == null || permission == null) {
- throw new IllegalArgumentException(
- "Unable to perform the operation. The received null parameters: localFile=" + localFile
- + " localUser=" + localUser + " permission=" + permission);
- }
- }
-
- @Override
- public void grantHttpsServiceGroupPermission(LocalFile localFile, FilesystemPermission permission)
- throws IllegalArgumentException {
-
- if (localFile == null || permission == null) {
- throw new IllegalArgumentException(
- "Unable to perform the operation. The received null parameters: localFile=" + localFile
- + " permission=" + permission);
- }
- }
-
- @Override
- public void moveHttpsPermissions(LocalFile fromLocalFile, LocalFile toLocalFile)
- throws IllegalArgumentException {
-
- if (fromLocalFile == null || toLocalFile == null) {
- throw new IllegalArgumentException(
- "Unable to perform the operation. The received null parameters: fromLocalFile="
- + fromLocalFile + " toLocalFile=" + toLocalFile);
- }
- }
-
}
diff --git a/src/main/java/it/grid/storm/asynch/AdvancedPicker.java b/src/main/java/it/grid/storm/asynch/AdvancedPicker.java
index c990a3c74..8d6f7d0b8 100644
--- a/src/main/java/it/grid/storm/asynch/AdvancedPicker.java
+++ b/src/main/java/it/grid/storm/asynch/AdvancedPicker.java
@@ -18,8 +18,8 @@
package it.grid.storm.asynch;
import it.grid.storm.catalogs.RequestSummaryCatalog;
-import it.grid.storm.catalogs.RequestSummaryData;
import it.grid.storm.config.Configuration;
+import it.grid.storm.persistence.model.RequestSummaryData;
import it.grid.storm.scheduler.CrusherScheduler;
import it.grid.storm.scheduler.SchedulerException;
import it.grid.storm.scheduler.SchedulerStatus;
@@ -281,31 +281,4 @@ synchronized public boolean abortRequest(TRequestToken rt) {
return true;
}
- /**
- * Method used to remove chunks of the request identified by the supplied TRequestToken, with
- * surls given by the collection c. Chunks in the DB get their status changed and so will not be
- * considered for processing.
- *
- * If a null TRequestToken or Collection is supplied, or some other abort request has been issued,
- * then FALSE is returned; otherwise TRUE is returned.
- */
- synchronized public boolean abortChunksOfRequest(TRequestToken rt, Collection c) {
-
- if (abort) {
-
- return false;
- }
-
- if ((rt == null) || (c == null)) {
-
- return false;
- }
-
- abortToken = rt;
- abortSURLS = c;
- abort = true;
-
- return true;
- }
-
}
diff --git a/src/main/java/it/grid/storm/asynch/BoL.java b/src/main/java/it/grid/storm/asynch/BoL.java
index cb37db2bb..32cc02cef 100644
--- a/src/main/java/it/grid/storm/asynch/BoL.java
+++ b/src/main/java/it/grid/storm/asynch/BoL.java
@@ -17,11 +17,9 @@
package it.grid.storm.asynch;
-import it.grid.storm.authz.AuthzDirector;
-import it.grid.storm.authz.SpaceAuthzInterface;
-import it.grid.storm.authz.sa.model.SRMSpaceRequest;
-import it.grid.storm.catalogs.BoLData;
-import it.grid.storm.catalogs.RequestData;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
import it.grid.storm.catalogs.surl.SURLStatusManager;
import it.grid.storm.catalogs.surl.SURLStatusManagerFactory;
import it.grid.storm.common.types.SizeUnit;
@@ -35,21 +33,18 @@
import it.grid.storm.namespace.NamespaceException;
import it.grid.storm.namespace.StoRI;
import it.grid.storm.namespace.UnapprochableSurlException;
+import it.grid.storm.persistence.model.BoLData;
+import it.grid.storm.persistence.model.RequestData;
import it.grid.storm.scheduler.Chooser;
import it.grid.storm.scheduler.Delegable;
import it.grid.storm.scheduler.Streets;
-import it.grid.storm.space.SpaceHelper;
import it.grid.storm.srm.types.TReturnStatus;
import it.grid.storm.srm.types.TSURL;
import it.grid.storm.srm.types.TSizeInBytes;
-import it.grid.storm.srm.types.TSpaceToken;
import it.grid.storm.srm.types.TStatusCode;
import it.grid.storm.tape.recalltable.TapeRecallCatalog;
import it.grid.storm.tape.recalltable.model.TapeRecallStatus;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
/**
* Class that represents a chunk of an srmBringOnLine request: it handles a single file of a
* multifile/directory-expansion request. StoRM then sends the chunk to a chunk-scheduler. Security
@@ -231,20 +226,6 @@ public void doIt() {
}
}
- SpaceHelper sp = new SpaceHelper();
- TSpaceToken token = sp.getTokenFromStoRI(log, fileStoRI);
- SpaceAuthzInterface spaceAuth = AuthzDirector.getSpaceAuthz(token);
-
- if (!spaceAuth.authorize(gu, SRMSpaceRequest.BOL)) {
- String emsg =
- String.format("Space authorization denied %s" + " in Storage Area: %s", surl, token);
- log.debug(emsg);
- requestData.changeStatusSRM_AUTHORIZATION_FAILURE(emsg);
- failure = true;
- printOutcome(dn, surl, requestData.getStatus());
- return;
- }
-
manageIsPermit(fileStoRI);
printOutcome(dn, surl, requestData.getStatus());
}
diff --git a/src/main/java/it/grid/storm/asynch/BoLFeeder.java b/src/main/java/it/grid/storm/asynch/BoLFeeder.java
index 483bfe179..078895705 100644
--- a/src/main/java/it/grid/storm/asynch/BoLFeeder.java
+++ b/src/main/java/it/grid/storm/asynch/BoLFeeder.java
@@ -18,10 +18,7 @@
package it.grid.storm.asynch;
import it.grid.storm.catalogs.BoLChunkCatalog;
-import it.grid.storm.catalogs.BoLPersistentChunkData;
-import it.grid.storm.catalogs.InvalidSurlRequestDataAttributesException;
import it.grid.storm.catalogs.RequestSummaryCatalog;
-import it.grid.storm.catalogs.RequestSummaryData;
import it.grid.storm.griduser.GridUserInterface;
import it.grid.storm.namespace.InvalidDescendantsEmptyRequestException;
import it.grid.storm.namespace.InvalidDescendantsFileRequestException;
@@ -31,6 +28,9 @@
import it.grid.storm.namespace.NamespaceException;
import it.grid.storm.namespace.StoRI;
import it.grid.storm.namespace.UnapprochableSurlException;
+import it.grid.storm.persistence.exceptions.InvalidSurlRequestDataAttributesException;
+import it.grid.storm.persistence.model.BoLPersistentChunkData;
+import it.grid.storm.persistence.model.RequestSummaryData;
import it.grid.storm.scheduler.Delegable;
import it.grid.storm.scheduler.SchedulerException;
import it.grid.storm.srm.types.InvalidTDirOptionAttributesException;
diff --git a/src/main/java/it/grid/storm/asynch/BoLPersistentChunk.java b/src/main/java/it/grid/storm/asynch/BoLPersistentChunk.java
index 5979c88a5..a3e145d25 100644
--- a/src/main/java/it/grid/storm/asynch/BoLPersistentChunk.java
+++ b/src/main/java/it/grid/storm/asynch/BoLPersistentChunk.java
@@ -18,9 +18,9 @@
package it.grid.storm.asynch;
import it.grid.storm.catalogs.BoLChunkCatalog;
-import it.grid.storm.catalogs.BoLPersistentChunkData;
-import it.grid.storm.catalogs.RequestSummaryData;
import it.grid.storm.griduser.GridUserInterface;
+import it.grid.storm.persistence.model.BoLPersistentChunkData;
+import it.grid.storm.persistence.model.RequestSummaryData;
import it.grid.storm.scheduler.PersistentRequestChunk;
import it.grid.storm.srm.types.TStatusCode;
import it.grid.storm.tape.recalltable.model.TapeRecallStatus;
diff --git a/src/main/java/it/grid/storm/asynch/BuilderException.java b/src/main/java/it/grid/storm/asynch/BuilderException.java
index 9513c5b8e..87e4f71f1 100644
--- a/src/main/java/it/grid/storm/asynch/BuilderException.java
+++ b/src/main/java/it/grid/storm/asynch/BuilderException.java
@@ -36,14 +36,4 @@ public BuilderException(String message) {
super(message);
}
-
- public BuilderException(Throwable cause) {
-
- super(cause);
- }
-
- public BuilderException(String message, Throwable cause) {
-
- super(message, cause);
- }
}
diff --git a/src/main/java/it/grid/storm/asynch/GlobalStatusManager.java b/src/main/java/it/grid/storm/asynch/GlobalStatusManager.java
index 2b138f2a3..71f1616e2 100644
--- a/src/main/java/it/grid/storm/asynch/GlobalStatusManager.java
+++ b/src/main/java/it/grid/storm/asynch/GlobalStatusManager.java
@@ -17,8 +17,8 @@
package it.grid.storm.asynch;
-import it.grid.storm.catalogs.ChunkData;
import it.grid.storm.catalogs.RequestSummaryCatalog;
+import it.grid.storm.persistence.model.ChunkData;
import it.grid.storm.srm.types.TRequestToken;
import it.grid.storm.srm.types.TReturnStatus;
import it.grid.storm.srm.types.TStatusCode;
diff --git a/src/main/java/it/grid/storm/asynch/InvalidBoLChunkAttributesException.java b/src/main/java/it/grid/storm/asynch/InvalidBoLChunkAttributesException.java
deleted file mode 100644
index d56575f67..000000000
--- a/src/main/java/it/grid/storm/asynch/InvalidBoLChunkAttributesException.java
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- *
- * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010.
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not
- * use this file except in compliance with the License. You may obtain a copy of
- * the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package it.grid.storm.asynch;
-
-import it.grid.storm.catalogs.BoLPersistentChunkData;
-import it.grid.storm.catalogs.RequestSummaryData;
-import it.grid.storm.griduser.GridUserInterface;
-
-/**
- * This class represents an Exception thrown when a BoLChunk is created with any null attribute:
- * GridUser, RequestSummaryData, BoLChunkData or GlobalStatusManager.
- *
- * @author: CNAF
- * @version: 1.0
- * @date: Aug 2009
- */
-public class InvalidBoLChunkAttributesException extends Exception {
-
- private static final long serialVersionUID = 2320080131526579634L;
-
- private final boolean nullGu; // true if GridUser is null
- private final boolean nullRsd; // true if RequestSummaryData is null
- private final boolean nullChunkData; // true if BoLChunkData is null
- private final boolean nullGlobalStatusManager; // true if gsm is null
-
- /**
- * Constructor that requires the GridUser, RequestSummaryData, BoLChunkData and
- * GlobalStatusManager that caused the exception to be thrown.
- */
- public InvalidBoLChunkAttributesException(GridUserInterface gu, RequestSummaryData rsd,
- BoLPersistentChunkData chunkData, GlobalStatusManager gsm) {
-
- nullGu = (gu == null);
- nullRsd = (rsd == null);
- nullChunkData = (chunkData == null);
- nullGlobalStatusManager = (gsm == null);
- }
-
- @Override
- public String toString() {
-
- return String.format(
- "Invalid attributes when creating BoLChunk: "
- + "nullGridUser=%b; nullRequestSumamryData=%b; nullBoLChunkData=%b; "
- + "nullGlobalStatusManager=%b",
- nullGu, nullRsd, nullChunkData, nullGlobalStatusManager);
- }
-}
diff --git a/src/main/java/it/grid/storm/asynch/InvalidBoLFeederAttributesException.java b/src/main/java/it/grid/storm/asynch/InvalidBoLFeederAttributesException.java
index 8a3af9060..2a9039328 100644
--- a/src/main/java/it/grid/storm/asynch/InvalidBoLFeederAttributesException.java
+++ b/src/main/java/it/grid/storm/asynch/InvalidBoLFeederAttributesException.java
@@ -17,8 +17,8 @@
package it.grid.storm.asynch;
-import it.grid.storm.catalogs.RequestSummaryData;
import it.grid.storm.griduser.GridUserInterface;
+import it.grid.storm.persistence.model.RequestSummaryData;
/**
* Class that represents an Exception thrown when a BoLFeeder could not be created because the
diff --git a/src/main/java/it/grid/storm/asynch/InvalidPersistentRequestAttributesException.java b/src/main/java/it/grid/storm/asynch/InvalidPersistentRequestAttributesException.java
index 9e55f6fe0..bef52a491 100644
--- a/src/main/java/it/grid/storm/asynch/InvalidPersistentRequestAttributesException.java
+++ b/src/main/java/it/grid/storm/asynch/InvalidPersistentRequestAttributesException.java
@@ -11,9 +11,9 @@
package it.grid.storm.asynch;
-import it.grid.storm.catalogs.PersistentChunkData;
-import it.grid.storm.catalogs.RequestSummaryData;
import it.grid.storm.griduser.GridUserInterface;
+import it.grid.storm.persistence.model.PersistentChunkData;
+import it.grid.storm.persistence.model.RequestSummaryData;
/**
* This class represents an Exceptin thrown when a PtPChunk is created with any null attribute:
diff --git a/src/main/java/it/grid/storm/asynch/InvalidPtGAttributesException.java b/src/main/java/it/grid/storm/asynch/InvalidPtGAttributesException.java
index cf3333a4f..2ce14206a 100644
--- a/src/main/java/it/grid/storm/asynch/InvalidPtGAttributesException.java
+++ b/src/main/java/it/grid/storm/asynch/InvalidPtGAttributesException.java
@@ -1,7 +1,7 @@
package it.grid.storm.asynch;
-import it.grid.storm.catalogs.PtGData;
import it.grid.storm.griduser.GridUserInterface;
+import it.grid.storm.persistence.model.PtGData;
/**
* @author Michele Dibenedetto
diff --git a/src/main/java/it/grid/storm/asynch/InvalidPtGChunkAttributesException.java b/src/main/java/it/grid/storm/asynch/InvalidPtGChunkAttributesException.java
deleted file mode 100644
index 12a830e62..000000000
--- a/src/main/java/it/grid/storm/asynch/InvalidPtGChunkAttributesException.java
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- *
- * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010.
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not
- * use this file except in compliance with the License. You may obtain a copy of
- * the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package it.grid.storm.asynch;
-
-import it.grid.storm.catalogs.RequestSummaryData;
-import it.grid.storm.catalogs.PtGData;
-import it.grid.storm.griduser.GridUserInterface;
-
-/**
- * This class represents an Exceptin thrown when a PtGChunk is created with any null attribute:
- * GridUser, RequestSummaryData, PtGChunkData or GlobalStatusManager.
- *
- * @author: EGRID - ICTP Trieste
- * @version: 2.0
- * @date: May 16th, 2005
- */
-public class InvalidPtGChunkAttributesException extends InvalidPtGAttributesException {
-
- /**
- *
- */
- private static final long serialVersionUID = 754275707315797289L;
- /**
- * true if RequestSummaryData is null
- */
- private final boolean nullRsd;
-
- /**
- * true if gsm is null
- */
- private final boolean nullGlobalStatusManager;
-
- /**
- * Constructor that requires the GridUser, RequestSummaryData, PtGChunkData and
- * GlobalStatusManager that caused the exception to be thrown.
- */
- public InvalidPtGChunkAttributesException(GridUserInterface gu, RequestSummaryData rsd,
- PtGData chunkData, GlobalStatusManager gsm) {
-
- super(gu, chunkData);
- nullRsd = (rsd == null);
- nullGlobalStatusManager = (gsm == null);
- }
-
- @Override
- public String toString() {
-
- return String.format(
- "Invalid attributes when creating PtGChunk: "
- + "null-GridUser=%b, null-RequestSumamryData=%b, null-PtGChunkData=%b, "
- + "null-GlobalStatusManager=%b",
- nullGu, nullRsd, nullChunkData, nullGlobalStatusManager);
- }
-}
diff --git a/src/main/java/it/grid/storm/asynch/InvalidPtGFeederAttributesException.java b/src/main/java/it/grid/storm/asynch/InvalidPtGFeederAttributesException.java
index ce8793add..aacf3c43a 100644
--- a/src/main/java/it/grid/storm/asynch/InvalidPtGFeederAttributesException.java
+++ b/src/main/java/it/grid/storm/asynch/InvalidPtGFeederAttributesException.java
@@ -17,8 +17,8 @@
package it.grid.storm.asynch;
-import it.grid.storm.catalogs.RequestSummaryData;
import it.grid.storm.griduser.GridUserInterface;
+import it.grid.storm.persistence.model.RequestSummaryData;
/**
* Class that represents an Exception thrown when a PtGFeeder could not be created because the
diff --git a/src/main/java/it/grid/storm/asynch/InvalidPtPFeederAttributesException.java b/src/main/java/it/grid/storm/asynch/InvalidPtPFeederAttributesException.java
index 748715ae4..638b026f4 100644
--- a/src/main/java/it/grid/storm/asynch/InvalidPtPFeederAttributesException.java
+++ b/src/main/java/it/grid/storm/asynch/InvalidPtPFeederAttributesException.java
@@ -17,8 +17,8 @@
package it.grid.storm.asynch;
-import it.grid.storm.catalogs.RequestSummaryData;
import it.grid.storm.griduser.GridUserInterface;
+import it.grid.storm.persistence.model.RequestSummaryData;
/**
* Class that represents an Exception thrown when a PtPFeeder could not be created because the
diff --git a/src/main/java/it/grid/storm/asynch/InvalidRequestAttributesException.java b/src/main/java/it/grid/storm/asynch/InvalidRequestAttributesException.java
index ae0279eb7..fd7fc4522 100644
--- a/src/main/java/it/grid/storm/asynch/InvalidRequestAttributesException.java
+++ b/src/main/java/it/grid/storm/asynch/InvalidRequestAttributesException.java
@@ -11,8 +11,8 @@
package it.grid.storm.asynch;
-import it.grid.storm.catalogs.RequestData;
import it.grid.storm.griduser.GridUserInterface;
+import it.grid.storm.persistence.model.RequestData;
/**
* @author Michele Dibenedetto
diff --git a/src/main/java/it/grid/storm/asynch/PtG.java b/src/main/java/it/grid/storm/asynch/PtG.java
index 260a5c621..645da72e9 100644
--- a/src/main/java/it/grid/storm/asynch/PtG.java
+++ b/src/main/java/it/grid/storm/asynch/PtG.java
@@ -11,13 +11,17 @@
package it.grid.storm.asynch;
+import java.util.Arrays;
+import java.util.Calendar;
+import java.util.List;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
import it.grid.storm.acl.AclManagerFS;
import it.grid.storm.authz.AuthzDecision;
import it.grid.storm.authz.AuthzDirector;
-import it.grid.storm.authz.SpaceAuthzInterface;
import it.grid.storm.authz.path.model.SRMFileRequest;
-import it.grid.storm.authz.sa.model.SRMSpaceRequest;
-import it.grid.storm.catalogs.PtGData;
import it.grid.storm.catalogs.VolatileAndJiTCatalog;
import it.grid.storm.catalogs.surl.SURLStatusManager;
import it.grid.storm.catalogs.surl.SURLStatusManagerFactory;
@@ -42,14 +46,13 @@
import it.grid.storm.namespace.model.DefaultACL;
import it.grid.storm.namespace.model.Protocol;
import it.grid.storm.persistence.exceptions.DataAccessException;
+import it.grid.storm.persistence.model.PtGData;
import it.grid.storm.scheduler.Chooser;
import it.grid.storm.scheduler.Delegable;
import it.grid.storm.scheduler.Streets;
-import it.grid.storm.space.SpaceHelper;
import it.grid.storm.srm.types.InvalidTSizeAttributesException;
import it.grid.storm.srm.types.TSURL;
import it.grid.storm.srm.types.TSizeInBytes;
-import it.grid.storm.srm.types.TSpaceToken;
import it.grid.storm.srm.types.TStatusCode;
import it.grid.storm.srm.types.TTURL;
import it.grid.storm.synchcall.command.CommandHelper;
@@ -58,13 +61,6 @@
import it.grid.storm.tape.recalltable.TapeRecallCatalog;
import it.grid.storm.tape.recalltable.model.TapeRecallStatus;
-import java.util.Arrays;
-import java.util.Calendar;
-import java.util.List;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
public class PtG implements Delegable, Chooser, Request, Suspendedable {
protected static final String SRM_COMMAND = "srmPrepareToGet";
@@ -266,214 +262,196 @@ private void downgradeToAnonymousHttpRequest() {
*/
private void manageIsPermit(StoRI fileStoRI) {
- TSpaceToken token = new SpaceHelper().getTokenFromStoRI(log, fileStoRI);
- SpaceAuthzInterface spaceAuth = AuthzDirector.getSpaceAuthz(token);
+ if ((!fileStoRI.getLocalFile().exists()) || (fileStoRI.getLocalFile().isDirectory())) {
+ /* File does not exist, or it is a directory! Fail request with SRM_INVALID_PATH */
+ requestData.changeStatusSRM_INVALID_PATH(
+ "The requested file either does not exist, or it is a directory!");
+ failure = true;
+ log.debug("ANOMALY in PtGChunk! PolicyCollector confirms read rights on"
+ + " file, yet file does not exist physically! Or, an srmPrepareToGet"
+ + " was attempted on a directory!");
+ return;
+ }
- boolean isSpaceAuthorized;
- if (!downgradedToAnonymous && requestData instanceof IdentityInputData) {
- isSpaceAuthorized =
- spaceAuth.authorize(((IdentityInputData) requestData).getUser(), SRMSpaceRequest.PTG);
- } else {
- isSpaceAuthorized = spaceAuth.authorizeAnonymous(SRMSpaceRequest.PTG);
+ /* File exists and it is not a directory */
+ /* Sets traverse permissions on file parent folders */
+ boolean canTraverse;
+ try {
+ canTraverse = managePermitTraverseStep(fileStoRI);
+ } catch (CannotMapUserException e) {
+ String explanation = "Unable to find local user for " + DataHelper.getRequestor(requestData);
+ requestData.changeStatusSRM_FAILURE(explanation);
+ failure = true;
+ log.error("{}! CannotMapUserException: {}", explanation, e.getMessage(), e);
+ return;
+ }
+
+ if (!canTraverse) {
+ String explanation = "Cannot travers parents";
+ requestData.changeStatusSRM_FAILURE(explanation);
+ log.error(explanation);
+ failure = true;
+ return;
}
- if (isSpaceAuthorized) {
+
+ try {
+
+ TTURL turl;
try {
- if ((!fileStoRI.getLocalFile().exists()) || (fileStoRI.getLocalFile().isDirectory())) {
- /*
- * File does not exist, or it is a directory! Fail request with SRM_INVALID_PATH!
- */
- requestData.changeStatusSRM_INVALID_PATH(
- "The requested file either" + " does not exist, or it is a directory!");
+ turl = fileStoRI.getTURL(requestData.getTransferProtocols());
+ } catch (TURLBuildingException e) {
+ requestData
+ .changeStatusSRM_FAILURE("Unable to build the TURL for the provided transfer protocol");
+ failure = true;
+ log.error("ERROR in PtGChunk! There was a failure building the "
+ + "TURL. : TURLBuildingException {}", e.getMessage(), e);
+ return;
+ } catch (IllegalArgumentException e) {
+ /*
+ * Handle null TURL prefix! This is a programming error: it should not occur!
+ */
+ requestData.changeStatusSRM_FAILURE("Unable to decide TURL!");
+ failure = true;
+ log.error(
+ "ERROR in PtGChunk! invalid TURLPrefix in PtGChunkData "
+ + "caused StoRI to be unable to establish TTURL! " + "IllegalArgumentException: {}",
+ e.getMessage(), e);
+ return;
+ } catch (InvalidGetTURLProtocolException e) {
+ requestData.changeStatusSRM_FAILURE("Unable to decide TURL!");
+ failure = true;
+ log.error("ERROR in PtGChunk! invalid TURL Protocol in PtGChunkData "
+ + "caused StoRI to be unable to establish TTURL! "
+ + "InvalidGetTURLProtocolException: {}", e.getMessage(), e);
+ return;
+ }
+ if (fileStoRI.getVirtualFileSystem().getStorageClassType().isTapeEnabled()) {
+ /* Compute the Expiration Time in seconds */
+ long expDate = (System.currentTimeMillis() / 1000 + requestData.getPinLifeTime().value());
+ StormEA.setPinned(fileStoRI.getLocalFile().getAbsolutePath(), expDate);
+
+
+ try {
+ TSizeInBytes fileSize =
+ TSizeInBytes.make(fileStoRI.getLocalFile().length(), SizeUnit.BYTES);
+
+ requestData.setFileSize(fileSize);
+ log.debug("File size: {}", fileSize);
+
+ } catch (InvalidTSizeAttributesException e) {
+ requestData.changeStatusSRM_FAILURE("Unable to determine file size");
failure = true;
- log.debug("ANOMALY in PtGChunk! PolicyCollector confirms read rights on"
- + " file, yet file does not exist physically! Or, an srmPrepareToGet"
- + " was attempted on a directory!");
- } else {
- /* File exists and it is not a directory */
- /* Sets traverse permissions on file parent folders */
- boolean canTraverse;
+ log.error("ERROR in PtGChunk! error in file size computation! "
+ + "InvalidTSizeAttributesException: {}", e.getMessage(), e);
+ return;
+ }
+ }
+ boolean isOnDisk;
+ try {
+ isOnDisk = isStoriOndisk(fileStoRI);
+ } catch (FSException e) {
+ requestData.changeStatusSRM_FAILURE("Unable to verify file disk status");
+ failure = true;
+ log.error("ERROR in PtGChunk! error in file on disk check! " + "FSException: {}",
+ e.getMessage(), e);
+ return;
+ }
+ if (!isOnDisk && fileStoRI.getVirtualFileSystem().getStorageClassType().isTapeEnabled()) {
+ requestData.changeStatusSRM_REQUEST_INPROGRESS("Recalling" + " file from tape");
+ String voName = null;
+ if (!downgradedToAnonymous && requestData instanceof IdentityInputData) {
+ if (((IdentityInputData) requestData).getUser() instanceof AbstractGridUser) {
+ voName =
+ ((AbstractGridUser) ((IdentityInputData) requestData).getUser()).getVO().getValue();
+ }
+ }
+ try {
+ new TapeRecallCatalog().insertTask(this, voName,
+ fileStoRI.getLocalFile().getAbsolutePath());
+ } catch (DataAccessException e) {
+ requestData.changeStatusSRM_FAILURE("Unable to request file recall from tape");
+ failure = true;
+ log.error("ERROR in PtGChunk! error in tape recall task "
+ + "insertion! DataAccessException: {}", e.getMessage(), e);
+ return;
+ }
+ /* Stores the parameters in this object */
+ if (!downgradedToAnonymous && requestData instanceof IdentityInputData) {
try {
- canTraverse = managePermitTraverseStep(fileStoRI);
+ backupData(fileStoRI, fileStoRI.getLocalFile(),
+ ((IdentityInputData) requestData).getUser().getLocalUser(), turl);
} catch (CannotMapUserException e) {
requestData.changeStatusSRM_FAILURE(
"Unable to find local user for " + DataHelper.getRequestor(requestData));
failure = true;
log.error(
- "ERROR in PtGChunk! Unable to find LocalUser for {}! "
- + "CannotMapUserException: {}",
+ "ERROR in PtGChunk! Unable to find LocalUser "
+ + "for {}! CannotMapUserException: {}",
DataHelper.getRequestor(requestData), e.getMessage(), e);
return;
}
- if (canTraverse) {
- TTURL turl;
- try {
- turl = fileStoRI.getTURL(requestData.getTransferProtocols());
- } catch (TURLBuildingException e) {
- requestData.changeStatusSRM_FAILURE(
- "Unable to build the TURL for the provided transfer protocol");
- failure = true;
- log.error("ERROR in PtGChunk! There was a failure building the "
- + "TURL. : TURLBuildingException {}", e.getMessage(), e);
- return;
- } catch (IllegalArgumentException e) {
- /*
- * Handle null TURL prefix! This is a programming error: it should not occur!
- */
- requestData.changeStatusSRM_FAILURE("Unable to decide TURL!");
- failure = true;
- log.error("ERROR in PtGChunk! invalid TURLPrefix in PtGChunkData "
- + "caused StoRI to be unable to establish TTURL! "
- + "IllegalArgumentException: {}", e.getMessage(), e);
- return;
- } catch (InvalidGetTURLProtocolException e) {
- requestData.changeStatusSRM_FAILURE("Unable to decide TURL!");
- failure = true;
- log.error("ERROR in PtGChunk! invalid TURL Protocol in PtGChunkData "
- + "caused StoRI to be unable to establish TTURL! "
- + "InvalidGetTURLProtocolException: {}", e.getMessage(), e);
- return;
- }
- if (fileStoRI.getVirtualFileSystem().getStorageClassType().isTapeEnabled()) {
- /* Compute the Expiration Time in seconds */
- long expDate =
- (System.currentTimeMillis() / 1000 + requestData.getPinLifeTime().value());
- StormEA.setPinned(fileStoRI.getLocalFile().getAbsolutePath(), expDate);
-
-
- try {
- TSizeInBytes fileSize =
- TSizeInBytes.make(fileStoRI.getLocalFile().length(), SizeUnit.BYTES);
-
- requestData.setFileSize(fileSize);
- log.debug("File size: {}", fileSize);
-
- } catch (InvalidTSizeAttributesException e) {
- requestData.changeStatusSRM_FAILURE("Unable to determine file size");
- failure = true;
- log.error("ERROR in PtGChunk! error in file size computation! "
- + "InvalidTSizeAttributesException: {}", e.getMessage(), e);
- return;
- }
- }
- boolean isOnDisk;
- try {
- isOnDisk = isStoriOndisk(fileStoRI);
- } catch (FSException e) {
- requestData.changeStatusSRM_FAILURE("Unable to verify file disk status");
- failure = true;
- log.error("ERROR in PtGChunk! error in file on disk check! " + "FSException: {}",
- e.getMessage(), e);
- return;
- }
- if (!isOnDisk
- && fileStoRI.getVirtualFileSystem().getStorageClassType().isTapeEnabled()) {
- requestData.changeStatusSRM_REQUEST_INPROGRESS("Recalling" + " file from tape");
- String voName = null;
- if (!downgradedToAnonymous && requestData instanceof IdentityInputData) {
- if (((IdentityInputData) requestData).getUser() instanceof AbstractGridUser) {
- voName = ((AbstractGridUser) ((IdentityInputData) requestData).getUser()).getVO()
- .getValue();
- }
- }
- try {
- new TapeRecallCatalog().insertTask(this, voName,
- fileStoRI.getLocalFile().getAbsolutePath());
- } catch (DataAccessException e) {
- requestData.changeStatusSRM_FAILURE("Unable to request file recall from tape");
- failure = true;
- log.error("ERROR in PtGChunk! error in tape recall task "
- + "insertion! DataAccessException: {}", e.getMessage(), e);
- return;
- }
- /* Stores the parameters in this object */
- if (!downgradedToAnonymous && requestData instanceof IdentityInputData) {
- try {
- backupData(fileStoRI, fileStoRI.getLocalFile(),
- ((IdentityInputData) requestData).getUser().getLocalUser(), turl);
- } catch (CannotMapUserException e) {
- requestData.changeStatusSRM_FAILURE(
- "Unable to find local user for " + DataHelper.getRequestor(requestData));
- failure = true;
- log.error(
- "ERROR in PtGChunk! Unable to find LocalUser "
- + "for {}! CannotMapUserException: {}",
- DataHelper.getRequestor(requestData), e.getMessage(), e);
- return;
- }
- } else {
- backupData(fileStoRI, fileStoRI.getLocalFile(), null, turl);
- }
-
- /*
- * The request now ends by saving in the DB the IN_PROGRESS status information. The
- * effective PtG will be accomplished when the setTaskStatus() method of the
- * tapeRecallDAO calls the completeRequest() method.
- */
- } else {
- /*
- * Set the read permission for the user on the localfile and any default ace specified
- * in the story files
- */
- boolean canRead;
- try {
- canRead = managePermitReadFileStep(fileStoRI, turl);
- } catch (CannotMapUserException e) {
- requestData.changeStatusSRM_FAILURE(
- "Unable to find local user for " + DataHelper.getRequestor(requestData));
- failure = true;
- log.error(
- "ERROR in PtGChunk! Unable to find LocalUser for {}! "
- + "CannotMapUserException: {}",
- DataHelper.getRequestor(requestData), e.getMessage(), e);
- return;
- }
- if (canRead) {
-
- try {
- TSizeInBytes fileSize =
- TSizeInBytes.make(fileStoRI.getLocalFile().length(), SizeUnit.BYTES);
-
- requestData.setFileSize(fileSize);
- log.debug("File size: {}", fileSize);
-
- } catch (InvalidTSizeAttributesException e) {
- requestData.changeStatusSRM_FAILURE("Unable to determine file size");
- failure = true;
- log.error("ERROR in PtGChunk! error in file size computation! "
- + "InvalidTSizeAttributesException: {}", e.getMessage(), e);
- return;
- }
-
- requestData.setTransferURL(turl);
- requestData.changeStatusSRM_FILE_PINNED("srmPrepareToGet successfully handled!");
- } else {
- requestData.changeStatusSRM_FAILURE(
- "Local filesystem mask does not allow" + " setting up correct ACLs for PtG!");
- }
- }
- } else {
- // FIXME roll back Read, and Traverse URGENT!
- }
+ } else {
+ backupData(fileStoRI, fileStoRI.getLocalFile(), null, turl);
}
- } catch (SecurityException e) {
+
/*
- * The check for existence of the File failed because there is a SecurityManager installed
- * that denies read privileges for that File! Perhaps the local system administrator of
- * StoRM set up Java policies that contrast policies described by the PolicyCollector! There
- * is a conflict here!
+ * The request now ends by saving in the DB the IN_PROGRESS status information. The
+ * effective PtG will be accomplished when the setTaskStatus() method of the tapeRecallDAO
+ * calls the completeRequest() method.
*/
- requestData.changeStatusSRM_FAILURE("StoRM is not allowed to work on " + "requested file!");
- failure = true;
- log.error("ATTENTION in PtGChunk! PtGChunk received a SecurityException "
- + "from Java SecurityManager; StoRM cannot check-existence or "
- + "check-if-directory for: {}", fileStoRI.getLocalFile().toString(), e);
+ } else {
+ /*
+       * Set the read permission for the user on the local file and any default ACE specified in
+ * the story files
+ */
+ boolean canRead;
+ try {
+ canRead = managePermitReadFileStep(fileStoRI, turl);
+ } catch (CannotMapUserException e) {
+ requestData.changeStatusSRM_FAILURE(
+ "Unable to find local user for " + DataHelper.getRequestor(requestData));
+ failure = true;
+ log.error(
+ "ERROR in PtGChunk! Unable to find LocalUser for {}! " + "CannotMapUserException: {}",
+ DataHelper.getRequestor(requestData), e.getMessage(), e);
+ return;
+ }
+ if (canRead) {
+
+ try {
+ TSizeInBytes fileSize =
+ TSizeInBytes.make(fileStoRI.getLocalFile().length(), SizeUnit.BYTES);
+
+ requestData.setFileSize(fileSize);
+ log.debug("File size: {}", fileSize);
+
+ } catch (InvalidTSizeAttributesException e) {
+ requestData.changeStatusSRM_FAILURE("Unable to determine file size");
+ failure = true;
+ log.error("ERROR in PtGChunk! error in file size computation! "
+ + "InvalidTSizeAttributesException: {}", e.getMessage(), e);
+ return;
+ }
+
+ requestData.setTransferURL(turl);
+ requestData.changeStatusSRM_FILE_PINNED("srmPrepareToGet successfully handled!");
+ } else {
+ requestData.changeStatusSRM_FAILURE(
+ "Local filesystem mask does not allow" + " setting up correct ACLs for PtG!");
+ }
}
- } else {
- String emsg = String.format("Read access to %s in Storage Area: %s " + "denied!",
- requestData.getSURL(), token);
- requestData.changeStatusSRM_AUTHORIZATION_FAILURE(emsg);
+ } catch (SecurityException e) {
+ /*
+ * The check for existence of the File failed because there is a SecurityManager installed
+ * that denies read privileges for that File! Perhaps the local system administrator of StoRM
+ * set up Java policies that contrast policies described by the PolicyCollector! There is a
+ * conflict here!
+ */
+ requestData.changeStatusSRM_FAILURE("StoRM is not allowed to work on " + "requested file!");
failure = true;
- log.debug(emsg);
+ log.error("ATTENTION in PtGChunk! PtGChunk received a SecurityException "
+ + "from Java SecurityManager; StoRM cannot check-existence or "
+ + "check-if-directory for: {}", fileStoRI.getLocalFile().toString(), e);
}
}
@@ -491,24 +469,13 @@ private void manageIsPermit(StoRI fileStoRI) {
private boolean managePermitTraverseStep(StoRI fileStoRI) throws CannotMapUserException {
- if (!downgradedToAnonymous && requestData instanceof IdentityInputData) {
-
- if (!setupACLs)
- return verifyPath(fileStoRI);
+ if (!downgradedToAnonymous && requestData instanceof IdentityInputData && setupACLs) {
return verifyPath(fileStoRI)
&& setParentsAcl(fileStoRI, ((IdentityInputData) requestData).getUser().getLocalUser());
}
- if (verifyPath(fileStoRI)) {
-
- if (setupACLs)
- setHttpsServiceParentAcl(fileStoRI);
-
- return true;
- }
-
- return false;
+ return verifyPath(fileStoRI);
}
private boolean verifyPath(StoRI fileStoRI) {
@@ -582,7 +549,6 @@ private boolean managePermitReadFileStep(StoRI fileStoRI, TTURL turl)
if (setupACLs) {
setDefaultAcl(fileStoRI, fileStoRI.getLocalFile());
- setHttpsServiceAcl(fileStoRI.getLocalFile(), FilesystemPermission.Read);
}
return true;
@@ -699,29 +665,6 @@ private boolean setAoTAcl(StoRI fileStori, LocalUser localUser, FilesystemPermis
return true;
}
- private void setHttpsServiceParentAcl(StoRI fileStoRI) {
-
- log.debug("Adding parent https ACL for directory : '{}' parents", fileStoRI.getAbsolutePath());
-
- for (StoRI parentStoRI : fileStoRI.getParents()) {
- setHttpsServiceAcl(parentStoRI.getLocalFile(), FilesystemPermission.Traverse);
- }
- }
-
- private void setHttpsServiceAcl(LocalFile file, FilesystemPermission permission) {
-
- log.debug("Adding https ACL {} for directory : '{}'", permission, file);
-
- try {
- AclManagerFS.getInstance().grantHttpsServiceGroupPermission(file, permission);
- } catch (IllegalArgumentException e) {
- log.error("Unable to grant user permission on the created folder. "
- + "IllegalArgumentException: {}", e.getMessage(), e);
- requestData.getStatus()
- .extendExplaination("Unable to grant group permission on the created folder");
- }
- }
-
private void setDefaultAcl(StoRI fileStoRI, LocalFile localFile) {
/* Manage DefaultACL */
diff --git a/src/main/java/it/grid/storm/asynch/PtGBuilder.java b/src/main/java/it/grid/storm/asynch/PtGBuilder.java
index e9d942105..f7915ae5d 100644
--- a/src/main/java/it/grid/storm/asynch/PtGBuilder.java
+++ b/src/main/java/it/grid/storm/asynch/PtGBuilder.java
@@ -17,15 +17,16 @@
package it.grid.storm.asynch;
-import it.grid.storm.asynch.BuilderException;
-import it.grid.storm.asynch.PtG;
-import it.grid.storm.catalogs.AnonymousPtGData;
-import it.grid.storm.catalogs.IdentityPtGData;
-import it.grid.storm.catalogs.InvalidFileTransferDataAttributesException;
-import it.grid.storm.catalogs.InvalidPtGDataAttributesException;
-import it.grid.storm.catalogs.InvalidSurlRequestDataAttributesException;
-import it.grid.storm.catalogs.PtGData;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
import it.grid.storm.common.types.TURLPrefix;
+import it.grid.storm.persistence.exceptions.InvalidFileTransferDataAttributesException;
+import it.grid.storm.persistence.exceptions.InvalidPtGDataAttributesException;
+import it.grid.storm.persistence.exceptions.InvalidSurlRequestDataAttributesException;
+import it.grid.storm.persistence.model.AnonymousPtGData;
+import it.grid.storm.persistence.model.IdentityPtGData;
+import it.grid.storm.persistence.model.PtGData;
import it.grid.storm.srm.types.TDirOption;
import it.grid.storm.srm.types.TLifeTimeInSeconds;
import it.grid.storm.srm.types.TReturnStatus;
@@ -35,8 +36,6 @@
import it.grid.storm.srm.types.TTURL;
import it.grid.storm.synchcall.data.IdentityInputData;
import it.grid.storm.synchcall.data.datatransfer.FileTransferInputData;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
/**
* @author Michele Dibenedetto
diff --git a/src/main/java/it/grid/storm/asynch/PtGFeeder.java b/src/main/java/it/grid/storm/asynch/PtGFeeder.java
index 0a73c85d1..b4c08e6ca 100644
--- a/src/main/java/it/grid/storm/asynch/PtGFeeder.java
+++ b/src/main/java/it/grid/storm/asynch/PtGFeeder.java
@@ -17,11 +17,8 @@
package it.grid.storm.asynch;
-import it.grid.storm.catalogs.InvalidSurlRequestDataAttributesException;
import it.grid.storm.catalogs.PtGChunkCatalog;
-import it.grid.storm.catalogs.PtGPersistentChunkData;
import it.grid.storm.catalogs.RequestSummaryCatalog;
-import it.grid.storm.catalogs.RequestSummaryData;
import it.grid.storm.namespace.InvalidDescendantsEmptyRequestException;
import it.grid.storm.namespace.InvalidDescendantsFileRequestException;
import it.grid.storm.namespace.InvalidDescendantsPathRequestException;
@@ -30,6 +27,9 @@
import it.grid.storm.namespace.NamespaceException;
import it.grid.storm.namespace.StoRI;
import it.grid.storm.namespace.UnapprochableSurlException;
+import it.grid.storm.persistence.exceptions.InvalidSurlRequestDataAttributesException;
+import it.grid.storm.persistence.model.PtGPersistentChunkData;
+import it.grid.storm.persistence.model.RequestSummaryData;
import it.grid.storm.scheduler.Delegable;
import it.grid.storm.scheduler.SchedulerException;
import it.grid.storm.srm.types.InvalidTDirOptionAttributesException;
diff --git a/src/main/java/it/grid/storm/asynch/PtGPersistentChunk.java b/src/main/java/it/grid/storm/asynch/PtGPersistentChunk.java
index cba0e3227..40a6dca6d 100644
--- a/src/main/java/it/grid/storm/asynch/PtGPersistentChunk.java
+++ b/src/main/java/it/grid/storm/asynch/PtGPersistentChunk.java
@@ -19,9 +19,9 @@
import java.util.Arrays;
import it.grid.storm.catalogs.PtGChunkCatalog;
-import it.grid.storm.catalogs.PtGData;
-import it.grid.storm.catalogs.PtGPersistentChunkData;
-import it.grid.storm.catalogs.RequestSummaryData;
+import it.grid.storm.persistence.model.PtGData;
+import it.grid.storm.persistence.model.PtGPersistentChunkData;
+import it.grid.storm.persistence.model.RequestSummaryData;
import it.grid.storm.scheduler.PersistentRequestChunk;
import it.grid.storm.srm.types.TStatusCode;
import it.grid.storm.synchcall.command.CommandHelper;
diff --git a/src/main/java/it/grid/storm/asynch/PtP.java b/src/main/java/it/grid/storm/asynch/PtP.java
index 257aedb8f..b577a6a91 100644
--- a/src/main/java/it/grid/storm/asynch/PtP.java
+++ b/src/main/java/it/grid/storm/asynch/PtP.java
@@ -22,10 +22,7 @@
import it.grid.storm.acl.AclManagerFS;
import it.grid.storm.authz.AuthzDecision;
import it.grid.storm.authz.AuthzDirector;
-import it.grid.storm.authz.SpaceAuthzInterface;
import it.grid.storm.authz.path.model.SRMFileRequest;
-import it.grid.storm.authz.sa.model.SRMSpaceRequest;
-import it.grid.storm.catalogs.PtPData;
import it.grid.storm.catalogs.ReservedSpaceCatalog;
import it.grid.storm.catalogs.VolatileAndJiTCatalog;
import it.grid.storm.catalogs.surl.SURLStatusManager;
@@ -49,6 +46,7 @@
import it.grid.storm.namespace.model.ACLEntry;
import it.grid.storm.namespace.model.DefaultACL;
import it.grid.storm.persistence.exceptions.DataAccessException;
+import it.grid.storm.persistence.model.PtPData;
import it.grid.storm.persistence.model.TransferObjectDecodingException;
import it.grid.storm.scheduler.Chooser;
import it.grid.storm.scheduler.Delegable;
@@ -332,24 +330,6 @@ private void manageOverwriteExistingFile(StoRI fileStoRI) {
*/
private void managePermit(StoRI fileStoRI) {
- TSpaceToken token = new SpaceHelper().getTokenFromStoRI(PtP.log, fileStoRI);
- SpaceAuthzInterface spaceAuth = AuthzDirector.getSpaceAuthz(token);
-
- boolean isSpaceAuthorized;
- if (requestData instanceof IdentityInputData) {
- isSpaceAuthorized =
- spaceAuth.authorize(((IdentityInputData) requestData).getUser(), SRMSpaceRequest.PTP);
- } else {
- isSpaceAuthorized = spaceAuth.authorizeAnonymous(SRMSpaceRequest.PTP);
- }
- if (!isSpaceAuthorized) {
- requestData.changeStatusSRM_AUTHORIZATION_FAILURE("Create/Write access for "
- + requestData.getSURL() + " in Storage Area: " + token + " denied!");
- failure = true;
- log.debug("Create/Write access for {} in Storage Area: {} denied!", requestData.getSURL(),
- token);
- return;
- }
TTURL auxTURL;
try {
auxTURL = fileStoRI.getTURL(requestData.getTransferProtocols());
@@ -445,7 +425,6 @@ private boolean managePermitTraverseStep(StoRI fileStoRI) throws CannotMapUserEx
return setParentAcl(fileStoRI, user);
}
- setHttpsServiceParentAcl(fileStoRI);
return true;
}
@@ -559,7 +538,6 @@ private boolean managePermitSetFileStep(StoRI fileStoRI) throws CannotMapUserExc
setDefaultAcl(fileStoRI);
setTapeManagementAcl(fileStoRI);
- setHttpsServiceAcl(fileStoRI.getLocalFile(), FilesystemPermission.ReadWrite);
return true;
}
@@ -674,29 +652,6 @@ private boolean setAoTAcl(StoRI fileStori, LocalUser localUser, FilesystemPermis
return response;
}
- private void setHttpsServiceParentAcl(StoRI fileStoRI) {
-
- log.debug("SrmMkdir: Adding parent https ACL for directory: '{}' parents",
- fileStoRI.getAbsolutePath());
- for (StoRI parentStoRI : fileStoRI.getParents()) {
- setHttpsServiceAcl(parentStoRI.getLocalFile(), FilesystemPermission.Traverse);
- }
- }
-
- private void setHttpsServiceAcl(LocalFile file, FilesystemPermission permission) {
-
- log.debug("SrmMkdir: Adding https ACL {} for directory : '{}'", permission, file);
-
- try {
- AclManagerFS.getInstance().grantHttpsServiceGroupPermission(file, permission);
- } catch (IllegalArgumentException e) {
- log.error("Unable to grant user permission on the created folder. "
- + "IllegalArgumentException: {}", e.getMessage(), e);
- requestData.getStatus()
- .extendExplaination("Unable to grant group permission on the created folder");
- }
- }
-
/**
* Private method used to manage ReserveSpace. Returns false if something went wrong!
*/
@@ -868,7 +823,7 @@ private boolean isExistingSpaceToken(TSpaceToken spaceToken) throws Exception {
StorageSpaceData spaceData = null;
try {
- spaceData = new ReservedSpaceCatalog().getStorageSpace(spaceToken);
+ spaceData = ReservedSpaceCatalog.getInstance().getStorageSpace(spaceToken);
} catch (TransferObjectDecodingException e) {
log.error("Unable to build StorageSpaceData from StorageSpaceTO."
+ " TransferObjectDecodingException: {}", e.getMessage());
diff --git a/src/main/java/it/grid/storm/asynch/PtPBuilder.java b/src/main/java/it/grid/storm/asynch/PtPBuilder.java
index 38150ea60..a04317c88 100644
--- a/src/main/java/it/grid/storm/asynch/PtPBuilder.java
+++ b/src/main/java/it/grid/storm/asynch/PtPBuilder.java
@@ -19,14 +19,15 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import it.grid.storm.catalogs.AnonymousPtPData;
-import it.grid.storm.catalogs.IdentityPtPData;
-import it.grid.storm.catalogs.InvalidFileTransferDataAttributesException;
-import it.grid.storm.catalogs.InvalidPtPDataAttributesException;
-import it.grid.storm.catalogs.InvalidSurlRequestDataAttributesException;
-import it.grid.storm.catalogs.PtPData;
+
import it.grid.storm.common.types.TURLPrefix;
import it.grid.storm.config.Configuration;
+import it.grid.storm.persistence.exceptions.InvalidFileTransferDataAttributesException;
+import it.grid.storm.persistence.exceptions.InvalidPtPDataAttributesException;
+import it.grid.storm.persistence.exceptions.InvalidSurlRequestDataAttributesException;
+import it.grid.storm.persistence.model.AnonymousPtPData;
+import it.grid.storm.persistence.model.IdentityPtPData;
+import it.grid.storm.persistence.model.PtPData;
import it.grid.storm.srm.types.TFileStorageType;
import it.grid.storm.srm.types.TLifeTimeInSeconds;
import it.grid.storm.srm.types.TOverwriteMode;
diff --git a/src/main/java/it/grid/storm/asynch/PtPFeeder.java b/src/main/java/it/grid/storm/asynch/PtPFeeder.java
index 65486ac20..c1df96972 100644
--- a/src/main/java/it/grid/storm/asynch/PtPFeeder.java
+++ b/src/main/java/it/grid/storm/asynch/PtPFeeder.java
@@ -18,10 +18,10 @@
package it.grid.storm.asynch;
import it.grid.storm.catalogs.PtPChunkCatalog;
-import it.grid.storm.catalogs.PtPPersistentChunkData;
import it.grid.storm.catalogs.RequestSummaryCatalog;
-import it.grid.storm.catalogs.RequestSummaryData;
import it.grid.storm.griduser.GridUserInterface;
+import it.grid.storm.persistence.model.PtPPersistentChunkData;
+import it.grid.storm.persistence.model.RequestSummaryData;
import it.grid.storm.scheduler.Delegable;
import it.grid.storm.scheduler.SchedulerException;
import it.grid.storm.srm.types.TSURL;
diff --git a/src/main/java/it/grid/storm/asynch/PtPPersistentChunk.java b/src/main/java/it/grid/storm/asynch/PtPPersistentChunk.java
index 65f830d32..686649ec5 100644
--- a/src/main/java/it/grid/storm/asynch/PtPPersistentChunk.java
+++ b/src/main/java/it/grid/storm/asynch/PtPPersistentChunk.java
@@ -13,9 +13,9 @@
import java.util.Arrays;
import it.grid.storm.catalogs.PtPChunkCatalog;
-import it.grid.storm.catalogs.PtPData;
-import it.grid.storm.catalogs.PtPPersistentChunkData;
-import it.grid.storm.catalogs.RequestSummaryData;
+import it.grid.storm.persistence.model.PtPData;
+import it.grid.storm.persistence.model.PtPPersistentChunkData;
+import it.grid.storm.persistence.model.RequestSummaryData;
import it.grid.storm.scheduler.PersistentRequestChunk;
import it.grid.storm.srm.types.TStatusCode;
import it.grid.storm.synchcall.command.CommandHelper;
diff --git a/src/main/java/it/grid/storm/asynch/SRMPrepareToPutReply.java b/src/main/java/it/grid/storm/asynch/SRMPrepareToPutReply.java
deleted file mode 100644
index 7d036c706..000000000
--- a/src/main/java/it/grid/storm/asynch/SRMPrepareToPutReply.java
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- *
- * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010.
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not
- * use this file except in compliance with the License. You may obtain a copy of
- * the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package it.grid.storm.asynch;
-
-import it.grid.storm.srm.types.TRequestToken;
-
-/**
- * Class that represents a reply to an issued SRMPrepareToPut command. It provides a method to
- * recover the assigned request token.
- *
- * @author EGRID - ICTP Trieste
- * @version 1.0
- * @date September, 2005
- */
-public class SRMPrepareToPutReply {
-
- // TRequestToken assigned during the SRM prepare to put operation
- private TRequestToken requestToken = null;
-
- /**
- * Constructor that requires the assigned TRequestToken; if it is null, an
- * InvalidPutReplyAttributeException is thrown.
- */
- public SRMPrepareToPutReply(TRequestToken requestToken) throws InvalidPutReplyAttributeException {
-
- if (requestToken == null)
- throw new InvalidPutReplyAttributeException();
- this.requestToken = requestToken;
- }
-
- /**
- * Method that returns the assigned request token.
- */
- public TRequestToken requestToken() {
-
- return requestToken;
- }
-
- public String toString() {
-
- return "requestToken=" + requestToken;
- }
-}
diff --git a/src/main/java/it/grid/storm/asynch/SRMPutDoneReply.java b/src/main/java/it/grid/storm/asynch/SRMPutDoneReply.java
deleted file mode 100644
index 215356c2b..000000000
--- a/src/main/java/it/grid/storm/asynch/SRMPutDoneReply.java
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- *
- * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010.
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not
- * use this file except in compliance with the License. You may obtain a copy of
- * the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package it.grid.storm.asynch;
-
-import it.grid.storm.srm.types.TReturnStatus;
-
-/**
- * Class that represents the reply received from issuing an srmPutDone command.
- *
- * @author EGRID ICTP Trieste
- * @version 1.0
- * @date August 2006
- */
-public class SRMPutDoneReply {
-
- private TReturnStatus overallRetStat = null; // overall request return status
-
- /**
- * Constructor that requires the overall TReturnStatus of the reply.
- */
- public SRMPutDoneReply(TReturnStatus overallRetStat)
- throws InvalidPutDoneReplyAttributeException {
-
- if (overallRetStat == null)
- throw new InvalidPutDoneReplyAttributeException();
- this.overallRetStat = overallRetStat;
- }
-
- /**
- * Method that returns the overll status of the request.
- */
- public TReturnStatus overallRetStat() {
-
- return overallRetStat;
- }
-
- public String toString() {
-
- return "SRMPutDoneReply: overall TReturnStatus is " + overallRetStat.toString();
- }
-}
diff --git a/src/main/java/it/grid/storm/asynch/SRMStatusOfPutRequestReply.java b/src/main/java/it/grid/storm/asynch/SRMStatusOfPutRequestReply.java
deleted file mode 100644
index f304c6d81..000000000
--- a/src/main/java/it/grid/storm/asynch/SRMStatusOfPutRequestReply.java
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- *
- * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010.
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not
- * use this file except in compliance with the License. You may obtain a copy of
- * the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package it.grid.storm.asynch;
-
-import it.grid.storm.srm.types.TTURL;
-import it.grid.storm.srm.types.TReturnStatus;
-
-/**
- * Class that represents the reply returned from an invocation of SRMStatusOfPutRequest. It supplies
- * methods for quering the toTURL assigned, and the returnStatus of the request.
- *
- * @author EGRID - ICTP Trieste
- * @version 1.0
- * @date September 2005
- */
-public class SRMStatusOfPutRequestReply {
-
- private TTURL toTURL = null; // TTURL as supplied by the invoked server in the
- // SRMStatusOfPutRequest
- private TReturnStatus returnStatus = null; // returnStatus as supplied by the
- // invoked server in the
- // SRMStatusOfPutRequest
-
- public SRMStatusOfPutRequestReply(TTURL toTURL, TReturnStatus returnStatus)
- throws InvalidPutStatusAttributesException {
-
- if ((toTURL == null) || (returnStatus == null))
- throw new InvalidPutStatusAttributesException(toTURL, returnStatus);
- this.toTURL = toTURL;
- this.returnStatus = returnStatus;
- }
-
- /**
- * Method that returns the toTURL that the invoked server assigned to the put request.
- */
- public TTURL toTURL() {
-
- return toTURL;
- }
-
- /**
- * Method that returns the TReturnStatus that the invoked server assigned to the put request.
- */
- public TReturnStatus returnStatus() {
-
- return returnStatus;
- }
-
- public String toString() {
-
- return "toTURL= " + toTURL + "; returnStatus=" + returnStatus;
- }
-}
diff --git a/src/main/java/it/grid/storm/asynch/Suspendedable.java b/src/main/java/it/grid/storm/asynch/Suspendedable.java
index cfd887e00..970996c54 100644
--- a/src/main/java/it/grid/storm/asynch/Suspendedable.java
+++ b/src/main/java/it/grid/storm/asynch/Suspendedable.java
@@ -17,7 +17,7 @@
package it.grid.storm.asynch;
-import it.grid.storm.catalogs.RequestData;
+import it.grid.storm.persistence.model.RequestData;
import it.grid.storm.tape.recalltable.model.TapeRecallStatus;
public interface Suspendedable {
diff --git a/src/main/java/it/grid/storm/authz/AuthzDirector.java b/src/main/java/it/grid/storm/authz/AuthzDirector.java
index 154001521..4e65b95f5 100644
--- a/src/main/java/it/grid/storm/authz/AuthzDirector.java
+++ b/src/main/java/it/grid/storm/authz/AuthzDirector.java
@@ -17,159 +17,44 @@
package it.grid.storm.authz;
-import it.grid.storm.authz.path.PathAuthz;
-import it.grid.storm.authz.path.conf.PathAuthzDBReader;
-import it.grid.storm.authz.sa.SpaceDBAuthz;
-import it.grid.storm.authz.sa.test.MockSpaceAuthz;
-import it.grid.storm.namespace.NamespaceDirector;
-import it.grid.storm.namespace.NamespaceException;
-import it.grid.storm.namespace.NamespaceInterface;
-import it.grid.storm.namespace.VirtualFSInterface;
-import it.grid.storm.namespace.model.SAAuthzType;
-import it.grid.storm.srm.types.TSpaceToken;
-
-import java.io.File;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.Map;
-
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-public class AuthzDirector {
-
- private static final Logger log = LoggerFactory
- .getLogger(AuthzDirector.class);
- private static String configurationPATH;
-
- // Map between 'SpaceToken' and the related 'SpaceAuthz'
- private static Map spaceAuthzs = null;
-
- // PathAuthz is only one, shared by all SAs
- private static PathAuthzInterface pathAuthz = null;
-
- /**
- * Scan the Namespace.xml to retrieve the list of file AuthZDB to digest
- */
- private static Map buildSpaceAuthzsMAP() {
-
- HashMap spaceAuthzMap = new HashMap();
-
- // Retrieve the list of VFS from Namespace
- NamespaceInterface ns = NamespaceDirector.getNamespace();
- ArrayList vfss;
- try {
- vfss = new ArrayList(ns.getAllDefinedVFS());
- for (VirtualFSInterface vfs : vfss) {
- String vfsName = vfs.getAliasName();
- SAAuthzType authzTp = vfs.getStorageAreaAuthzType();
- String authzName = "";
- if (authzTp.equals(SAAuthzType.AUTHZDB)) {
- // The Space Authz is based on Authz DB
- authzName = vfs.getStorageAreaAuthzDB();
- log.debug("Loading AuthzDB '{}'", authzName);
- if (existsAuthzDBFile(authzName)) {
- // Digest the Space AuthzDB File
- TSpaceToken spaceToken = vfs.getSpaceToken();
- SpaceAuthzInterface spaceAuthz = new SpaceDBAuthz(authzName);
- spaceAuthzMap.put(spaceToken, spaceAuthz);
- } else {
- log.error("File AuthzDB '{}' related to '{}' does not exists.",
- authzName, vfsName);
- }
- } else {
- authzName = vfs.getStorageAreaAuthzFixed();
- }
- log.debug("VFS ['{}'] = {} : {}", vfsName, authzTp, authzName);
- }
- } catch (NamespaceException e) {
- log.error("Unable to initialize AUTHZ DB! Error: {}", e.getMessage(), e);
- }
-
- return spaceAuthzMap;
- }
-
- /**
- * Utility method
- *
- * @param dbFileName
- * @return
- * @throws AuthzDBReaderException
- */
- private static boolean existsAuthzDBFile(String dbFileName) {
-
- String fileName = configurationPATH + File.separator + dbFileName;
- boolean exists = (new File(fileName)).exists();
- if (!exists) {
- log.warn("The AuthzDB File '{}' does not exists", dbFileName);
- }
- return exists;
- }
-
- // ****************************************
- // PUBLIC METHODS
- // ****************************************
-
- /******************************
- * SPACE AUTHORIZATION ENGINE
- ******************************/
- public static void initializeSpaceAuthz() {
-
- // Build Space Authzs MAP
- spaceAuthzs = buildSpaceAuthzsMAP();
- }
-
- /**
- * Retrieve the Space Authorization module related to the Space Token
- *
- * @param token
- * @return
- */
- public static SpaceAuthzInterface getSpaceAuthz(TSpaceToken token) {
-
- SpaceAuthzInterface spaceAuthz = new MockSpaceAuthz();
- // Retrieve the SpaceAuthz related to the Space Token
- if ((spaceAuthzs != null) && (spaceAuthzs.containsKey(token))) {
- spaceAuthz = spaceAuthzs.get(token);
- log.debug("Space Authz related to S.Token ='{}' is '{}'", token,
- spaceAuthz.getSpaceAuthzID());
- } else {
- log.debug("Space Authz related to S.Token ='{}' does not exists. "
- + "Use the MOCK one.", token);
- }
- return spaceAuthz;
- }
-
- /******************************
- * PATH AUTHORIZATION ENGINE
- ******************************/
-
- /**
- * Initializating the Path Authorization engine
- *
- * @param pathAuthz2
- */
- public static void initializePathAuthz(String pathAuthzDBFileName)
- throws DirectorException {
-
- PathAuthzDBReader authzDBReader;
- try {
- authzDBReader = new PathAuthzDBReader(pathAuthzDBFileName);
- } catch (Exception e) {
- log.error("Unable to build a PathAuthzDBReader: {}", e.getMessage(), e);
- throw new DirectorException("Unable to build a PathAuthzDBReader");
- }
- AuthzDirector.pathAuthz = new PathAuthz(authzDBReader.getPathAuthzDB());
- }
+import it.grid.storm.authz.path.PathAuthz;
+import it.grid.storm.authz.path.conf.PathAuthzDBReader;
- /**
- * Retrieve the Path Authorization module
- *
- * @todo: To implement this.
- */
- public static PathAuthzInterface getPathAuthz() {
+public class AuthzDirector {
- return AuthzDirector.pathAuthz;
- }
+ private static final Logger log = LoggerFactory.getLogger(AuthzDirector.class);
+
+ // PathAuthz is only one, shared by all SAs
+ private static PathAuthzInterface pathAuthz = null;
+
+ /**
+ * Initialize the Path Authorization engine
+ *
+ * @param pathAuthz2
+ */
+ public static void initializePathAuthz(String pathAuthzDBFileName) throws DirectorException {
+
+ PathAuthzDBReader authzDBReader;
+ try {
+ authzDBReader = new PathAuthzDBReader(pathAuthzDBFileName);
+ } catch (Exception e) {
+ log.error("Unable to build a PathAuthzDBReader: {}", e.getMessage(), e);
+ throw new DirectorException("Unable to build a PathAuthzDBReader");
+ }
+ AuthzDirector.pathAuthz = new PathAuthz(authzDBReader.getPathAuthzDB());
+ }
+
+ /**
+ * Retrieve the Path Authorization module
+ *
+ * @todo: To implement this.
+ */
+ public static PathAuthzInterface getPathAuthz() {
+
+ return AuthzDirector.pathAuthz;
+ }
}
diff --git a/src/main/java/it/grid/storm/authz/AuthzException.java b/src/main/java/it/grid/storm/authz/AuthzException.java
index 945f41e4c..69d2e848a 100644
--- a/src/main/java/it/grid/storm/authz/AuthzException.java
+++ b/src/main/java/it/grid/storm/authz/AuthzException.java
@@ -26,28 +26,18 @@
*/
public class AuthzException extends RuntimeException {
- /**
- *
- */
- private static final long serialVersionUID = 1L;
+ /**
+ *
+ */
+ private static final long serialVersionUID = 1L;
- public AuthzException() {
+ public AuthzException() {
- super();
- }
+ super();
+ }
- public AuthzException(String message) {
+ public AuthzException(String message) {
- super(message);
- }
-
- public AuthzException(String message, Throwable cause) {
-
- super(message, cause);
- }
-
- public AuthzException(Throwable cause) {
-
- super(cause);
- }
+ super(message);
+ }
}
diff --git a/src/main/java/it/grid/storm/authz/DirectorException.java b/src/main/java/it/grid/storm/authz/DirectorException.java
index 7e7382651..a84d37099 100644
--- a/src/main/java/it/grid/storm/authz/DirectorException.java
+++ b/src/main/java/it/grid/storm/authz/DirectorException.java
@@ -2,28 +2,18 @@
public class DirectorException extends Exception {
- /**
- *
- */
- private static final long serialVersionUID = 8391356294029256927L;
+ /**
+ *
+ */
+ private static final long serialVersionUID = 8391356294029256927L;
- public DirectorException() {
+ public DirectorException() {
- }
+ }
- public DirectorException(String message) {
+ public DirectorException(String message) {
- super(message);
- }
-
- public DirectorException(Throwable cause) {
-
- super(cause);
- }
-
- public DirectorException(String message, Throwable cause) {
-
- super(message, cause);
- }
+ super(message);
+ }
}
diff --git a/src/main/java/it/grid/storm/authz/path/conf/PathAuthzDB.java b/src/main/java/it/grid/storm/authz/path/conf/PathAuthzDB.java
index 917c8875d..ecbc9e84b 100644
--- a/src/main/java/it/grid/storm/authz/path/conf/PathAuthzDB.java
+++ b/src/main/java/it/grid/storm/authz/path/conf/PathAuthzDB.java
@@ -46,13 +46,6 @@ public PathAuthzDB(String pathAuthzDBID, PathAuthzEvaluationAlgorithm algorithm,
this.authzDB.addAll(aces);
}
- public PathAuthzDB(String pathAuthzDBID, List aces) {
-
- this.pathAuthzDBID = pathAuthzDBID;
- this.evaluationAlg = DEFAULT_ALGORITHM;
- this.authzDB.addAll(aces);
- }
-
/**
* Empty constructor. Use it only if there is not
*/
@@ -63,11 +56,6 @@ public PathAuthzDB() {
this.authzDB.add(PathACE.PERMIT_ALL);
}
- public void addPathACE(PathACE pathAce) {
-
- authzDB.add(pathAce);
- }
-
public List getACL() {
return authzDB;
diff --git a/src/main/java/it/grid/storm/authz/path/conf/PathAuthzDBReader.java b/src/main/java/it/grid/storm/authz/path/conf/PathAuthzDBReader.java
index 7b9b0aebf..02e9a5ea5 100644
--- a/src/main/java/it/grid/storm/authz/path/conf/PathAuthzDBReader.java
+++ b/src/main/java/it/grid/storm/authz/path/conf/PathAuthzDBReader.java
@@ -71,15 +71,6 @@ public PathAuthzDBReader(String filename) throws Exception {
log.info(pathAuthzDB.toString());
}
- public void refreshPathAuthzDB() throws Exception {
-
- log.debug(" Start refreshing.");
- pathAuthzDB = loadPathAuthzDB(authzDBFilename);
- log.debug(" End refreshing.");
- log.info("Path Authz DB ('{}') RE-loaded.", pathAuthzDB.getPathAuthzDBID());
- log.info(pathAuthzDB.toString());
- }
-
public PathAuthzDB getPathAuthzDB() {
return pathAuthzDB;
diff --git a/src/main/java/it/grid/storm/authz/path/model/PathAuthzAlgBestMatch.java b/src/main/java/it/grid/storm/authz/path/model/PathAuthzAlgBestMatch.java
index 84e791fd5..9cc821191 100644
--- a/src/main/java/it/grid/storm/authz/path/model/PathAuthzAlgBestMatch.java
+++ b/src/main/java/it/grid/storm/authz/path/model/PathAuthzAlgBestMatch.java
@@ -35,6 +35,8 @@
*/
public class PathAuthzAlgBestMatch extends PathAuthzEvaluationAlgorithm {
+ private static PathAuthzAlgBestMatch instance;
+
public static PathAuthzEvaluationAlgorithm getInstance() {
if (instance == null) {
diff --git a/src/main/java/it/grid/storm/authz/path/model/PathAuthzEvaluationAlgorithm.java b/src/main/java/it/grid/storm/authz/path/model/PathAuthzEvaluationAlgorithm.java
index 4abf781fb..95a1d1bac 100644
--- a/src/main/java/it/grid/storm/authz/path/model/PathAuthzEvaluationAlgorithm.java
+++ b/src/main/java/it/grid/storm/authz/path/model/PathAuthzEvaluationAlgorithm.java
@@ -28,17 +28,6 @@
*/
public abstract class PathAuthzEvaluationAlgorithm {
- public static PathAuthzEvaluationAlgorithm instance = null;
-
- public static PathAuthzEvaluationAlgorithm getInstance() throws Exception {
-
- if (instance == null) {
- throw new Exception(
- "Unable to provide the instance, my comcrete subclass as not provided any");
- }
- return instance;
- }
-
public abstract AuthzDecision evaluate(String subject, StFN fileName,
SRMFileRequest pathOperation, List acl);
diff --git a/src/main/java/it/grid/storm/authz/path/model/PathOperation.java b/src/main/java/it/grid/storm/authz/path/model/PathOperation.java
index cf4cd53f8..245ad44b1 100644
--- a/src/main/java/it/grid/storm/authz/path/model/PathOperation.java
+++ b/src/main/java/it/grid/storm/authz/path/model/PathOperation.java
@@ -25,12 +25,15 @@
*/
public enum PathOperation {
- WRITE_FILE('W', "WRITE_FILE", "Write data"), READ_FILE('R', "READ_FILE", "Read data",
- true), RENAME('F', "RENAME", "Rename a file or a directory"), DELETE('D', "DELETE",
- "Delete a file or a directory"), LIST_DIRECTORY('L', "LIST_DIRECTORY",
- "Listing a directory",
- true), MAKE_DIRECTORY('M', "CREATE_DIRECTORY", "Create a directory"), CREATE_FILE('N',
- "CREATE_FILE", "Create a new file"), UNDEFINED('?', "UNDEFINED", "Undefined");
+
+ WRITE_FILE('W', "WRITE_FILE", "Write data"),
+ READ_FILE('R', "READ_FILE", "Read data", true),
+ RENAME('F', "RENAME", "Rename a file or a directory"),
+ DELETE('D', "DELETE", "Delete a file or a directory"),
+ LIST_DIRECTORY('L', "LIST_DIRECTORY", "Listing a directory", true),
+ MAKE_DIRECTORY('M', "CREATE_DIRECTORY", "Create a directory"),
+ CREATE_FILE('N', "CREATE_FILE", "Create a new file"),
+ UNDEFINED('?', "UNDEFINED", "Undefined");
private final char operation;
private final String operationName;
@@ -69,8 +72,6 @@ public static PathOperation getSpaceOperation(char op) {
return RENAME;
case 'D':
return DELETE;
- // case 'T':
- // return TRAVERSE_DIRECTORY;
case 'L':
return LIST_DIRECTORY;
case 'M':
@@ -93,16 +94,6 @@ public char getSpaceOperationValue() {
return operation;
}
- public PathOperation getSpaceOp(int ordinal) {
-
- PathOperation[] sp = PathOperation.values();
- if ((ordinal >= 0) && (ordinal < sp.length)) {
- return sp[ordinal];
- } else {
- return UNDEFINED;
- }
- }
-
public int getNumberOfPathOp() {
return PathOperation.values().length - 1;
diff --git a/src/main/java/it/grid/storm/authz/path/model/SRMFileRequest.java b/src/main/java/it/grid/storm/authz/path/model/SRMFileRequest.java
index 7f49e3c69..e3e06c989 100644
--- a/src/main/java/it/grid/storm/authz/path/model/SRMFileRequest.java
+++ b/src/main/java/it/grid/storm/authz/path/model/SRMFileRequest.java
@@ -21,7 +21,6 @@
package it.grid.storm.authz.path.model;
import java.util.ArrayList;
-import java.util.HashMap;
import java.util.List;
/**
@@ -161,55 +160,6 @@ public enum SRMFileRequest {
private final String srmOp;
private final PathAccessMask requestedPathOps;
- private static HashMap ops = new HashMap() {
-
- /**
- *
- */
- private static final long serialVersionUID = 1L;
-
- {
- put("PTP-Over", PTP_Overwrite);
- put("srmPrepareToPut-overwrite", PTP_Overwrite);
- put("PTP", PTP);
- put("srmPrepareToPut", PTP);
- put("PTG", PTG);
- put("srmPrepareToGet", PTG);
- put("CPto_Over", CPto_Overwrite);
- put("srmCopy to-overwrite", CPto_Overwrite);
- put("CPto", CPto);
- put("srmCopy to", CPto);
- put("CPFrom", CPfrom);
- put("srmCopy from", CPfrom);
- put("RM", RM);
- put("srmRm", RM);
- put("RMD", RMD);
- put("srmRemoveDir", RM);
- put("MD", MD);
- put("srmMakeDir", MD);
- put("LS", LS);
- put("srmLs", LS);
- put("MV-source", MV_source);
- put("srmMove-source", MV_source);
- put("MV-dest-Over", MV_dest_Overwrite);
- put("srmMove-dest-overwrite", MV_dest_Overwrite);
- put("MV-dest", MV_dest);
- put("srmMove-dest", MV_dest);
- }
- };
-
- /*
- * Used only for testing
- */
- public static SRMFileRequest buildFromString(String srmOp) {
-
- if (ops.containsKey(srmOp)) {
- return ops.get(srmOp);
- } else {
- return null;
- }
- }
-
/**
* SRMOperation
*/
diff --git a/src/main/java/it/grid/storm/authz/remote/resource/PermissionEvaluator.java b/src/main/java/it/grid/storm/authz/remote/resource/PermissionEvaluator.java
index 421d01d6c..428631e9a 100644
--- a/src/main/java/it/grid/storm/authz/remote/resource/PermissionEvaluator.java
+++ b/src/main/java/it/grid/storm/authz/remote/resource/PermissionEvaluator.java
@@ -35,7 +35,6 @@
import it.grid.storm.authz.path.model.PathOperation;
import it.grid.storm.authz.path.model.SRMFileRequest;
import it.grid.storm.authz.remote.Constants;
-import it.grid.storm.catalogs.OverwriteModeConverter;
import it.grid.storm.common.types.InvalidStFNAttributeException;
import it.grid.storm.common.types.StFN;
import it.grid.storm.config.Configuration;
@@ -47,6 +46,7 @@
import it.grid.storm.namespace.VirtualFSInterface;
import it.grid.storm.namespace.model.MappingRule;
import it.grid.storm.namespace.model.Protocol;
+import it.grid.storm.persistence.converter.OverwriteModeConverter;
import it.grid.storm.srm.types.TOverwriteMode;
class PermissionEvaluator {
diff --git a/src/main/java/it/grid/storm/authz/sa/AuthzDBReaderException.java b/src/main/java/it/grid/storm/authz/sa/AuthzDBReaderException.java
index 1a3522b5f..11107991b 100644
--- a/src/main/java/it/grid/storm/authz/sa/AuthzDBReaderException.java
+++ b/src/main/java/it/grid/storm/authz/sa/AuthzDBReaderException.java
@@ -33,14 +33,4 @@ public AuthzDBReaderException(String message) {
super(message);
}
-
- public AuthzDBReaderException(String message, Throwable cause) {
-
- super(message, cause);
- }
-
- public AuthzDBReaderException(Throwable cause) {
-
- super(cause);
- }
}
diff --git a/src/main/java/it/grid/storm/authz/sa/SpaceAuthz.java b/src/main/java/it/grid/storm/authz/sa/SpaceAuthz.java
deleted file mode 100644
index 8cec4103e..000000000
--- a/src/main/java/it/grid/storm/authz/sa/SpaceAuthz.java
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- *
- * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010.
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not
- * use this file except in compliance with the License. You may obtain a copy of
- * the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package it.grid.storm.authz.sa;
-
-import it.grid.storm.authz.SpaceAuthzInterface;
-import it.grid.storm.authz.sa.model.SRMSpaceRequest;
-import it.grid.storm.griduser.GridUserInterface;
-
-public abstract class SpaceAuthz implements SpaceAuthzInterface {
-
- private AuthzDBInterface authzDB;
-
- /**
- * @todo: 1) IMPLEMENT AUHTZ ENGINE
- * @todo: 2) IMPLEMENT CACHE
- * @todo: 3) IMPLEMENT PRINCIPAL LIST PERSISTENCE
- * @todo: 4) IMPLEMENT RECALCULATE CACHE
- */
-
- public SpaceAuthz() {
-
- super();
- }
-
- public abstract boolean authorize(GridUserInterface guser, SRMSpaceRequest srmSpaceOp);
-
- public void setAuthzDB(AuthzDBInterface authzDB) {
-
- this.authzDB = authzDB;
- }
-
- public AuthzDBInterface getAuthzDB() {
-
- return authzDB;
- }
-
-}
diff --git a/src/main/java/it/grid/storm/authz/sa/SpaceDBAuthz.java b/src/main/java/it/grid/storm/authz/sa/SpaceDBAuthz.java
deleted file mode 100644
index 8c891db0b..000000000
--- a/src/main/java/it/grid/storm/authz/sa/SpaceDBAuthz.java
+++ /dev/null
@@ -1,133 +0,0 @@
-/*
- *
- * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010.
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not
- * use this file except in compliance with the License. You may obtain a copy of
- * the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-/**
- *
- */
-package it.grid.storm.authz.sa;
-
-import java.io.File;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import it.grid.storm.authz.sa.model.SRMSpaceRequest;
-import it.grid.storm.config.Configuration;
-import it.grid.storm.griduser.GridUserInterface;
-
-/**
- * @author zappi
- */
-public class SpaceDBAuthz extends SpaceAuthz {
-
- private static final Logger log = LoggerFactory.getLogger(SpaceDBAuthz.class);
-
- public static final String UNDEF = "undef-SpaceAuthzDB";
-
- private String spaceAuthzDBID = "not-defined";
- private static String configurationPATH;
- private String dbFileName;
-
- public SpaceDBAuthz() {
-
- }
-
- /**
- * @return
- */
- public static SpaceDBAuthz makeEmpty() {
-
- SpaceDBAuthz result = new SpaceDBAuthz();
- result.setSpaceAuthzDBID("default-SpaceAuthzDB");
- return result;
- }
-
- public SpaceDBAuthz(String dbFileName) {
-
- Configuration config = Configuration.getInstance();
- configurationPATH = config.namespaceConfigPath();
- if (existsAuthzDBFile(dbFileName)) {
- this.dbFileName = dbFileName;
- spaceAuthzDBID = dbFileName;
- }
- }
-
- /**
- * @param string
- */
- void setSpaceAuthzDBID(String id) {
-
- spaceAuthzDBID = id;
- }
-
- /**
- *
- */
- @Override
- public boolean authorize(GridUserInterface guser, SRMSpaceRequest srmSpaceOp) {
-
- return false;
- }
-
- @Override
- public boolean authorizeAnonymous(SRMSpaceRequest srmSpaceOp) {
-
- return false;
- }
-
-
- /**********************************************************************
- * BUILDINGs METHODS
- */
-
- /**
- * Check the existence of the AuthzDB file
- */
- private boolean existsAuthzDBFile(String dbFileName) {
-
- String fileName = configurationPATH + File.separator + dbFileName;
- boolean exists = (new File(fileName)).exists();
- if (!(exists)) {
- log.error("The AuthzDB File '{}' does not exists", dbFileName);
- }
- return exists;
- }
-
- /**
- * Return the AuthzDB FileName
- *
- * @return
- */
- String getAuthzDBFileName() {
-
- return dbFileName;
- }
-
- public String getSpaceAuthzID() {
-
- return spaceAuthzDBID;
- }
-
- /**
- *
- */
- public void refresh() {
-
- // empty
- }
-
-}
diff --git a/src/main/java/it/grid/storm/authz/sa/SpaceFixedAuthz.java b/src/main/java/it/grid/storm/authz/sa/SpaceFixedAuthz.java
deleted file mode 100644
index 3ccec5bb4..000000000
--- a/src/main/java/it/grid/storm/authz/sa/SpaceFixedAuthz.java
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- *
- * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010.
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not
- * use this file except in compliance with the License. You may obtain a copy of
- * the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-/**
- *
- */
-package it.grid.storm.authz.sa;
-
-import it.grid.storm.authz.sa.model.AuthzDBFixed;
-import it.grid.storm.authz.sa.model.SRMSpaceRequest;
-import it.grid.storm.griduser.GridUserInterface;
-
-/**
- * @author zappi
- */
-public class SpaceFixedAuthz extends SpaceAuthz {
-
- private static final String FIXED_ID = "fixed-space-authz";
-
- public SpaceFixedAuthz(AuthzDBFixed fixedAuthzDB)
- throws AuthzDBReaderException {
-
- }
-
- @Override
- public boolean authorize(GridUserInterface guser, SRMSpaceRequest srmSpaceOp) {
-
- // @todo : implement the simple algorithm.
- return true;
- }
-
- @Override
- public boolean authorizeAnonymous(SRMSpaceRequest srmSpaceOp) {
-
- // TODO Auto-generated method stub
- return true;
- }
-
- public String getSpaceAuthzID() {
-
- return FIXED_ID;
- }
-
- public void refresh() {
-
- }
-
-}
diff --git a/src/main/java/it/grid/storm/catalogs/AnonymousFileTransferData.java b/src/main/java/it/grid/storm/catalogs/AnonymousFileTransferData.java
deleted file mode 100644
index 70da88a0b..000000000
--- a/src/main/java/it/grid/storm/catalogs/AnonymousFileTransferData.java
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- *
- * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010.
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not
- * use this file except in compliance with the License. You may obtain a copy of
- * the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package it.grid.storm.catalogs;
-
-import it.grid.storm.common.types.TURLPrefix;
-import it.grid.storm.srm.types.TReturnStatus;
-import it.grid.storm.srm.types.TSURL;
-import it.grid.storm.srm.types.TTURL;
-
-/**
- * @author Michele Dibenedetto
- *
- */
-
-public abstract class AnonymousFileTransferData extends
- SurlMultyOperationRequestData implements FileTransferData {
-
- protected TURLPrefix transferProtocols;
- protected TTURL transferURL;
-
- public AnonymousFileTransferData(TSURL toSURL, TURLPrefix transferProtocols,
- TReturnStatus status, TTURL transferURL)
- throws InvalidFileTransferDataAttributesException,
- InvalidSurlRequestDataAttributesException {
-
- super(toSURL, status);
- if (transferProtocols == null || transferURL == null) {
- throw new InvalidFileTransferDataAttributesException(toSURL,
- transferProtocols, status, transferURL);
- }
- this.transferProtocols = transferProtocols;
- this.transferURL = transferURL;
- }
-
- /*
- * (non-Javadoc)
- *
- * @see it.grid.storm.catalogs.FileTransferData#getTransferProtocols()
- */
- @Override
- public final TURLPrefix getTransferProtocols() {
-
- return transferProtocols;
- }
-
- /*
- * (non-Javadoc)
- *
- * @see it.grid.storm.catalogs.FileTransferData#getTransferURL()
- */
- @Override
- public final TTURL getTransferURL() {
-
- return transferURL;
- }
-
- /*
- * (non-Javadoc)
- *
- * @see
- * it.grid.storm.catalogs.FileTransferData#setTransferURL(it.grid.storm.srm
- * .types.TTURL)
- */
- @Override
- public final void setTransferURL(final TTURL turl) {
-
- if (turl != null) {
- transferURL = turl;
- }
- }
-}
diff --git a/src/main/java/it/grid/storm/catalogs/AnonymousPtGData.java b/src/main/java/it/grid/storm/catalogs/AnonymousPtGData.java
deleted file mode 100644
index 615c590fe..000000000
--- a/src/main/java/it/grid/storm/catalogs/AnonymousPtGData.java
+++ /dev/null
@@ -1,236 +0,0 @@
-/*
- *
- * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010.
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not
- * use this file except in compliance with the License. You may obtain a copy of
- * the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package it.grid.storm.catalogs;
-
-import it.grid.storm.common.types.TURLPrefix;
-import it.grid.storm.srm.types.TDirOption;
-import it.grid.storm.srm.types.TLifeTimeInSeconds;
-import it.grid.storm.srm.types.TReturnStatus;
-import it.grid.storm.srm.types.TSURL;
-import it.grid.storm.srm.types.TSizeInBytes;
-import it.grid.storm.srm.types.TStatusCode;
-import it.grid.storm.srm.types.TTURL;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * This class represents a PrepareToGetChunkData, that is part of a multifile
- * PrepareToGet srm request. It contains data about: the requestToken, the
- * fromSURL, the requested lifeTime of pinning, the TDirOption which tells
- * whether the requested SURL is a directory and if it must be recursed at all
- * levels, as well as the desired number of levels to recurse, the desired
- * transferProtocols in order of preference, the fileSize, and the transferURL
- * for the supplied SURL.
- *
- * @author EGRID - ICTP Trieste
- * @date March 21st, 2005
- * @version 3.0
- */
-public class AnonymousPtGData extends AnonymousFileTransferData implements
- PtGData {
-
- private static final Logger log = LoggerFactory
- .getLogger(AnonymousPtGData.class);
-
- /** requested lifetime of TURL: it is the pin time! */
- protected TLifeTimeInSeconds pinLifeTime;
- /** specifies if the request regards a directory and related info */
- protected TDirOption dirOption;
- /** size of file */
- protected TSizeInBytes fileSize;
-
- /**
- * @param requestToken
- * @param fromSURL
- * @param lifeTime
- * @param dirOption
- * @param desiredProtocols
- * @param fileSize
- * @param status
- * @param transferURL
- * @throws InvalidPtGDataAttributesException
- */
- public AnonymousPtGData(TSURL SURL, TLifeTimeInSeconds lifeTime,
- TDirOption dirOption, TURLPrefix desiredProtocols, TSizeInBytes fileSize,
- TReturnStatus status, TTURL transferURL)
- throws InvalidPtGDataAttributesException,
- InvalidFileTransferDataAttributesException,
- InvalidSurlRequestDataAttributesException {
-
- super(SURL, desiredProtocols, status, transferURL);
- if (lifeTime == null || dirOption == null || fileSize == null) {
- log.debug("Invalid arguments: lifeTime={}, dirOption={}, fileSize={}",
- lifeTime, dirOption, fileSize);
- throw new InvalidPtGDataAttributesException(SURL, lifeTime, dirOption,
- desiredProtocols, fileSize, status, transferURL);
-
- }
- this.pinLifeTime = lifeTime;
- this.dirOption = dirOption;
- this.fileSize = fileSize;
- }
-
- /*
- * (non-Javadoc)
- *
- * @see it.grid.storm.catalogs.PtGData#getPinLifeTime()
- */
- @Override
- public TLifeTimeInSeconds getPinLifeTime() {
-
- return pinLifeTime;
- }
-
- /*
- * (non-Javadoc)
- *
- * @see it.grid.storm.catalogs.PtGData#getDirOption()
- */
- @Override
- public TDirOption getDirOption() {
-
- return dirOption;
- }
-
- /*
- * (non-Javadoc)
- *
- * @see it.grid.storm.catalogs.PtGData#getFileSize()
- */
- @Override
- public TSizeInBytes getFileSize() {
-
- return fileSize;
- }
-
- /*
- * (non-Javadoc)
- *
- * @see
- * it.grid.storm.catalogs.PtGData#setFileSize(it.grid.storm.srm.types.TSizeInBytes
- * )
- */
- @Override
- public void setFileSize(TSizeInBytes size) {
-
- if (size != null) {
- fileSize = size;
- }
- }
-
- /*
- * (non-Javadoc)
- *
- * @see
- * it.grid.storm.catalogs.PtGData#changeStatusSRM_FILE_PINNED(java.lang.String
- * )
- */
- @Override
- public void changeStatusSRM_FILE_PINNED(String explanation) {
-
- setStatus(TStatusCode.SRM_FILE_PINNED, explanation);
- }
-
- /*
- * (non-Javadoc)
- *
- * @see java.lang.Object#toString()
- */
- @Override
- public String toString() {
-
- StringBuilder builder = new StringBuilder();
- builder.append("PtGChunkData [pinLifeTime=");
- builder.append(pinLifeTime);
- builder.append(", dirOption=");
- builder.append(dirOption);
- builder.append(", fileSize=");
- builder.append(fileSize);
- builder.append(", transferProtocols=");
- builder.append(transferProtocols);
- builder.append(", SURL=");
- builder.append(SURL);
- builder.append(", status=");
- builder.append(status);
- builder.append(", transferURL=");
- builder.append(transferURL);
- builder.append("]");
- return builder.toString();
- }
-
- /*
- * (non-Javadoc)
- *
- * @see java.lang.Object#hashCode()
- */
- @Override
- public int hashCode() {
-
- final int prime = 31;
- int result = super.hashCode();
- result = prime * result + ((dirOption == null) ? 0 : dirOption.hashCode());
- result = prime * result + ((fileSize == null) ? 0 : fileSize.hashCode());
- result = prime * result
- + ((pinLifeTime == null) ? 0 : pinLifeTime.hashCode());
- return result;
- }
-
- /*
- * (non-Javadoc)
- *
- * @see java.lang.Object#equals(java.lang.Object)
- */
- @Override
- public boolean equals(Object obj) {
-
- if (this == obj) {
- return true;
- }
- if (!super.equals(obj)) {
- return false;
- }
- if (getClass() != obj.getClass()) {
- return false;
- }
- AnonymousPtGData other = (AnonymousPtGData) obj;
- if (dirOption == null) {
- if (other.dirOption != null) {
- return false;
- }
- } else if (!dirOption.equals(other.dirOption)) {
- return false;
- }
- if (fileSize == null) {
- if (other.fileSize != null) {
- return false;
- }
- } else if (!fileSize.equals(other.fileSize)) {
- return false;
- }
- if (pinLifeTime == null) {
- if (other.pinLifeTime != null) {
- return false;
- }
- } else if (!pinLifeTime.equals(other.pinLifeTime)) {
- return false;
- }
- return true;
- }
-
-}
diff --git a/src/main/java/it/grid/storm/catalogs/AnonymousPtPData.java b/src/main/java/it/grid/storm/catalogs/AnonymousPtPData.java
deleted file mode 100644
index dca2d5af5..000000000
--- a/src/main/java/it/grid/storm/catalogs/AnonymousPtPData.java
+++ /dev/null
@@ -1,240 +0,0 @@
-/*
- *
- * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010.
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not
- * use this file except in compliance with the License. You may obtain a copy of
- * the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package it.grid.storm.catalogs;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import it.grid.storm.common.types.TURLPrefix;
-import it.grid.storm.srm.types.TFileStorageType;
-import it.grid.storm.srm.types.TLifeTimeInSeconds;
-import it.grid.storm.srm.types.TOverwriteMode;
-import it.grid.storm.srm.types.TReturnStatus;
-import it.grid.storm.srm.types.TSURL;
-import it.grid.storm.srm.types.TSizeInBytes;
-import it.grid.storm.srm.types.TSpaceToken;
-import it.grid.storm.srm.types.TStatusCode;
-import it.grid.storm.srm.types.TTURL;
-
-/**
- * @author Michele Dibenedetto
- *
- */
-public class AnonymousPtPData extends AnonymousFileTransferData implements
- PtPData {
-
- private static final Logger log = LoggerFactory.getLogger(AnonymousPtPData.class);
-
- protected TSpaceToken spaceToken;
- protected TLifeTimeInSeconds pinLifetime;
- protected TLifeTimeInSeconds fileLifetime;
- protected TFileStorageType fileStorageType;
- protected TOverwriteMode overwriteOption;
- protected TSizeInBytes expectedFileSize;
-
- public AnonymousPtPData(TSURL toSURL, TLifeTimeInSeconds pinLifetime,
- TLifeTimeInSeconds fileLifetime, TFileStorageType fileStorageType,
- TSpaceToken spaceToken, TSizeInBytes expectedFileSize,
- TURLPrefix transferProtocols, TOverwriteMode overwriteOption,
- TReturnStatus status, TTURL transferURL)
- throws InvalidPtPDataAttributesException,
- InvalidFileTransferDataAttributesException,
- InvalidSurlRequestDataAttributesException {
-
- super(toSURL, transferProtocols, status, transferURL);
- if (pinLifetime == null || fileLifetime == null || spaceToken == null
- || fileStorageType == null || expectedFileSize == null
- || overwriteOption == null) {
- log.debug("Invalid arguments: pinLifetime={}, fileLifetime={}, "
- + "spaceToken={}, fileStorageType={}, expectedFileSize={}, "
- + "overwriteOption={}", pinLifetime, fileLifetime, spaceToken,
- fileStorageType, expectedFileSize, overwriteOption);
- throw new InvalidPtPDataAttributesException(toSURL, pinLifetime,
- fileLifetime, fileStorageType, spaceToken, expectedFileSize,
- transferProtocols, overwriteOption, status, transferURL);
- }
- this.spaceToken = spaceToken;
- this.pinLifetime = pinLifetime;
- this.fileLifetime = fileLifetime;
- this.fileStorageType = fileStorageType;
- this.expectedFileSize = expectedFileSize;
- this.overwriteOption = overwriteOption;
- }
-
- /*
- * (non-Javadoc)
- *
- * @see it.grid.storm.catalogs.PtPData#getSpaceToken()
- */
- @Override
- public final TSpaceToken getSpaceToken() {
-
- return spaceToken;
- }
-
- /*
- * (non-Javadoc)
- *
- * @see it.grid.storm.catalogs.PtPData#pinLifetime()
- */
- @Override
- public TLifeTimeInSeconds pinLifetime() {
-
- return pinLifetime;
- }
-
- /*
- * (non-Javadoc)
- *
- * @see it.grid.storm.catalogs.PtPData#fileLifetime()
- */
- @Override
- public TLifeTimeInSeconds fileLifetime() {
-
- return fileLifetime;
- }
-
- /*
- * (non-Javadoc)
- *
- * @see it.grid.storm.catalogs.PtPData#fileStorageType()
- */
- @Override
- public TFileStorageType fileStorageType() {
-
- return fileStorageType;
- }
-
- /*
- * (non-Javadoc)
- *
- * @see it.grid.storm.catalogs.PtPData#expectedFileSize()
- */
- @Override
- public TSizeInBytes expectedFileSize() {
-
- return expectedFileSize;
- }
-
- /*
- * (non-Javadoc)
- *
- * @see it.grid.storm.catalogs.PtPData#overwriteOption()
- */
- @Override
- public TOverwriteMode overwriteOption() {
-
- return overwriteOption;
- }
-
- /**
- * Method that sets the status of this request to SRM_SPACE_AVAILABLE; it
- * needs the explanation String which describes the situation in greater
- * detail; if a null is passed, then an empty String is used as explanation.
- */
- @Override
- public void changeStatusSRM_SPACE_AVAILABLE(String explanation) {
-
- setStatus(TStatusCode.SRM_SPACE_AVAILABLE, explanation);
- }
-
- /**
- * Method that sets the status of this request to SRM_DUPLICATION_ERROR; it
- * needs the explanation String which describes the situation in greater
- * detail; if a null is passed, then an empty String is used as explanation.
- */
- @Override
- public void changeStatusSRM_DUPLICATION_ERROR(String explanation) {
-
- setStatus(TStatusCode.SRM_DUPLICATION_ERROR, explanation);
- }
-
- @Override
- public String toString() {
-
- StringBuilder sb = new StringBuilder();
- sb.append("PtPChunkData\n");
- sb.append("toSURL=");
- sb.append(SURL);
- sb.append("; ");
- sb.append("pinLifetime=");
- sb.append(pinLifetime);
- sb.append("; ");
- sb.append("fileLifetime=");
- sb.append(fileLifetime);
- sb.append("; ");
- sb.append("fileStorageType=");
- sb.append(fileStorageType);
- sb.append("; ");
- sb.append("spaceToken=");
- sb.append(spaceToken);
- sb.append("; ");
- sb.append("expectedFileSize=");
- sb.append(expectedFileSize);
- sb.append("; ");
- sb.append("transferProtocols=");
- sb.append(transferProtocols);
- sb.append("; ");
- sb.append("overwriteOption=");
- sb.append(overwriteOption);
- sb.append("; ");
- sb.append("status=");
- sb.append(status);
- sb.append("; ");
- sb.append("transferURL=");
- sb.append(transferURL);
- sb.append("; ");
- return sb.toString();
- }
-
- @Override
- public int hashCode() {
-
- int hash = 17;
- hash = 37 * hash + SURL.hashCode();
- hash = 37 * hash + pinLifetime.hashCode();
- hash = 37 * hash + fileLifetime.hashCode();
- hash = 37 * hash + fileStorageType.hashCode();
- hash = 37 * hash + spaceToken.hashCode();
- hash = 37 * hash + expectedFileSize.hashCode();
- hash = 37 * hash + transferProtocols.hashCode();
- hash = 37 * hash + overwriteOption.hashCode();
- hash = 37 * hash + status.hashCode();
- hash = 37 * hash + transferURL.hashCode();
- return hash;
- }
-
- @Override
- public boolean equals(Object o) {
-
- if (o == this) {
- return true;
- }
- if (!(o instanceof AnonymousPtPData)) {
- return false;
- }
- AnonymousPtPData cd = (AnonymousPtPData) o;
- return SURL.equals(cd.SURL) && pinLifetime.equals(cd.pinLifetime)
- && fileLifetime.equals(cd.fileLifetime)
- && fileStorageType.equals(cd.fileStorageType)
- && spaceToken.equals(cd.spaceToken)
- && expectedFileSize.equals(cd.expectedFileSize)
- && transferProtocols.equals(cd.transferProtocols)
- && overwriteOption.equals(cd.overwriteOption) && status.equals(cd.status)
- && transferURL.equals(cd.transferURL);
- }
-}
diff --git a/src/main/java/it/grid/storm/catalogs/BoLChunkCatalog.java b/src/main/java/it/grid/storm/catalogs/BoLChunkCatalog.java
index 31b6a1407..4e88d2187 100644
--- a/src/main/java/it/grid/storm/catalogs/BoLChunkCatalog.java
+++ b/src/main/java/it/grid/storm/catalogs/BoLChunkCatalog.java
@@ -17,14 +17,31 @@
package it.grid.storm.catalogs;
+import static it.grid.storm.srm.types.TStatusCode.SRM_FAILURE;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
import it.grid.storm.common.types.SizeUnit;
import it.grid.storm.common.types.TURLPrefix;
import it.grid.storm.common.types.TimeUnit;
import it.grid.storm.config.Configuration;
-import it.grid.storm.griduser.GridUserInterface;
-// import it.grid.storm.namespace.SurlStatusStore;
+import it.grid.storm.persistence.converter.PinLifetimeConverter;
+import it.grid.storm.persistence.converter.StatusCodeConverter;
+import it.grid.storm.persistence.converter.TransferProtocolListConverter;
+import it.grid.storm.persistence.dao.BoLChunkDAO;
+import it.grid.storm.persistence.exceptions.InvalidReducedBoLChunkDataAttributesException;
+import it.grid.storm.persistence.exceptions.InvalidSurlRequestDataAttributesException;
+import it.grid.storm.persistence.impl.mysql.BoLChunkDAOMySql;
+import it.grid.storm.persistence.model.BoLChunkDataTO;
+import it.grid.storm.persistence.model.BoLPersistentChunkData;
+import it.grid.storm.persistence.model.ReducedBoLChunkData;
+import it.grid.storm.persistence.model.ReducedBoLChunkDataTO;
import it.grid.storm.srm.types.InvalidTDirOptionAttributesException;
-import it.grid.storm.srm.types.InvalidTRequestTokenAttributesException;
import it.grid.storm.srm.types.InvalidTSURLAttributesException;
import it.grid.storm.srm.types.InvalidTSizeAttributesException;
import it.grid.storm.srm.types.TDirOption;
@@ -36,20 +53,10 @@
import it.grid.storm.srm.types.TStatusCode;
import it.grid.storm.srm.types.TTURL;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.List;
-import java.util.Timer;
-import java.util.TimerTask;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
/**
- * Class that represents StoRMs BoLChunkCatalog: it collects BoLChunkData and
- * provides methods for looking up a BoLChunkData based on TRequestToken, as
- * well as for adding a new entry and removing an existing one.
+ * Class that represents StoRMs BoLChunkCatalog: it collects BoLChunkData and provides methods for
+ * looking up a BoLChunkData based on TRequestToken, as well as for adding a new entry and removing
+ * an existing one.
*
* @author CNAF
* @date Aug 2009
@@ -57,762 +64,306 @@
*/
public class BoLChunkCatalog {
- private static final Logger log = LoggerFactory
- .getLogger(BoLChunkCatalog.class);
-
- /* only instance of BoLChunkCatalog present in StoRM! */
- private static final BoLChunkCatalog cat = new BoLChunkCatalog();
- private final BoLChunkDAO dao = BoLChunkDAO.getInstance();
-
- /*
- * Timer object in charge of transiting expired requests from SRM_FILE_PINNED
- * to SRM_RELEASED!
- */
- private final Timer transiter = new Timer();
- /* Delay time before starting cleaning thread! */
- private final long delay = Configuration.getInstance()
- .getTransitInitialDelay() * 1000;
- /* Period of execution of cleaning! */
- private final long period = Configuration.getInstance()
- .getTransitTimeInterval() * 1000;
-
- /**
- * Private constructor that starts the internal timer needed to periodically
- * check and transit requests whose pinLifetime has expired and are in
- * SRM_FILE_PINNED, to SRM_RELEASED.
- */
- private BoLChunkCatalog() {
-
- TimerTask transitTask = new TimerTask() {
-
- @Override
- public void run() {
-
- transitExpiredSRM_SUCCESS();
- }
- };
- transiter.scheduleAtFixedRate(transitTask, delay, period);
- }
-
- /**
- * Method that returns the only instance of BoLChunkCatalog available.
- */
- public static BoLChunkCatalog getInstance() {
-
- return cat;
- }
-
- /**
- * Method that returns a Collection of BoLChunkData Objects matching the
- * supplied TRequestToken.
- *
- * If any of the data associated to the TRequestToken is not well formed and
- * so does not allow a BoLChunkData Object to be created, then that part of
- * the request is dropped and gets logged, and the processing continues with
- * the next part. All valid chunks get returned: the others get dropped.
- *
- * If there are no chunks to process then an empty Collection is returned, and
- * a message gets logged.
- */
- synchronized public Collection lookup(TRequestToken rt) {
-
- Collection chunkCollection = dao.find(rt);
- log.debug("BoL CHUNK CATALOG: retrieved data {}", chunkCollection);
- List list = new ArrayList();
-
- if (chunkCollection.isEmpty()) {
- log.warn("BoL CHUNK CATALOG! No chunks found in persistence for specified "
- + "request: {}", rt);
- return list;
- }
-
- BoLPersistentChunkData chunk;
- for (BoLChunkDataTO chunkTO : chunkCollection) {
- chunk = makeOne(chunkTO, rt);
- if (chunk == null) {
- continue;
- }
- list.add(chunk);
- if (isComplete(chunkTO)) {
- continue;
- }
- try {
- dao.updateIncomplete(completeTO(chunkTO, chunk));
- } catch (InvalidReducedBoLChunkDataAttributesException e) {
- log.warn("BoL CHUNK CATALOG! unable to add missing informations on DB "
- + "to the request: {}", e.getMessage());
- }
- }
- log.debug("BoL CHUNK CATALOG: returning " + list);
- return list;
- }
-
- /**
- * Generates a BoLChunkData from the received BoLChunkDataTO
- *
- * @param auxTO
- * @param rt
- * @return
- */
- private BoLPersistentChunkData makeOne(BoLChunkDataTO auxTO, TRequestToken rt) {
-
- StringBuilder errorSb = new StringBuilder();
- TSURL fromSURL = null;
- try {
- fromSURL = TSURL.makeFromStringValidate(auxTO.getFromSURL());
- } catch (InvalidTSURLAttributesException e) {
- errorSb.append(e);
- }
- if (auxTO.normalizedStFN() != null) {
- fromSURL.setNormalizedStFN(auxTO.normalizedStFN());
- }
- if (auxTO.sulrUniqueID() != null) {
- fromSURL.setUniqueID(auxTO.sulrUniqueID().intValue());
- }
- // lifeTime
- TLifeTimeInSeconds lifeTime = null;
- try {
- long pinLifeTime = PinLifetimeConverter.getInstance().toStoRM(
- auxTO.getLifeTime());
- // Check for max value allowed
- long max = Configuration.getInstance().getPinLifetimeMaximum();
- if (pinLifeTime > max) {
- log.warn("PinLifeTime is greater than the max value allowed. "
- + "Drop the value to the max = {} seconds", max);
- pinLifeTime = max;
- }
- lifeTime = TLifeTimeInSeconds.make(pinLifeTime, TimeUnit.SECONDS);
- } catch (IllegalArgumentException e) {
- errorSb.append("\n");
- errorSb.append(e);
- }
- // dirOption
- TDirOption dirOption = null;
- try {
- dirOption = new TDirOption(auxTO.getDirOption(),
- auxTO.getAllLevelRecursive(), auxTO.getNumLevel());
- } catch (InvalidTDirOptionAttributesException e) {
- errorSb.append("\n");
- errorSb.append(e);
- }
- // transferProtocols
- TURLPrefix transferProtocols = TransferProtocolListConverter.toSTORM(auxTO
- .getProtocolList());
- if (transferProtocols.size() == 0) {
- errorSb.append("\nEmpty list of TransferProtocols or"
- + " could not translate TransferProtocols!");
- /* fail construction of BoLChunkData! */
- transferProtocols = null;
- }
- // fileSize
- TSizeInBytes fileSize = null;
- try {
- fileSize = TSizeInBytes.make(auxTO.getFileSize(), SizeUnit.BYTES);
- } catch (InvalidTSizeAttributesException e) {
- errorSb.append("\n");
- errorSb.append(e);
- }
- // status
- TReturnStatus status = null;
- TStatusCode code = StatusCodeConverter.getInstance().toSTORM(
- auxTO.getStatus());
- if (code == TStatusCode.EMPTY) {
- errorSb.append("\nRetrieved StatusCode was not recognised: "
- + auxTO.getStatus());
- } else {
- status = new TReturnStatus(code, auxTO.getErrString());
- }
- // transferURL
- /*
- * whatever is read is just meaningless because BoL will fill it in!!! So
- * create an Empty TTURL by default! Vital to avoid problems with unknown
- * DPM NULL/EMPTY logic policy!
- */
- TTURL transferURL = TTURL.makeEmpty();
- // make BoLChunkData
- BoLPersistentChunkData aux = null;
- try {
- aux = new BoLPersistentChunkData(rt, fromSURL, lifeTime, dirOption,
- transferProtocols, fileSize, status, transferURL,
- auxTO.getDeferredStartTime());
- aux.setPrimaryKey(auxTO.getPrimaryKey());
- } catch (InvalidSurlRequestDataAttributesException e) {
- dao.signalMalformedBoLChunk(auxTO);
- log.warn("BoL CHUNK CATALOG! Retrieved malformed BoL "
- + "chunk data from persistence. Dropping chunk from request {}", rt);
- log.warn(e.getMessage(), e);
- log.warn(errorSb.toString());
- }
- // end...
- return aux;
- }
-
- /**
- *
- * Adds to the received BoLChunkDataTO the normalized StFN and the SURL unique
- * ID taken from the BoLChunkData
- *
- * @param chunkTO
- * @param chunk
- */
- private void completeTO(ReducedBoLChunkDataTO chunkTO,
- final ReducedBoLChunkData chunk) {
-
- chunkTO.setNormalizedStFN(chunk.fromSURL().normalizedStFN());
- chunkTO.setSurlUniqueID(new Integer(chunk.fromSURL().uniqueId()));
- }
-
- /**
- *
- * Creates a ReducedBoLChunkDataTO from the received BoLChunkDataTO and
- * completes it with the normalized StFN and the SURL unique ID taken from the
- * PtGChunkData
- *
- * @param chunkTO
- * @param chunk
- * @return
- * @throws InvalidReducedBoLChunkDataAttributesException
- */
- private ReducedBoLChunkDataTO completeTO(BoLChunkDataTO chunkTO,
- final BoLPersistentChunkData chunk)
- throws InvalidReducedBoLChunkDataAttributesException {
-
- ReducedBoLChunkDataTO reducedChunkTO = this.reduce(chunkTO);
- this.completeTO(reducedChunkTO, this.reduce(chunk));
- return reducedChunkTO;
- }
-
- /**
- * Creates a ReducedBoLChunkData from the data contained in the received
- * BoLChunkData
- *
- * @param chunk
- * @return
- * @throws InvalidReducedBoLChunkDataAttributesException
- */
- private ReducedBoLChunkData reduce(BoLPersistentChunkData chunk)
- throws InvalidReducedBoLChunkDataAttributesException {
-
- ReducedBoLChunkData reducedChunk = new ReducedBoLChunkData(chunk.getSURL(),
- chunk.getStatus());
- reducedChunk.setPrimaryKey(chunk.getPrimaryKey());
- return reducedChunk;
- }
-
- /**
- * Creates a ReducedBoLChunkDataTO from the data contained in the received
- * BoLChunkDataTO
- *
- * @param chunkTO
- * @return
- */
- private ReducedBoLChunkDataTO reduce(BoLChunkDataTO chunkTO) {
-
- ReducedBoLChunkDataTO reducedChunkTO = new ReducedBoLChunkDataTO();
- reducedChunkTO.setPrimaryKey(chunkTO.getPrimaryKey());
- reducedChunkTO.setFromSURL(chunkTO.getFromSURL());
- reducedChunkTO.setNormalizedStFN(chunkTO.normalizedStFN());
- reducedChunkTO.setSurlUniqueID(chunkTO.sulrUniqueID());
- reducedChunkTO.setStatus(chunkTO.getStatus());
- reducedChunkTO.setErrString(chunkTO.getErrString());
- return reducedChunkTO;
- }
-
- /**
- * Checks if the received BoLChunkDataTO contains the fields not set by the
- * front end but required
- *
- * @param chunkTO
- * @return
- */
- private boolean isComplete(BoLChunkDataTO chunkTO) {
-
- return (chunkTO.normalizedStFN() != null)
- && (chunkTO.sulrUniqueID() != null);
- }
-
- /**
- * Checks if the received ReducedBoLChunkDataTO contains the fields not set by
- * the front end but required
- *
- * @param reducedChunkTO
- * @return
- */
- // TODO MICHELE USER_SURL new method
- private boolean isComplete(ReducedBoLChunkDataTO reducedChunkTO) {
-
- return (reducedChunkTO.normalizedStFN() != null)
- && (reducedChunkTO.surlUniqueID() != null);
- }
-
- /**
- * Method used to update into Persistence a retrieved BoLChunkData. In case
- * any error occurs, the operation does not proceed but no Exception is
- * thrown. Error messages get logged.
- *
- * Only fileSize, StatusCode, errString and transferURL are updated. Likewise
- * for the request pinLifetime.
- */
- synchronized public void update(BoLPersistentChunkData cd) {
-
- BoLChunkDataTO to = new BoLChunkDataTO();
- /* Primary key needed by DAO Object */
- to.setPrimaryKey(cd.getPrimaryKey());
- to.setFileSize(cd.getFileSize().value());
- to.setStatus(StatusCodeConverter.getInstance().toDB(
- cd.getStatus().getStatusCode()));
- to.setErrString(cd.getStatus().getExplanation());
- to.setLifeTime(PinLifetimeConverter.getInstance().toDB(
- cd.getLifeTime().value()));
- // TODO MICHELE USER_SURL fill new fields
- to.setNormalizedStFN(cd.getSURL().normalizedStFN());
- to.setSurlUniqueID(new Integer(cd.getSURL().uniqueId()));
-
- dao.update(to);
- // TODO MICHELE SURL STORE
- // SurlStatusStore.getInstance().storeSurlStatus(cd.getSURL(),
- // cd.getStatus().getStatusCode());
- }
-
- /**
- * Refresh method. TODO THIS IS A WORK IN PROGRESS!!!! This method have to
- * synch the ChunkData information with the database status.
- *
- * @param auxTO
- * @param BoLPersistentChunkData
- * inputChunk
- * @return BoLChunkData outputChunk
- */
- synchronized public BoLPersistentChunkData refreshStatus(
- BoLPersistentChunkData inputChunk) {
-
- /* Currently not used */
- // Call the dao refresh method to synch with the db status
- BoLChunkDataTO auxTO = dao.refresh(inputChunk.getPrimaryKey());
-
- log.debug("BoL CHUNK CATALOG: retrieved data {}", auxTO);
- if (auxTO == null) {
- log.warn("BoL CHUNK CATALOG! Empty TO found in persistence for specified "
- + "request: {}", inputChunk.getPrimaryKey());
- return inputChunk;
- }
-
- /*
- * In this first version the only field updated is the Status. Once
- * updated, the new status is rewritten into the input ChunkData
- */
-
- // status
- TReturnStatus status = null;
- TStatusCode code = StatusCodeConverter.getInstance().toSTORM(auxTO.getStatus());
- if (code != TStatusCode.EMPTY) {
- status = new TReturnStatus(code, auxTO.getErrString());
- }
- inputChunk.setStatus(status);
- return inputChunk;
- }
-
- /**
- * Method that returns a Collection of ReducedBoLChunkData Objects associated
- * to the supplied TRequestToken.
- *
- * If any of the data retrieved for a given chunk is not well formed and so
- * does not allow a ReducedBoLChunkData Object to be created, then that chunk
- * is dropped and gets logged, while processing continues with the next one.
- * All valid chunks get returned: the others get dropped.
- *
- * If there are no chunks associated to the given TRequestToken, then an empty
- * Collection is returned and a messagge gets logged.
- */
- synchronized public Collection lookupReducedBoLChunkData(
- TRequestToken rt) {
-
- Collection reducedChunkDataTOs = dao.findReduced(rt
- .getValue());
- log.debug("BoL CHUNK CATALOG: retrieved data {}", reducedChunkDataTOs);
- ArrayList list = new ArrayList();
- if (reducedChunkDataTOs.isEmpty()) {
- log.debug("BoL CHUNK CATALOG! No chunks found in persistence for {}", rt);
- } else {
- ReducedBoLChunkData reducedChunkData = null;
- for (ReducedBoLChunkDataTO reducedChunkDataTO : reducedChunkDataTOs) {
- reducedChunkData = makeOneReduced(reducedChunkDataTO);
- if (reducedChunkData != null) {
- list.add(reducedChunkData);
- if (!this.isComplete(reducedChunkDataTO)) {
- completeTO(reducedChunkDataTO, reducedChunkData);
- dao.updateIncomplete(reducedChunkDataTO);
- }
- }
- }
- log.debug("BoL CHUNK CATALOG: returning {}", list);
- }
- return list;
- }
-
- public Collection lookupReducedBoLChunkData(
- TRequestToken requestToken, Collection surls) {
-
- int[] surlsUniqueIDs = new int[surls.size()];
- String[] surlsArray = new String[surls.size()];
- int index = 0;
- for (TSURL tsurl : surls) {
- surlsUniqueIDs[index] = tsurl.uniqueId();
- surlsArray[index] = tsurl.rawSurl();
- index++;
- }
- Collection chunkDataTOCollection = dao.findReduced(
- requestToken, surlsUniqueIDs, surlsArray);
- return buildReducedChunkDataList(chunkDataTOCollection);
- }
-
- public Collection lookupBoLChunkData(TSURL surl,
- GridUserInterface user) {
-
- return lookupBoLChunkData(Arrays.asList(new TSURL[] { surl }), user);
- }
-
- public Collection lookupBoLChunkData(TSURL surl) {
-
- return lookupBoLChunkData(Arrays.asList(new TSURL[] { surl }));
- }
-
- private Collection lookupBoLChunkData(
- List surls, GridUserInterface user) {
-
- int[] surlsUniqueIDs = new int[surls.size()];
- String[] surlsArray = new String[surls.size()];
- int index = 0;
- for (TSURL tsurl : surls) {
- surlsUniqueIDs[index] = tsurl.uniqueId();
- surlsArray[index] = tsurl.rawSurl();
- index++;
- }
- Collection chunkDataTOCollection = dao.find(surlsUniqueIDs,
- surlsArray, user.getDn());
- log.debug("BoL CHUNK CATALOG: retrieved data {}", chunkDataTOCollection);
- return buildChunkDataList(chunkDataTOCollection);
- }
-
- public Collection lookupBoLChunkData(List surls) {
-
- int[] surlsUniqueIDs = new int[surls.size()];
- String[] surlsArray = new String[surls.size()];
- int index = 0;
- for (TSURL tsurl : surls) {
- surlsUniqueIDs[index] = tsurl.uniqueId();
- surlsArray[index] = tsurl.rawSurl();
- index++;
- }
- Collection chunkDataTOCollection = dao.find(surlsUniqueIDs,
- surlsArray);
- log.debug("BoL CHUNK CATALOG: retrieved data {}", chunkDataTOCollection);
- return buildChunkDataList(chunkDataTOCollection);
- }
-
- private Collection buildChunkDataList(
- Collection chunkDataTOCollection) {
-
- List list = new ArrayList();
- BoLPersistentChunkData chunk;
- for (BoLChunkDataTO chunkTO : chunkDataTOCollection) {
- chunk = makeOne(chunkTO);
- if (chunk == null) {
- continue;
- }
- list.add(chunk);
- if (isComplete(chunkTO)) {
- continue;
- }
- try {
- dao.updateIncomplete(this.completeTO(chunkTO, chunk));
- } catch (InvalidReducedBoLChunkDataAttributesException e) {
- log.warn("BoL CHUNK CATALOG! unable to add missing informations "
- + "on DB to the request: {}", e.getMessage());
- }
- }
- log.debug("BoL CHUNK CATALOG: returning {}", list);
- return list;
- }
-
- private BoLPersistentChunkData makeOne(BoLChunkDataTO chunkTO) {
-
- try {
- return makeOne(chunkTO, new TRequestToken(chunkTO.getRequestToken(),
- chunkTO.getTimeStamp()));
- } catch (InvalidTRequestTokenAttributesException e) {
- throw new IllegalStateException(
- "Unexpected InvalidTRequestTokenAttributesException in TRequestToken: "
- + e);
- }
- }
-
- /**
- * Method that returns a Collection of ReducedBoLChunkData Objects matching
- * the supplied GridUser and Collection of TSURLs.
- *
- * If any of the data retrieved for a given chunk is not well formed and so
- * does not allow a ReducedBoLChunkData Object to be created, then that chunk
- * is dropped and gets logged, while processing continues with the next one.
- * All valid chunks get returned: the others get dropped.
- *
- * If there are no chunks associated to the given GridUser and Collection of
- * TSURLs, then an empty Collection is returned and a message gets logged.
- */
- synchronized public Collection lookupReducedBoLChunkData(
- GridUserInterface gu, Collection tsurlCollection) {
-
- int[] surlsUniqueIDs = new int[tsurlCollection.size()];
- String[] surls = new String[tsurlCollection.size()];
- int index = 0;
- for (TSURL tsurl : tsurlCollection) {
- surlsUniqueIDs[index] = tsurl.uniqueId();
- surls[index] = tsurl.rawSurl();
- index++;
- }
- Collection chunkDataTOCollection = dao.findReduced(
- gu.getDn(), surlsUniqueIDs, surls);
- log.debug("BoL CHUNK CATALOG: retrieved data {}", chunkDataTOCollection);
- return buildReducedChunkDataList(chunkDataTOCollection);
- }
-
- private Collection buildReducedChunkDataList(
- Collection chunkDataTOCollection) {
-
- ArrayList list = new ArrayList();
- ReducedBoLChunkData reducedChunkData;
- for (ReducedBoLChunkDataTO reducedChunkDataTO : chunkDataTOCollection) {
- reducedChunkData = makeOneReduced(reducedChunkDataTO);
- if (reducedChunkData != null) {
- list.add(reducedChunkData);
- if (!this.isComplete(reducedChunkDataTO)) {
- this.completeTO(reducedChunkDataTO, reducedChunkData);
- dao.updateIncomplete(reducedChunkDataTO);
- }
- }
- }
- log.debug("BoL CHUNK CATALOG: returning {}", list);
- return list;
- }
-
- /**
- * @param auxTO
- * @return
- */
- private ReducedBoLChunkData makeOneReduced(
- ReducedBoLChunkDataTO reducedChunkDataTO) {
-
- StringBuilder errorSb = new StringBuilder();
- // fromSURL
- TSURL fromSURL = null;
- try {
- fromSURL = TSURL.makeFromStringValidate(reducedChunkDataTO.fromSURL());
- } catch (InvalidTSURLAttributesException e) {
- errorSb.append(e);
- }
- if (reducedChunkDataTO.normalizedStFN() != null) {
- fromSURL.setNormalizedStFN(reducedChunkDataTO.normalizedStFN());
- }
- if (reducedChunkDataTO.surlUniqueID() != null) {
- fromSURL.setUniqueID(reducedChunkDataTO.surlUniqueID().intValue());
- }
- // status
- TReturnStatus status = null;
- TStatusCode code = StatusCodeConverter.getInstance().toSTORM(
- reducedChunkDataTO.status());
- if (code == TStatusCode.EMPTY) {
- errorSb.append("\nRetrieved StatusCode was not recognised: "
- + reducedChunkDataTO.status());
- } else {
- status = new TReturnStatus(code, reducedChunkDataTO.errString());
- }
- // make ReducedBoLChunkData
- ReducedBoLChunkData aux = null;
- try {
- aux = new ReducedBoLChunkData(fromSURL, status);
- aux.setPrimaryKey(reducedChunkDataTO.primaryKey());
- } catch (InvalidReducedBoLChunkDataAttributesException e) {
- log.warn("BoL CHUNK CATALOG! Retrieved malformed "
- + "Reduced BoL chunk data from persistence: dropping reduced chunk...");
- log.warn(e.getMessage(), e);
- log.warn(errorSb.toString());
- }
- // end...
- return aux;
- }
-
- /**
- * Method used to add into Persistence a new entry. The supplied BoLChunkData
- * gets the primary key changed to the value assigned in Persistence.
- *
- * This method is intended to be used by a recursive BoL request: the parent
- * request supplies a directory which must be expanded, so all new children
- * requests resulting from the files in the directory are added into
- * persistence.
- *
- * So this method does _not_ add a new SRM prepare_to_get request into the DB!
- *
- * The only children data written into the DB are: sourceSURL, TDirOption,
- * statusCode and explanation.
- *
- * In case of any error the operation does not proceed, but no Exception is
- * thrown! Proper messages get logged by underlaying DAO.
- */
- synchronized public void addChild(BoLPersistentChunkData chunkData) {
-
- BoLChunkDataTO to = new BoLChunkDataTO();
- // needed for now to find ID of request! Must be changed soon!
- to.setRequestToken(chunkData.getRequestToken().toString());
- to.setFromSURL(chunkData.getSURL().toString());
- to.setNormalizedStFN(chunkData.getSURL().normalizedStFN());
- to.setSurlUniqueID(new Integer(chunkData.getSURL().uniqueId()));
-
- to.setAllLevelRecursive(chunkData.getDirOption().isAllLevelRecursive());
- to.setDirOption(chunkData.getDirOption().isDirectory());
- to.setNumLevel(chunkData.getDirOption().getNumLevel());
- to.setStatus(StatusCodeConverter.getInstance().toDB(
- chunkData.getStatus().getStatusCode()));
- to.setErrString(chunkData.getStatus().getExplanation());
- to.setDeferredStartTime(chunkData.getDeferredStartTime());
-
- /* add the entry and update the Primary Key field */
- dao.addChild(to);
- chunkData.setPrimaryKey(to.getPrimaryKey());
- }
-
- /**
- * Method used to add into Persistence a new entry. The supplied BoLChunkData
- * gets the primary key changed to the value assigned in the Persistence. The
- * method requires the GridUser to whom associate the added request.
- *
- * This method is intended to be used by an srmCopy request in push mode which
- * implies a local srmBoL. The only fields from BoLChunkData that are
- * considered are: the requestToken, the sourceSURL, the pinLifetime, the
- * dirOption, the protocolList, the status and error string.
- *
- * So this method _adds_ a new SRM prepare_to_get request into the DB!
- *
- * In case of any error the operation does not proceed, but no Exception is
- * thrown! The underlaying DAO logs proper error messages.
- */
- synchronized public void add(BoLPersistentChunkData chunkData,
- GridUserInterface gu) {
-
- /* Currently NOT used */
- BoLChunkDataTO to = new BoLChunkDataTO();
- to.setRequestToken(chunkData.getRequestToken().toString());
- to.setFromSURL(chunkData.getSURL().toString());
- // TODO MICHELE USER_SURL fill new fields
- to.setNormalizedStFN(chunkData.getSURL().normalizedStFN());
- to.setSurlUniqueID(new Integer(chunkData.getSURL().uniqueId()));
-
- to.setLifeTime(new Long(chunkData.getLifeTime().value()).intValue());
- to.setAllLevelRecursive(chunkData.getDirOption().isAllLevelRecursive());
- to.setDirOption(chunkData.getDirOption().isDirectory());
- to.setNumLevel(chunkData.getDirOption().getNumLevel());
- to.setProtocolList(TransferProtocolListConverter.toDB(chunkData
- .getTransferProtocols()));
- to.setStatus(StatusCodeConverter.getInstance().toDB(
- chunkData.getStatus().getStatusCode()));
- to.setErrString(chunkData.getStatus().getExplanation());
- to.setDeferredStartTime(chunkData.getDeferredStartTime());
-
- /* add the entry and update the Primary Key field! */
- dao.addNew(to, gu.getDn());
- chunkData.setPrimaryKey(to.getPrimaryKey());
- }
-
- /**
- * Method used to establish if in Persistence there is a BoLChunkData working
- * on the supplied SURL, and whose state is SRM_FILE_PINNED, in which case
- * true is returned. In case none are found or there is any problem, false is
- * returned. This method is intended to be used by srmMv.
- */
- synchronized public boolean isSRM_FILE_PINNED(TSURL surl) {
-
- return (dao.numberInSRM_SUCCESS(surl.uniqueId()) > 0);
- }
-
- /**
- * Method used to transit the specified Collection of ReducedBoLChunkData from
- * SRM_FILE_PINNED to SRM_RELEASED. Chunks in any other starting state are not
- * transited. In case of any error nothing is done, but proper error messages
- * get logged by the underlaying DAO.
- */
- synchronized public void transitSRM_SUCCESStoSRM_RELEASED(
- Collection chunks, TRequestToken token) {
-
- if (chunks == null || chunks.isEmpty()) {
- return;
- }
-
- long[] primaryKeys = new long[chunks.size()];
- int index = 0;
- for (ReducedBoLChunkData chunkData : chunks) {
- if (chunkData != null) {
- primaryKeys[index] = chunkData.primaryKey();
- index++;
- }
- }
- dao.transitSRM_SUCCESStoSRM_RELEASED(primaryKeys, token);
- }
-
- /**
- * This method is intended to be used by srmRm to transit all BoL chunks on
- * the given SURL which are in the SRM_FILE_PINNED state, to SRM_ABORTED. The
- * supplied String will be used as explanation in those chunks return status.
- * The global status of the request is _not_ changed.
- *
- * The TURL of those requests will automatically be set to empty. Notice that
- * both removeAllJit(SURL) and removeVolatile(SURL) are automatically invoked
- * on PinnedFilesCatalog, to remove any entry and corresponding physical ACLs.
- *
- * Beware, that the chunks may be part of requests that have finished, or that
- * still have not finished because other chunks are being processed.
- */
- synchronized public void transitSRM_SUCCESStoSRM_ABORTED(TSURL surl,
- String explanation) {
-
- /* Currently NOT used */
- if (explanation == null) {
- explanation = "";
- }
- dao.transitSRM_SUCCESStoSRM_ABORTED(surl.uniqueId(), surl.toString(),
- explanation);
- }
-
- /**
- * Method used to force transition to SRM_RELEASED from SRM_FILE_PINNED, of
- * all BoL Requests whose pinLifetime has expired and the state still has not
- * been changed (a user forgot to run srmReleaseFiles)!
- */
- synchronized public void transitExpiredSRM_SUCCESS() {
-
- dao.transitExpiredSRM_SUCCESS();
- }
-
- public void updateFromPreviousStatus(TRequestToken requestToken,
- TStatusCode expectedStatusCode, TStatusCode newStatusCode,
- String explanation) {
-
- dao.updateStatusOnMatchingStatus(requestToken, expectedStatusCode,
- newStatusCode, explanation);
- }
-
- public void updateFromPreviousStatus(TRequestToken requestToken,
- List surlList, TStatusCode expectedStatusCode,
- TStatusCode newStatusCode) {
-
- int[] surlsUniqueIDs = new int[surlList.size()];
- String[] surls = new String[surlList.size()];
- int index = 0;
- for (TSURL tsurl : surlList) {
- surlsUniqueIDs[index] = tsurl.uniqueId();
- surls[index] = tsurl.rawSurl();
- index++;
- }
- dao.updateStatusOnMatchingStatus(requestToken, surlsUniqueIDs, surls,
- expectedStatusCode, newStatusCode);
- }
+ private static final Logger log = LoggerFactory.getLogger(BoLChunkCatalog.class);
+
+ private final BoLChunkDAO dao;
+
+ private static BoLChunkCatalog instance;
+
+ public static synchronized BoLChunkCatalog getInstance() {
+ if (instance == null) {
+ instance = new BoLChunkCatalog();
+ }
+ return instance;
+ }
+
+ /**
+ * Private constructor: obtains the singleton MySQL DAO instance used to read and write BoL
+ * chunk data in persistence.
+ */
+ private BoLChunkCatalog() {
+
+ dao = BoLChunkDAOMySql.getInstance();
+ }
+
+ /**
+ * Method that returns a Collection of BoLChunkData Objects matching the supplied TRequestToken.
+ *
+ * If any of the data associated to the TRequestToken is not well formed and so does not allow a
+ * BoLChunkData Object to be created, then that part of the request is dropped and gets logged,
+ * and the processing continues with the next part. All valid chunks get returned: the others get
+ * dropped.
+ *
+ * If there are no chunks to process then an empty Collection is returned, and a message gets
+ * logged.
+ */
+ synchronized public Collection lookup(TRequestToken rt) {
+
+ Collection chunkCollection = dao.find(rt);
+ log.debug("BoL CHUNK CATALOG: retrieved data {}", chunkCollection);
+ List list = new ArrayList();
+
+ if (chunkCollection.isEmpty()) {
+ log.warn("BoL CHUNK CATALOG! No chunks found in persistence for specified request: {}", rt);
+ return list;
+ }
+
+ BoLPersistentChunkData chunk;
+ for (BoLChunkDataTO chunkTO : chunkCollection) {
+ chunk = makeOne(chunkTO, rt);
+ if (chunk == null) {
+ continue;
+ }
+ list.add(chunk);
+ if (isComplete(chunkTO)) {
+ continue;
+ }
+ try {
+ dao.updateIncomplete(completeTO(chunkTO, chunk));
+ } catch (InvalidReducedBoLChunkDataAttributesException e) {
+ log.warn(
+ "BoL CHUNK CATALOG! unable to add missing informations on DB " + "to the request: {}",
+ e.getMessage());
+ }
+ }
+ log.debug("BoL CHUNK CATALOG: returning " + list);
+ return list;
+ }
+
+ /**
+ * Generates a BoLChunkData from the received BoLChunkDataTO
+ *
+ * @param auxTO
+ * @param rt
+ * @return
+ */
+ private BoLPersistentChunkData makeOne(BoLChunkDataTO auxTO, TRequestToken rt) {
+
+ StringBuilder errorSb = new StringBuilder();
+ TSURL fromSURL = null;
+ try {
+ fromSURL = TSURL.makeFromStringValidate(auxTO.getFromSURL());
+ } catch (InvalidTSURLAttributesException e) {
+ errorSb.append(e);
+ }
+ if (auxTO.normalizedStFN() != null) {
+ fromSURL.setNormalizedStFN(auxTO.normalizedStFN());
+ }
+ if (auxTO.sulrUniqueID() != null) {
+ fromSURL.setUniqueID(auxTO.sulrUniqueID().intValue());
+ }
+ // lifeTime
+ TLifeTimeInSeconds lifeTime = null;
+ try {
+ long pinLifeTime = PinLifetimeConverter.getInstance().toStoRM(auxTO.getLifeTime());
+ // Check for max value allowed
+ long max = Configuration.getInstance().getPinLifetimeMaximum();
+ if (pinLifeTime > max) {
+ log.warn("PinLifeTime is greater than the max value allowed. "
+ + "Drop the value to the max = {} seconds", max);
+ pinLifeTime = max;
+ }
+ lifeTime = TLifeTimeInSeconds.make(pinLifeTime, TimeUnit.SECONDS);
+ } catch (IllegalArgumentException e) {
+ errorSb.append("\n");
+ errorSb.append(e);
+ }
+ // dirOption
+ TDirOption dirOption = null;
+ try {
+ dirOption =
+ new TDirOption(auxTO.getDirOption(), auxTO.getAllLevelRecursive(), auxTO.getNumLevel());
+ } catch (InvalidTDirOptionAttributesException e) {
+ errorSb.append("\n");
+ errorSb.append(e);
+ }
+ // transferProtocols
+ TURLPrefix transferProtocols = TransferProtocolListConverter.toSTORM(auxTO.getProtocolList());
+ if (transferProtocols.size() == 0) {
+ errorSb
+ .append("\nEmpty list of TransferProtocols or" + " could not translate TransferProtocols!");
+ /* fail construction of BoLChunkData! */
+ transferProtocols = null;
+ }
+ // fileSize
+ TSizeInBytes fileSize = null;
+ try {
+ fileSize = TSizeInBytes.make(auxTO.getFileSize(), SizeUnit.BYTES);
+ } catch (InvalidTSizeAttributesException e) {
+ errorSb.append("\n");
+ errorSb.append(e);
+ }
+ // status
+ TReturnStatus status = null;
+ TStatusCode code = StatusCodeConverter.getInstance().toSTORM(auxTO.getStatus());
+ if (code == TStatusCode.EMPTY) {
+ errorSb.append("\nRetrieved StatusCode was not recognised: " + auxTO.getStatus());
+ } else {
+ status = new TReturnStatus(code, auxTO.getErrString());
+ }
+ // transferURL
+ /*
+ * whatever is read is just meaningless because BoL will fill it in!!! So create an Empty TTURL
+ * by default! Vital to avoid problems with unknown DPM NULL/EMPTY logic policy!
+ */
+ TTURL transferURL = TTURL.makeEmpty();
+ // make BoLChunkData
+ BoLPersistentChunkData aux = null;
+ try {
+ aux = new BoLPersistentChunkData(rt, fromSURL, lifeTime, dirOption, transferProtocols,
+ fileSize, status, transferURL, auxTO.getDeferredStartTime());
+ aux.setPrimaryKey(auxTO.getPrimaryKey());
+ } catch (InvalidSurlRequestDataAttributesException e) {
+ dao.updateStatus(auxTO, SRM_FAILURE, "Request is malformed!");
+ log.warn("BoL CHUNK CATALOG! Retrieved malformed BoL "
+ + "chunk data from persistence. Dropping chunk from request {}", rt);
+ log.warn(e.getMessage(), e);
+ log.warn(errorSb.toString());
+ }
+ // end...
+ return aux;
+ }
+
+ /**
+ *
+ * Adds to the received BoLChunkDataTO the normalized StFN and the SURL unique ID taken from the
+ * BoLChunkData
+ *
+ * @param chunkTO
+ * @param chunk
+ */
+ private void completeTO(ReducedBoLChunkDataTO chunkTO, final ReducedBoLChunkData chunk) {
+
+ chunkTO.setNormalizedStFN(chunk.fromSURL().normalizedStFN());
+ chunkTO.setSurlUniqueID(new Integer(chunk.fromSURL().uniqueId()));
+ }
+
+ /**
+ *
+ * Creates a ReducedBoLChunkDataTO from the received BoLChunkDataTO and completes it with the
+ * normalized StFN and the SURL unique ID taken from the BoLChunkData
+ *
+ * @param chunkTO
+ * @param chunk
+ * @return
+ * @throws InvalidReducedBoLChunkDataAttributesException
+ */
+ private ReducedBoLChunkDataTO completeTO(BoLChunkDataTO chunkTO,
+ final BoLPersistentChunkData chunk) throws InvalidReducedBoLChunkDataAttributesException {
+
+ ReducedBoLChunkDataTO reducedChunkTO = this.reduce(chunkTO);
+ this.completeTO(reducedChunkTO, this.reduce(chunk));
+ return reducedChunkTO;
+ }
+
+ /**
+ * Creates a ReducedBoLChunkData from the data contained in the received BoLChunkData
+ *
+ * @param chunk
+ * @return
+ * @throws InvalidReducedBoLChunkDataAttributesException
+ */
+ private ReducedBoLChunkData reduce(BoLPersistentChunkData chunk)
+ throws InvalidReducedBoLChunkDataAttributesException {
+
+ ReducedBoLChunkData reducedChunk = new ReducedBoLChunkData(chunk.getSURL(), chunk.getStatus());
+ reducedChunk.setPrimaryKey(chunk.getPrimaryKey());
+ return reducedChunk;
+ }
+
+ /**
+ * Creates a ReducedBoLChunkDataTO from the data contained in the received BoLChunkDataTO
+ *
+ * @param chunkTO
+ * @return
+ */
+ private ReducedBoLChunkDataTO reduce(BoLChunkDataTO chunkTO) {
+
+ ReducedBoLChunkDataTO reducedChunkTO = new ReducedBoLChunkDataTO();
+ reducedChunkTO.setPrimaryKey(chunkTO.getPrimaryKey());
+ reducedChunkTO.setFromSURL(chunkTO.getFromSURL());
+ reducedChunkTO.setNormalizedStFN(chunkTO.normalizedStFN());
+ reducedChunkTO.setSurlUniqueID(chunkTO.sulrUniqueID());
+ reducedChunkTO.setStatus(chunkTO.getStatus());
+ reducedChunkTO.setErrString(chunkTO.getErrString());
+ return reducedChunkTO;
+ }
+
+ /**
+ * Checks if the received BoLChunkDataTO contains the fields not set by the front end but required
+ *
+ * @param chunkTO
+ * @return
+ */
+ private boolean isComplete(BoLChunkDataTO chunkTO) {
+
+ return (chunkTO.normalizedStFN() != null) && (chunkTO.sulrUniqueID() != null);
+ }
+
+ /**
+ * Method used to update into Persistence a retrieved BoLChunkData. In case any error occurs, the
+ * operation does not proceed but no Exception is thrown. Error messages get logged.
+ *
+ * Only fileSize, StatusCode, errString and transferURL are updated. Likewise for the request
+ * pinLifetime.
+ */
+ synchronized public void update(BoLPersistentChunkData cd) {
+
+ BoLChunkDataTO to = new BoLChunkDataTO();
+ /* Primary key needed by DAO Object */
+ to.setPrimaryKey(cd.getPrimaryKey());
+ to.setFileSize(cd.getFileSize().value());
+ to.setStatus(StatusCodeConverter.getInstance().toDB(cd.getStatus().getStatusCode()));
+ to.setErrString(cd.getStatus().getExplanation());
+ to.setLifeTime(PinLifetimeConverter.getInstance().toDB(cd.getLifeTime().value()));
+ // TODO MICHELE USER_SURL fill new fields
+ to.setNormalizedStFN(cd.getSURL().normalizedStFN());
+ to.setSurlUniqueID(new Integer(cd.getSURL().uniqueId()));
+
+ dao.update(to);
+ }
+
+ /**
+ * Method used to add into Persistence a new entry. The supplied BoLChunkData gets the primary key
+ * changed to the value assigned in Persistence.
+ *
+ * This method is intended to be used by a recursive BoL request: the parent request supplies a
+ * directory which must be expanded, so all new children requests resulting from the files in the
+ * directory are added into persistence.
+ *
+ * So this method does _not_ add a new SRM prepare_to_get request into the DB!
+ *
+ * The only children data written into the DB are: sourceSURL, TDirOption, statusCode and
+ * explanation.
+ *
+ * In case of any error the operation does not proceed, but no Exception is thrown! Proper
+ * messages get logged by underlaying DAO.
+ */
+ synchronized public void addChild(BoLPersistentChunkData chunkData) {
+
+ BoLChunkDataTO to = new BoLChunkDataTO();
+ // needed for now to find ID of request! Must be changed soon!
+ to.setRequestToken(chunkData.getRequestToken().toString());
+ to.setFromSURL(chunkData.getSURL().toString());
+ to.setNormalizedStFN(chunkData.getSURL().normalizedStFN());
+ to.setSurlUniqueID(new Integer(chunkData.getSURL().uniqueId()));
+
+ to.setAllLevelRecursive(chunkData.getDirOption().isAllLevelRecursive());
+ to.setDirOption(chunkData.getDirOption().isDirectory());
+ to.setNumLevel(chunkData.getDirOption().getNumLevel());
+ to.setStatus(StatusCodeConverter.getInstance().toDB(chunkData.getStatus().getStatusCode()));
+ to.setErrString(chunkData.getStatus().getExplanation());
+ to.setDeferredStartTime(chunkData.getDeferredStartTime());
+
+ /* add the entry and update the Primary Key field */
+ dao.addChild(to);
+ chunkData.setPrimaryKey(to.getPrimaryKey());
+ }
+
+ public void updateFromPreviousStatus(TRequestToken requestToken, TStatusCode expectedStatusCode,
+ TStatusCode newStatusCode, String explanation) {
+
+ dao.updateStatusOnMatchingStatus(requestToken, expectedStatusCode, newStatusCode, explanation);
+ }
}
diff --git a/src/main/java/it/grid/storm/catalogs/BoLChunkDAO.java b/src/main/java/it/grid/storm/catalogs/BoLChunkDAO.java
deleted file mode 100644
index bd19757f8..000000000
--- a/src/main/java/it/grid/storm/catalogs/BoLChunkDAO.java
+++ /dev/null
@@ -1,1701 +0,0 @@
-/*
- *
- * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010.
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not
- * use this file except in compliance with the License. You may obtain a copy of
- * the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package it.grid.storm.catalogs;
-
-import it.grid.storm.config.Configuration;
-import it.grid.storm.ea.StormEA;
-import it.grid.storm.namespace.NamespaceDirector;
-import it.grid.storm.namespace.NamespaceException;
-import it.grid.storm.namespace.StoRI;
-import it.grid.storm.namespace.naming.SURL;
-import it.grid.storm.srm.types.InvalidTSURLAttributesException;
-import it.grid.storm.srm.types.TRequestToken;
-import it.grid.storm.srm.types.TRequestType;
-import it.grid.storm.srm.types.TSURL;
-import it.grid.storm.srm.types.TStatusCode;
-
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.PreparedStatement;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.SQLWarning;
-import java.sql.Statement;
-import java.sql.Timestamp;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Date;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map.Entry;
-import java.util.Timer;
-import java.util.TimerTask;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * DAO class for BoLChunkCatalog. This DAO is specifically designed to connect
- * to a MySQL DB. The raw data found in those tables is pre-treated in order to
- * turn it into the Object Model of StoRM. See Method comments for further info.
- * BEWARE! DAO Adjusts for extra fields in the DB that are not present in the
- * object model.
- *
- * @author CNAF
- * @version 1.0
- * @date Aug 2009
- */
-public class BoLChunkDAO {
-
- private static final Logger log = LoggerFactory.getLogger(BoLChunkDAO.class);
-
- /** String with the name of the class for the DB driver */
- private final String driver = Configuration.getInstance().getDBDriver();
- /** String referring to the URL of the DB */
- private final String url = Configuration.getInstance().getDBURL();
- /** String with the password for the DB */
- private final String password = Configuration.getInstance().getDBPassword();
- /** String with the name for the DB */
- private final String name = Configuration.getInstance().getDBUserName();
- /** Connection to DB - WARNING!!! It is kept open all the time! */
- private Connection con = null;
- private final static BoLChunkDAO dao = new BoLChunkDAO();
-
- /**
- * timer thread that will run a taask to alert when reconnecting is necessary!
- */
- private Timer clock = null;
- /**
- * timer task that will update the boolean signaling that a reconnection is
- * needed!
- */
- private TimerTask clockTask = null;
- /** milliseconds that must pass before reconnecting to DB */
- private final long period = Configuration.getInstance()
- .getDBReconnectPeriod() * 1000;
- /** initial delay in milliseconds before starting timer */
- private final long delay = Configuration.getInstance().getDBReconnectDelay() * 1000;
- /** boolean that tells whether reconnection is needed because of MySQL bug! */
- private boolean reconnect = false;
-
- private BoLChunkDAO() {
-
- setUpConnection();
-
- clock = new Timer();
- clockTask = new TimerTask() {
-
- @Override
- public void run() {
-
- reconnect = true;
- }
- }; // clock task
- clock.scheduleAtFixedRate(clockTask, delay, period);
- }
-
- /**
- * Method that returns the only instance of the BoLChunkDAO.
- */
- public static BoLChunkDAO getInstance() {
-
- return dao;
- }
-
- /**
- * Method used to add a new record to the DB: the supplied BoLChunkDataTO gets
- * its primaryKey changed to the one assigned by the DB. The supplied
- * BoLChunkData is used to fill in only the DB table where file specific info
- * gets recorded: it does _not_ add a new request! So if spurious data is
- * supplied, it will just stay there because of a lack of a parent request!
- */
- public synchronized void addChild(BoLChunkDataTO to) {
-
- if (!checkConnection()) {
- log.error("BoL CHUNK DAO: addChild - unable to get a valid connection!");
- return;
- }
- String str = null;
- PreparedStatement id = null; // statement to find out the ID associated to
- // the request token
- ResultSet rsid = null; // result set containing the ID of the request.
- // insertion
- try {
-
- /* WARNING!!!! We are forced to run a query to get the ID of the request,
- * which should NOT be so because the corresponding request object should
- * have been changed with the extra field! However, it is not possible
- * at the moment to perform such change because of strict deadline and
- * the change could wreak havoc the code. So we are forced to make this
- * query!!!
- */
-
- // begin transaction
- con.setAutoCommit(false);
- logWarnings(con.getWarnings());
-
- // find ID of request corresponding to given RequestToken
- str = "SELECT rq.ID FROM request_queue rq WHERE rq.r_token=?";
-
- id = con.prepareStatement(str);
- logWarnings(con.getWarnings());
-
- id.setString(1, to.getRequestToken());
- logWarnings(id.getWarnings());
-
- log.debug("BoL CHUNK DAO: addChild; {}", id.toString());
- rsid = id.executeQuery();
- logWarnings(id.getWarnings());
-
- /* ID of request in request_process! */
- int request_id = extractID(rsid);
- int id_s = fillBoLTables(to, request_id);
-
- // end transaction!
- con.commit();
- logWarnings(con.getWarnings());
- con.setAutoCommit(true);
- logWarnings(con.getWarnings());
-
- // update primary key reading the generated key
- to.setPrimaryKey(id_s);
- } catch (SQLException e) {
- log.error("BoL CHUNK DAO: unable to complete addChild! BoLChunkDataTO: {}; "
- + "exception received: {}", to, e.getMessage(), e);
- rollback(con);
- } catch (Exception e) {
- log.error("BoL CHUNK DAO: unable to complete addChild! BoLChunkDataTO: {}; "
- + "exception received: {}", to, e.getMessage(), e);
- rollback(con);
- } finally {
- close(rsid);
- close(id);
- }
- }
-
- /**
- * Method used to add a new record to the DB: the supplied BoLChunkDataTO gets
- * its primaryKey changed to the one assigned by the DB. The client_dn must
- * also be supplied as a String. The supplied BoLChunkData is used to fill in
- * all the DB tables where file specific info gets recorded: it _adds_ a new
- * request!
- */
- public synchronized void addNew(BoLChunkDataTO to, String client_dn) {
-
- if (!checkConnection()) {
- log.error("BoL CHUNK DAO: addNew - unable to get a valid connection!");
- return;
- }
- String str = null;
- /* Result set containing the ID of the inserted new request */
- ResultSet rs_new = null;
- /* Insert new request into process_request */
- PreparedStatement addNew = null;
- /* Insert protocols for request. */
- PreparedStatement addProtocols = null; // insert protocols for request.
- try {
- // begin transaction
- con.setAutoCommit(false);
- logWarnings(con.getWarnings());
-
- // add to request_queue...
- str = "INSERT INTO request_queue (config_RequestTypeID,client_dn,pinLifetime,status,errstring,r_token,nbreqfiles,timeStamp,deferredStartTime) VALUES (?,?,?,?,?,?,?,?,?)";
- addNew = con.prepareStatement(str, Statement.RETURN_GENERATED_KEYS);
- logWarnings(con.getWarnings());
- /* request type set to bring online */
- addNew.setString(1,
- RequestTypeConverter.getInstance().toDB(TRequestType.BRING_ON_LINE));
- logWarnings(addNew.getWarnings());
-
- addNew.setString(2, client_dn);
- logWarnings(addNew.getWarnings());
-
- addNew.setInt(3, to.getLifeTime());
- logWarnings(addNew.getWarnings());
-
- addNew.setInt(
- 4,
- StatusCodeConverter.getInstance().toDB(
- TStatusCode.SRM_REQUEST_INPROGRESS));
- logWarnings(addNew.getWarnings());
-
- addNew.setString(5, "New BoL Request resulting from srmCopy invocation.");
- logWarnings(addNew.getWarnings());
-
- addNew.setString(6, to.getRequestToken());
- logWarnings(addNew.getWarnings());
-
- addNew.setInt(7, 1); // number of requested files set to 1!
- logWarnings(addNew.getWarnings());
-
- addNew.setTimestamp(8, new Timestamp(new Date().getTime()));
- logWarnings(addNew.getWarnings());
-
- addNew.setInt(9, to.getDeferredStartTime());
- logWarnings(addNew.getWarnings());
-
- log.trace("BoL CHUNK DAO: addNew; {}", addNew.toString());
- addNew.execute();
- logWarnings(addNew.getWarnings());
-
- rs_new = addNew.getGeneratedKeys();
- int id_new = extractID(rs_new);
-
- // add protocols...
- str = "INSERT INTO request_TransferProtocols (request_queueID,config_ProtocolsID) VALUES (?,?)";
- addProtocols = con.prepareStatement(str);
- logWarnings(con.getWarnings());
- for (Iterator i = to.getProtocolList().iterator(); i.hasNext();) {
- addProtocols.setInt(1, id_new);
- logWarnings(addProtocols.getWarnings());
-
- addProtocols.setString(2, i.next());
- logWarnings(addProtocols.getWarnings());
-
- log.trace("BoL CHUNK DAO: addNew; {}", addProtocols.toString());
- addProtocols.execute();
- logWarnings(addProtocols.getWarnings());
- }
-
- // addChild...
- int id_s = fillBoLTables(to, id_new);
-
- // end transaction!
- con.commit();
- logWarnings(con.getWarnings());
- con.setAutoCommit(true);
- logWarnings(con.getWarnings());
-
- // update primary key reading the generated key
- to.setPrimaryKey(id_s);
- } catch (SQLException e) {
- log.error("BoL CHUNK DAO: Rolling back! Unable to complete addNew! "
- + "BoLChunkDataTO: {}; exception received: {}", to, e.getMessage(), e);
- rollback(con);
- } catch (Exception e) {
- log.error("BoL CHUNK DAO: unable to complete addNew! BoLChunkDataTO: {}; "
- + "exception received: {}", to, e.getMessage(), e);
- rollback(con);
- } finally {
- close(rs_new);
- close(addNew);
- close(addProtocols);
- }
- }
-
- /**
- * To be used inside a transaction
- *
- * @param to
- * @param requestQueueID
- * @return
- * @throws SQLException
- * @throws Exception
- */
- private synchronized int fillBoLTables(BoLChunkDataTO to, int requestQueueID)
- throws SQLException, Exception {
-
- String str = null;
- /* Result set containing the ID of the inserted */
- ResultSet rs_do = null;
- /* Result set containing the ID of the inserted */
- ResultSet rs_b = null;
- /* Result set containing the ID of the inserted */
- ResultSet rs_s = null;
- /* insert TDirOption for request */
- PreparedStatement addDirOption = null;
- /* insert request_Bol for request */
- PreparedStatement addBoL = null;
- PreparedStatement addChild = null;
-
- try {
- // first fill in TDirOption
- str = "INSERT INTO request_DirOption (isSourceADirectory,allLevelRecursive,numOfLevels) VALUES (?,?,?)";
- addDirOption = con.prepareStatement(str, Statement.RETURN_GENERATED_KEYS);
- logWarnings(con.getWarnings());
- addDirOption.setBoolean(1, to.getDirOption());
- logWarnings(addDirOption.getWarnings());
-
- addDirOption.setBoolean(2, to.getAllLevelRecursive());
- logWarnings(addDirOption.getWarnings());
-
- addDirOption.setInt(3, to.getNumLevel());
- logWarnings(addDirOption.getWarnings());
-
- log.trace("BoL CHUNK DAO: addNew; {}", addDirOption.toString());
- addDirOption.execute();
- logWarnings(addDirOption.getWarnings());
-
- rs_do = addDirOption.getGeneratedKeys();
- int id_do = extractID(rs_do);
-
- // second fill in request_BoL... sourceSURL and TDirOption!
- str = "INSERT INTO request_BoL (request_DirOptionID,request_queueID,sourceSURL,normalized_sourceSURL_StFN,sourceSURL_uniqueID) VALUES (?,?,?,?,?)";
- addBoL = con.prepareStatement(str, Statement.RETURN_GENERATED_KEYS);
- logWarnings(con.getWarnings());
- addBoL.setInt(1, id_do);
- logWarnings(addBoL.getWarnings());
-
- addBoL.setInt(2, requestQueueID);
- logWarnings(addBoL.getWarnings());
-
- addBoL.setString(3, to.getFromSURL());
- logWarnings(addBoL.getWarnings());
-
- addBoL.setString(4, to.normalizedStFN());
- logWarnings(addBoL.getWarnings());
-
- addBoL.setInt(5, to.sulrUniqueID());
- logWarnings(addBoL.getWarnings());
-
- log.trace("BoL CHUNK DAO: addNew; {}", addBoL.toString());
- addBoL.execute();
- logWarnings(addBoL.getWarnings());
-
- rs_b = addBoL.getGeneratedKeys();
- int id_g = extractID(rs_b);
-
- // third fill in status_BoL...
- str = "INSERT INTO status_BoL (request_BoLID,statusCode,explanation) VALUES (?,?,?)";
- addChild = con.prepareStatement(str, Statement.RETURN_GENERATED_KEYS);
- logWarnings(con.getWarnings());
- addChild.setInt(1, id_g);
- logWarnings(addChild.getWarnings());
-
- addChild.setInt(2, to.getStatus());
- logWarnings(addChild.getWarnings());
-
- addChild.setString(3, to.getErrString());
- logWarnings(addChild.getWarnings());
-
- log.trace("BoL CHUNK DAO: addNew; " + addChild.toString());
- addChild.execute();
- logWarnings(addChild.getWarnings());
-
- return id_g;
- } finally {
- close(rs_do);
- close(rs_b);
- close(rs_s);
- close(addDirOption);
- close(addBoL);
- close(addChild);
- }
- }
-
- /**
- * Method used to save the changes made to a retrieved BoLChunkDataTO, back
- * into the MySQL DB. Only the fileSize, statusCode and explanation, of
- * status_BoL table are written to the DB. Likewise for the request
- * pinLifetime. In case of any error, an error message gets logged but no
- * exception is thrown.
- */
- public synchronized void update(BoLChunkDataTO to) {
-
- if (!checkConnection()) {
- log.error("BoL CHUNK DAO: update - unable to get a valid connection!");
- return;
- }
- PreparedStatement updateFileReq = null;
- try {
- // ready updateFileReq...
- updateFileReq = con
- .prepareStatement("UPDATE request_queue rq JOIN (status_BoL sb, request_BoL rb) ON (rq.ID=rb.request_queueID AND sb.request_BoLID=rb.ID)"
- + " SET sb.fileSize=?, sb.statusCode=?, sb.explanation=?, rq.pinLifetime=?, rb.normalized_sourceSURL_StFN=?, rb.sourceSURL_uniqueID=?"
- + " WHERE rb.ID=?");
- logWarnings(con.getWarnings());
- updateFileReq.setLong(1, to.getFileSize());
- logWarnings(updateFileReq.getWarnings());
-
- updateFileReq.setInt(2, to.getStatus());
- logWarnings(updateFileReq.getWarnings());
-
- updateFileReq.setString(3, to.getErrString());
- logWarnings(updateFileReq.getWarnings());
-
- updateFileReq.setInt(4, to.getLifeTime());
- logWarnings(updateFileReq.getWarnings());
-
- updateFileReq.setString(5, to.normalizedStFN());
- logWarnings(updateFileReq.getWarnings());
-
- updateFileReq.setInt(6, to.sulrUniqueID());
- logWarnings(updateFileReq.getWarnings());
-
- updateFileReq.setLong(7, to.getPrimaryKey());
- logWarnings(updateFileReq.getWarnings());
- // execute update
- log.trace("BoL CHUNK DAO: update method; {}", updateFileReq.toString());
- updateFileReq.executeUpdate();
- logWarnings(updateFileReq.getWarnings());
- } catch (SQLException e) {
- log.error("BoL CHUNK DAO: Unable to complete update! {}", e.getMessage(), e);
- } finally {
- close(updateFileReq);
- }
- }
-
- /**
- * Updates the request_Bol represented by the received ReducedBoLChunkDataTO
- * by setting its normalized_sourceSURL_StFN and sourceSURL_uniqueID
- *
- * @param chunkTO
- */
- public synchronized void updateIncomplete(ReducedBoLChunkDataTO chunkTO) {
-
- if (!checkConnection()) {
- log.error("BoL CHUNK DAO: updateIncomplete - unable to get a valid connection!");
- return;
- }
- String str = "UPDATE request_BoL SET normalized_sourceSURL_StFN=?, "
- + "sourceSURL_uniqueID=? WHERE ID=?";
- PreparedStatement stmt = null;
- try {
- stmt = con.prepareStatement(str);
- logWarnings(con.getWarnings());
-
- stmt.setString(1, chunkTO.normalizedStFN());
- logWarnings(stmt.getWarnings());
-
- stmt.setInt(2, chunkTO.surlUniqueID());
- logWarnings(stmt.getWarnings());
-
- stmt.setLong(3, chunkTO.primaryKey());
- logWarnings(stmt.getWarnings());
-
- log.trace("BoL CHUNK DAO - update incomplete: {}", stmt.toString());
- stmt.executeUpdate();
- logWarnings(stmt.getWarnings());
- } catch (SQLException e) {
- log.error("BoL CHUNK DAO: Unable to complete update incomplete! {}",
- e.getMessage(), e);
- } finally {
- close(stmt);
- }
- }
-
- /**
- * TODO WARNING! THIS IS A WORK IN PROGRESS!!! Method used to refresh the
- * BoLChunkDataTO information from the MySQL DB. In this first version, only
- * the statusCode is reloaded from the DB. TODO The next version must contains
- * all the information related to the Chunk! In case of any error, an error
- * message gets logged but no exception is thrown.
- */
- public synchronized BoLChunkDataTO refresh(long primary_key) {
-
- if (!checkConnection()) {
- log.error("BoL CHUNK DAO: refresh - unable to get a valid connection!");
- return null;
- }
- String str = null;
- PreparedStatement find = null;
- ResultSet rs = null;
-
- try {
- // get chunks of the request
- str = "SELECT statusCode " + "FROM status_BoL "
- + "WHERE request_BoLID=?";
- find = con.prepareStatement(str);
- logWarnings(con.getWarnings());
- find.setLong(1, primary_key);
-
- logWarnings(find.getWarnings());
- log.trace("BoL CHUNK DAO: refresh status method; " + find.toString());
-
- rs = find.executeQuery();
-
- logWarnings(find.getWarnings());
- BoLChunkDataTO aux = null;
- while (rs.next()) {
- aux = new BoLChunkDataTO();
- aux.setStatus(rs.getInt("statusCode"));
- }
- return aux;
- } catch (SQLException e) {
- log.error("BoL CHUNK DAO: {}", e.getMessage(), e);
- return null;
- } finally {
- close(rs);
- close(find);
- }
- }
-
- /**
- * Method that queries the MySQL DB to find all entries matching the supplied
- * TRequestToken. The Collection contains the corresponding BoLChunkDataTO
- * objects. An initial simple query establishes the list of protocols
- * associated with the request. A second complex query establishes all chunks
- * associated with the request, by properly joining request_queue,
- * request_BoL, status_BoL and request_DirOption. The considered fields are:
- * (1) From status_BoL: the ID field which becomes the TOs primary key, and
- * statusCode. (2) From request_BoL: sourceSURL (3) From request_queue:
- * pinLifetime (4) From request_DirOption: isSourceADirectory,
- * alLevelRecursive, numOfLevels In case of any error, a log gets written and
- * an empty collection is returned. No exception is thrown. NOTE! Chunks in
- * SRM_ABORTED status are NOT returned!
- */
- public synchronized Collection find(TRequestToken requestToken) {
-
- if (!checkConnection()) {
- log.error("BoL CHUNK DAO: find - unable to get a valid connection!");
- return new ArrayList();
- }
- String strToken = requestToken.toString();
- String str = null;
- PreparedStatement find = null;
- ResultSet rs = null;
- try {
- str = "SELECT tp.config_ProtocolsID "
- + "FROM request_TransferProtocols tp JOIN request_queue rq ON tp.request_queueID=rq.ID "
- + "WHERE rq.r_token=?";
-
- find = con.prepareStatement(str);
- logWarnings(con.getWarnings());
-
- ArrayList protocols = new ArrayList();
- find.setString(1, strToken);
- logWarnings(find.getWarnings());
-
- log.trace("BoL CHUNK DAO: find method; {}", find.toString());
- rs = find.executeQuery();
- logWarnings(find.getWarnings());
- while (rs.next()) {
- protocols.add(rs.getString("tp.config_ProtocolsID"));
- }
- close(rs);
- close(find);
-
- // get chunks of the request
- str = "SELECT sb.statusCode, rq.timeStamp, rq.pinLifetime, rq.deferredStartTime, rb.ID, rb.sourceSURL, rb.normalized_sourceSURL_StFN, rb.sourceSURL_uniqueID, d.isSourceADirectory, d.allLevelRecursive, d.numOfLevels "
- + "FROM request_queue rq JOIN (request_BoL rb, status_BoL sb) "
- + "ON (rb.request_queueID=rq.ID AND sb.request_BoLID=rb.ID) "
- + "LEFT JOIN request_DirOption d ON rb.request_DirOptionID=d.ID "
- + "WHERE rq.r_token=? AND sb.statusCode<>?";
- find = con.prepareStatement(str);
- logWarnings(con.getWarnings());
- ArrayList list = new ArrayList();
- find.setString(1, strToken);
- logWarnings(find.getWarnings());
-
- find.setInt(2,
- StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_ABORTED));
- logWarnings(find.getWarnings());
-
- log.trace("BoL CHUNK DAO: find method; {}", find.toString());
- rs = find.executeQuery();
- logWarnings(find.getWarnings());
-
- BoLChunkDataTO chunkDataTO = null;
- while (rs.next()) {
- chunkDataTO = new BoLChunkDataTO();
- chunkDataTO.setStatus(rs.getInt("sb.statusCode"));
- chunkDataTO.setLifeTime(rs.getInt("rq.pinLifetime"));
- chunkDataTO.setDeferredStartTime(rs.getInt("rq.deferredStartTime"));
- chunkDataTO.setRequestToken(strToken);
- chunkDataTO.setTimeStamp(rs.getTimestamp("rq.timeStamp"));
- chunkDataTO.setPrimaryKey(rs.getLong("rb.ID"));
- chunkDataTO.setFromSURL(rs.getString("rb.sourceSURL"));
-
- chunkDataTO.setNormalizedStFN(rs
- .getString("rb.normalized_sourceSURL_StFN"));
- int uniqueID = rs.getInt("rb.sourceSURL_uniqueID");
- if (!rs.wasNull()) {
- chunkDataTO.setSurlUniqueID(new Integer(uniqueID));
- }
-
- chunkDataTO.setDirOption(rs.getBoolean("d.isSourceADirectory"));
- chunkDataTO.setAllLevelRecursive(rs.getBoolean("d.allLevelRecursive"));
- chunkDataTO.setNumLevel(rs.getInt("d.numOfLevels"));
- chunkDataTO.setProtocolList(protocols);
- list.add(chunkDataTO);
- }
- return list;
- } catch (SQLException e) {
- log.error("BOL CHUNK DAO: {}", e.getMessage(), e);
- /* Return empty Collection! */
- return new ArrayList();
- } finally {
- close(rs);
- close(find);
- }
- }
-
- /**
- * Method that returns a Collection of ReducedBoLChunkDataTO associated to the
- * given TRequestToken expressed as String.
- */
- public synchronized Collection findReduced(
- String reqtoken) {
-
- if (!checkConnection()) {
- log.error("BoL CHUNK DAO: findReduced - unable to get a valid connection!");
- return new ArrayList();
- }
- PreparedStatement find = null;
- ResultSet rs = null;
- try {
- // get reduced chunks
- String str = "SELECT sb.statusCode, rb.ID, rb.sourceSURL, rb.normalized_sourceSURL_StFN, rb.sourceSURL_uniqueID "
- + "FROM request_queue rq JOIN (request_BoL rb, status_BoL sb) "
- + "ON (rb.request_queueID=rq.ID AND sb.request_BoLID=rb.ID) "
- + "WHERE rq.r_token=?";
- find = con.prepareStatement(str);
- logWarnings(con.getWarnings());
-
- ArrayList list = new ArrayList();
- find.setString(1, reqtoken);
- logWarnings(find.getWarnings());
-
- log.trace("BoL CHUNK DAO! findReduced with request token; {}", find.toString());
- rs = find.executeQuery();
- logWarnings(find.getWarnings());
-
- ReducedBoLChunkDataTO chunkDataTO = null;
- while (rs.next()) {
- chunkDataTO = new ReducedBoLChunkDataTO();
- chunkDataTO.setStatus(rs.getInt("sb.statusCode"));
- chunkDataTO.setPrimaryKey(rs.getLong("rb.ID"));
- chunkDataTO.setFromSURL(rs.getString("rb.sourceSURL"));
- chunkDataTO.setNormalizedStFN(rs
- .getString("rb.normalized_sourceSURL_StFN"));
- int uniqueID = rs.getInt("rb.sourceSURL_uniqueID");
- if (!rs.wasNull()) {
- chunkDataTO.setSurlUniqueID(uniqueID);
- }
- list.add(chunkDataTO);
- }
- return list;
- } catch (SQLException e) {
- log.error("BOL CHUNK DAO: {}", e.getMessage(), e);
- /* Return empty Collection! */
- return new ArrayList();
- } finally {
- close(rs);
- close(find);
- }
- }
-
- /**
- * Method that returns a Collection of ReducedBoLChunkDataTO associated to the
- * given griduser, and whose SURLs are contained in the supplied array of
- * Strings.
- */
- public synchronized Collection findReduced(
- TRequestToken requestToken, int[] surlUniqueIDs, String[] surls) {
-
- if (!checkConnection()) {
- log.error("BoL CHUNK DAO: findReduced - unable to get a valid connection!");
- return new ArrayList();
- }
- PreparedStatement find = null;
- ResultSet rs = null;
- try {
- /*
- * NOTE: we search also on the fromSurl because otherwise we lost all
- * request_Bol that have not the uniqueID set because are not yet been
- * used by anybody
- */
- // get reduced chunks
- String str = "SELECT sb.statusCode, rb.ID, rb.sourceSURL, rb.normalized_sourceSURL_StFN, rb.sourceSURL_uniqueID "
- + "FROM request_queue rq JOIN (request_BoL rb, status_BoL sb) "
- + "ON (rb.request_queueID=rq.ID AND sb.request_BoLID=rb.ID) "
- + "WHERE rq.r_token=? AND ( rb.sourceSURL_uniqueID IN "
- + makeSURLUniqueIDWhere(surlUniqueIDs)
- + " AND rb.sourceSURL IN "
- + makeSurlString(surls) + " ) ";
- find = con.prepareStatement(str);
- logWarnings(con.getWarnings());
-
- ArrayList list = new ArrayList();
- find.setString(1, requestToken.getValue());
- logWarnings(find.getWarnings());
-
- log.trace("BoL CHUNK DAO! findReduced with griduser+surlarray; {}", find.toString());
- rs = find.executeQuery();
- logWarnings(find.getWarnings());
-
- ReducedBoLChunkDataTO chunkDataTO = null;
- while (rs.next()) {
- chunkDataTO = new ReducedBoLChunkDataTO();
- chunkDataTO.setStatus(rs.getInt("sb.statusCode"));
- chunkDataTO.setPrimaryKey(rs.getLong("rb.ID"));
- chunkDataTO.setFromSURL(rs.getString("rb.sourceSURL"));
- chunkDataTO.setNormalizedStFN(rs
- .getString("rb.normalized_sourceSURL_StFN"));
- int uniqueID = rs.getInt("rb.sourceSURL_uniqueID");
- if (!rs.wasNull()) {
- chunkDataTO.setSurlUniqueID(uniqueID);
- }
- list.add(chunkDataTO);
- }
- return list;
- } catch (SQLException e) {
- log.error("BoL CHUNK DAO: {}", e.getMessage(), e);
- return new ArrayList();
- } finally {
- close(rs);
- close(find);
- }
- }
-
- /**
- * Method that returns a Collection of ReducedBoLChunkDataTO associated to the
- * given griduser, and whose SURLs are contained in the supplied array of
- * Strings.
- */
- public synchronized Collection findReduced(
- String griduser, int[] surlUniqueIDs, String[] surls) {
-
- if (!checkConnection()) {
- log.error("BoL CHUNK DAO: findReduced - unable to get a valid connection!");
- return new ArrayList();
- }
- PreparedStatement find = null;
- ResultSet rs = null;
- try {
- /*
- * NOTE: we search also on the fromSurl because otherwise we lost all
- * request_Bol that have not the uniqueID set because are not yet been
- * used by anybody
- */
- // get reduced chunks
- String str = "SELECT sb.statusCode, rb.ID, rb.sourceSURL, rb.normalized_sourceSURL_StFN, rb.sourceSURL_uniqueID "
- + "FROM request_queue rq JOIN (request_BoL rb, status_BoL sb) "
- + "ON (rb.request_queueID=rq.ID AND sb.request_BoLID=rb.ID) "
- + "WHERE rq.client_dn=? AND ( rb.sourceSURL_uniqueID IN "
- + makeSURLUniqueIDWhere(surlUniqueIDs)
- + " AND rb.sourceSURL IN "
- + makeSurlString(surls) + " ) ";
- find = con.prepareStatement(str);
- logWarnings(con.getWarnings());
-
- ArrayList list = new ArrayList();
- find.setString(1, griduser);
- logWarnings(find.getWarnings());
-
- log.trace("BoL CHUNK DAO! findReduced with griduser+surlarray; {}", find.toString());
- rs = find.executeQuery();
- logWarnings(find.getWarnings());
-
- ReducedBoLChunkDataTO chunkDataTO = null;
- while (rs.next()) {
- chunkDataTO = new ReducedBoLChunkDataTO();
- chunkDataTO.setStatus(rs.getInt("sb.statusCode"));
- chunkDataTO.setPrimaryKey(rs.getLong("rb.ID"));
- chunkDataTO.setFromSURL(rs.getString("rb.sourceSURL"));
- chunkDataTO.setNormalizedStFN(rs
- .getString("rb.normalized_sourceSURL_StFN"));
- int uniqueID = rs.getInt("rb.sourceSURL_uniqueID");
- if (!rs.wasNull()) {
- chunkDataTO.setSurlUniqueID(uniqueID);
- }
- list.add(chunkDataTO);
- }
- return list;
- } catch (SQLException e) {
- log.error("BoL CHUNK DAO: {}", e.getMessage(), e);
- return new ArrayList();
- } finally {
- close(rs);
- close(find);
- }
- }
-
- /**
- * Method that returns the number of BoL requests on the given SURL, that are
- * in SRM_SUCCESS state. This method is intended to be used by BoLChunkCatalog
- * in the isSRM_SUCCESS method invocation. In case of any error, 0 is
- * returned.
- */
- public synchronized int numberInSRM_SUCCESS(int surlUniqueID) {
-
- if (!checkConnection()) {
- log.error("BoL CHUNK DAO: numberInSRM_SUCCESS - unable to get a valid connection!");
- return 0;
- }
- String str = "SELECT COUNT(rb.ID) "
- + "FROM status_BoL sb JOIN request_BoL rb "
- + "ON (sb.request_BoLID=rb.ID) "
- + "WHERE rb.sourceSURL_uniqueID=? AND sb.statusCode=?";
- PreparedStatement find = null;
- ResultSet rs = null;
- try {
- find = con.prepareStatement(str);
- logWarnings(con.getWarnings());
- /* Prepared statement spares DB-specific String notation! */
- find.setInt(1, surlUniqueID);
- logWarnings(find.getWarnings());
-
- find.setInt(2,
- StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_SUCCESS));
- logWarnings(find.getWarnings());
-
- log.trace("BoL CHUNK DAO - numberInSRM_SUCCESS method: {}", find.toString());
- rs = find.executeQuery();
- logWarnings(find.getWarnings());
-
- int numberFileSuccessful = 0;
- if (rs.next()) {
- numberFileSuccessful = rs.getInt(1);
- }
- return numberFileSuccessful;
- } catch (SQLException e) {
- log.error("BoL CHUNK DAO! Unable to determine numberInSRM_SUCCESS! "
- + "Returning 0! ", e.getMessage(), e);
- return 0;
- } finally {
- close(rs);
- close(find);
- }
- }
-
- /**
- * Method used in extraordinary situations to signal that data retrieved from
- * the DB was malformed and could not be translated into the StoRM object
- * model. This method attempts to change the status of the request to
- * SRM_FAILURE and record it in the DB. This operation could potentially fail
- * because the source of the malformed problems could be a problematic DB;
- * indeed, initially only log messages where recorded. Yet it soon became
- * clear that the source of malformed data were the clients and/or FE
- * recording info in the DB. In these circumstances the client would see its
- * request as being in the SRM_IN_PROGRESS state for ever. Hence the pressing
- * need to inform it of the encountered problems.
- */
- public synchronized void signalMalformedBoLChunk(BoLChunkDataTO auxTO) {
-
- if (!checkConnection()) {
- log.error("BoL CHUNK DAO: signalMalformedBoLChunk - unable to get a valid connection!");
- return;
- }
- String signalSQL = "UPDATE status_BoL SET statusCode="
- + StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_FAILURE)
- + ", explanation=? WHERE request_BoLID=" + auxTO.getPrimaryKey();
- PreparedStatement signal = null;
- try {
- signal = con.prepareStatement(signalSQL);
- logWarnings(con.getWarnings());
- /* Prepared statement spares DB-specific String notation! */
- signal.setString(1, "Request is malformed!");
- logWarnings(signal.getWarnings());
-
- log.trace("BoL CHUNK DAO: signalMalformed; {}", signal.toString());
- signal.executeUpdate();
- logWarnings(signal.getWarnings());
- } catch (SQLException e) {
- log.error("BoLChunkDAO! Unable to signal in DB that the request was "
- + "malformed! Request: {}; Exception: {}", auxTO.toString(),
- e.toString(), e);
- } finally {
- close(signal);
- }
- }
-
- /**
- * Method that updates all expired requests in SRM_SUCCESS state, into
- * SRM_RELEASED. This is needed when the client forgets to invoke
- * srmReleaseFiles().
- *
- * @return
- */
- public synchronized List transitExpiredSRM_SUCCESS() {
-
- if (!checkConnection()) {
- log.error("BoL CHUNK DAO: transitExpiredSRM_SUCCESS - unable to get a valid connection!");
- return new ArrayList();
- }
-
- HashMap expiredSurlMap = new HashMap();
- String str = null;
- PreparedStatement prepStatement = null;
-
- /* Find all expired surls */
- try {
- // start transaction
- con.setAutoCommit(false);
-
- str = "SELECT rb.sourceSURL , rb.sourceSURL_uniqueID FROM "
- + "request_BoL rb JOIN (status_BoL sb, request_queue rq) ON sb.request_BoLID=rb.ID AND rb.request_queueID=rq.ID "
- + "WHERE sb.statusCode=?"
- + " AND UNIX_TIMESTAMP(NOW())-UNIX_TIMESTAMP(rq.timeStamp) >= rq.pinLifetime ";
-
- prepStatement = con.prepareStatement(str);
- prepStatement.setInt(1,
- StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_SUCCESS));
-
- ResultSet res = prepStatement.executeQuery();
- logWarnings(prepStatement.getWarnings());
-
- while (res.next()) {
- String sourceSURL = res.getString("rb.sourceSURL");
- Integer uniqueID = new Integer(res.getInt("rb.sourceSURL_uniqueID"));
- /* If the uniqueID is not setted compute it */
- if (res.wasNull()) {
- try {
- TSURL tsurl = TSURL.makeFromStringWellFormed(sourceSURL);
- uniqueID = tsurl.uniqueId();
- } catch (InvalidTSURLAttributesException e) {
- log.warn("BoLChunkDAO! unable to build the TSURL from {}: "
- + "InvalidTSURLAttributesException ", sourceSURL, e.getMessage());
- }
- }
- expiredSurlMap.put(sourceSURL, uniqueID);
- }
-
- if (expiredSurlMap.isEmpty()) {
- commit(con);
- log.trace("BoLChunkDAO! No chunk of BoL request was transited from "
- + "SRM_SUCCESS to SRM_RELEASED.");
- return new ArrayList();
- }
- } catch (SQLException e) {
- log.error("BoLChunkDAO! SQLException.", e.getMessage(), e);
- rollback(con);
- return new ArrayList();
- } finally {
- close(prepStatement);
- }
-
- /* Update status of all successful surls to SRM_RELEASED */
-
- prepStatement = null;
- try {
-
- str = "UPDATE "
- + "status_BoL sb JOIN (request_BoL rb, request_queue rq) ON sb.request_BoLID=rb.ID AND rb.request_queueID=rq.ID "
- + "SET sb.statusCode=? "
- + "WHERE sb.statusCode=? AND UNIX_TIMESTAMP(NOW())-UNIX_TIMESTAMP(rq.timeStamp) >= rq.pinLifetime ";
-
- prepStatement = con.prepareStatement(str);
- logWarnings(con.getWarnings());
-
- prepStatement.setInt(1,
- StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_RELEASED));
- logWarnings(prepStatement.getWarnings());
-
- prepStatement.setInt(2,
- StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_SUCCESS));
- logWarnings(prepStatement.getWarnings());
-
- log.trace("BoL CHUNK DAO - transitExpiredSRM_SUCCESS method: {}",
- prepStatement.toString());
-
- int count = prepStatement.executeUpdate();
- logWarnings(prepStatement.getWarnings());
-
- if (count == 0) {
- log.trace("BoLChunkDAO! No chunk of BoL request was"
- + " transited from SRM_SUCCESS to SRM_RELEASED.");
- } else {
- log.info("BoLChunkDAO! {} chunks of BoL requests were transited from "
- + "SRM_SUCCESS to SRM_RELEASED.", count);
- }
- } catch (SQLException e) {
- log.error("BoLChunkDAO! Unable to transit expired SRM_SUCCESS chunks of "
- + "BoL requests, to SRM_RELEASED! ", e.getMessage(), e);
- rollback(con);
- return new ArrayList();
- } finally {
- close(prepStatement);
- }
-
- /*
- * in order to enhance performance here we can check if there is any file
- * system with tape (T1D0, T1D1), if there is not any we can skip the
- * following
- */
-
- /* Find all not expired surls from PtG */
-
- HashSet pinnedSurlSet = new HashSet();
- try {
- // SURLs pinned by BoLs
- str = "SELECT rb.sourceSURL , rb.sourceSURL_uniqueID FROM "
- + "request_BoL rb JOIN (status_BoL sb, request_queue rq) ON sb.request_BoLID=rb.ID AND rb.request_queueID=rq.ID "
- + "WHERE sb.statusCode="
- + StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_SUCCESS)
- + " AND UNIX_TIMESTAMP(NOW())-UNIX_TIMESTAMP(rq.timeStamp) < rq.pinLifetime ";
-
- ResultSet res = null;
-
- prepStatement = con.prepareStatement(str);
- res = prepStatement.executeQuery();
- logWarnings(prepStatement.getWarnings());
-
- while (res.next()) {
- String sourceSURL = res.getString("rb.sourceSURL");
- Integer uniqueID = new Integer(res.getInt("rb.sourceSURL_uniqueID"));
- /* If the uniqueID is not setted compute it */
- if (res.wasNull()) {
- try {
- TSURL tsurl = TSURL.makeFromStringWellFormed(sourceSURL);
- uniqueID = tsurl.uniqueId();
- } catch (InvalidTSURLAttributesException e) {
- log.warn("BoLChunkDAO! unable to build the TSURL from {}: "
- + "InvalidTSURLAttributesException ", sourceSURL, e.getMessage());
- }
- }
- pinnedSurlSet.add(uniqueID);
- }
-
- close(prepStatement);
-
- str = "SELECT rg.sourceSURL , rg.sourceSURL_uniqueID FROM "
- + "request_Get rg JOIN (status_Get sg, request_queue rq) ON sg.request_GetID=rg.ID AND rg.request_queueID=rq.ID "
- + "WHERE sg.statusCode=?"
- + " AND UNIX_TIMESTAMP(NOW())-UNIX_TIMESTAMP(rq.timeStamp) < rq.pinLifetime ";
-
- prepStatement = con.prepareStatement(str);
-
- prepStatement.setInt(1,
- StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_FILE_PINNED));
-
- res = prepStatement.executeQuery();
- logWarnings(prepStatement.getWarnings());
-
- while (res.next()) {
- String sourceSURL = res.getString("rg.sourceSURL");
- Integer uniqueID = new Integer(res.getInt("rg.sourceSURL_uniqueID"));
- /* If the uniqueID is not setted compute it */
- if (res.wasNull()) {
- try {
- TSURL tsurl = TSURL.makeFromStringWellFormed(sourceSURL);
- uniqueID = tsurl.uniqueId();
- } catch (InvalidTSURLAttributesException e) {
- log.warn("BoLChunkDAO! unable to build the TSURL from {}: "
- + "InvalidTSURLAttributesException {}", sourceSURL, e.getMessage());
- }
- }
- pinnedSurlSet.add(uniqueID);
- }
-
- commit(con);
-
- } catch (SQLException e) {
- log.error("BoLChunkDAO! SQLException. {}", e.getMessage(), e);
- rollback(con);
- } finally {
- close(prepStatement);
- }
-
- /* Remove the Extended Attribute pinned if there is not a valid surl on it */
- ArrayList expiredSurlList = new ArrayList();
- TSURL surl;
- for (Entry surlEntry : expiredSurlMap.entrySet()) {
- if (!pinnedSurlSet.contains(surlEntry.getValue())) {
- try {
- surl = TSURL.makeFromStringValidate(surlEntry.getKey());
- } catch (InvalidTSURLAttributesException e) {
- log.error("Invalid SURL, cannot release the pin "
- + "(Extended Attribute): {}", surlEntry.getKey());
- continue;
- }
- expiredSurlList.add(surl);
- StoRI stori;
- try {
- stori = NamespaceDirector.getNamespace().resolveStoRIbySURL(surl);
- } catch (Throwable e) {
- log.error("Invalid SURL {} cannot release the pin. {}: {}",
- surlEntry.getKey(), e.getClass().getCanonicalName(), e.getMessage());
- continue;
- }
-
- if (stori.getVirtualFileSystem().getStorageClassType().isTapeEnabled()) {
- StormEA.removePinned(stori.getAbsolutePath());
- }
- }
- }
- return expiredSurlList;
- }
-
- /**
- * Method that transits chunks in SRM_SUCCESS to SRM_ABORTED, for the given
- * SURL: the overall request status of the requests containing that chunk, is
- * not changed! The TURL is set to null. Beware, that the chunks may be part
- * of requests that have finished, or that still have not finished because
- * other chunks are still being processed.
- */
- public synchronized void transitSRM_SUCCESStoSRM_ABORTED(int surlUniqueID,
- String surl, String explanation) {
-
- if (!checkConnection()) {
- log.error("BoL CHUNK DAO: transitSRM_SUCCESStoSRM_ABORTED - unable to get a valid connection!");
- return;
- }
- String str = "UPDATE "
- + "status_BoL sb JOIN request_BoL rb ON sb.request_BoLID=rb.ID "
- + "SET sb.statusCode=?, sb.explanation=?, sb.transferURL=NULL "
- + "WHERE sb.statusCode=? AND (rb.sourceSURL_uniqueID=? OR rb.targetSURL=?)";
- PreparedStatement stmt = null;
- try {
- stmt = con.prepareStatement(str);
- logWarnings(con.getWarnings());
- stmt.setInt(1,
- StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_ABORTED));
- logWarnings(stmt.getWarnings());
-
- stmt.setString(2, explanation);
- logWarnings(stmt.getWarnings());
-
- stmt.setInt(3,
- StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_SUCCESS));
- logWarnings(stmt.getWarnings());
-
- stmt.setInt(4, surlUniqueID);
- logWarnings(stmt.getWarnings());
-
- stmt.setString(5, surl);
- logWarnings(stmt.getWarnings());
-
- log.trace("BoL CHUNK DAO - transitSRM_SUCCESStoSRM_ABORTED: {}", stmt.toString());
- int count = stmt.executeUpdate();
- logWarnings(stmt.getWarnings());
- if (count > 0) {
- log.info("BoL CHUNK DAO! {} chunks were transited from SRM_SUCCESS "
- + "to SRM_ABORTED.", count);
- } else {
- log.trace("BoL CHUNK DAO! No chunks were transited from SRM_SUCCESS "
- + "to SRM_ABORTED.");
- }
- } catch (SQLException e) {
- log.error("BoL CHUNK DAO! Unable to transitSRM_SUCCESStoSRM_ABORTED! {}",
- e.getMessage(), e);
- } finally {
- close(stmt);
- }
- }
-
- /**
- * Method that updates all chunks in SRM_SUCCESS state, into SRM_RELEASED. An
- * array of long representing the primary key of each chunk is required: only
- * they get the status changed provided their current status is SRM_SUCCESS.
- * This method is used during srmReleaseFiles In case of any error nothing
- * happens and no exception is thrown, but proper messages get logged.
- */
- public synchronized void transitSRM_SUCCESStoSRM_RELEASED(long[] ids) {
-
- if (!checkConnection()) {
- log.error("BoL CHUNK DAO: transitSRM_SUCCESStoSRM_RELEASED - unable to get a valid connection!");
- return;
- }
- String str = "UPDATE status_BoL SET statusCode=? "
- + "WHERE statusCode=? AND request_BoLID IN " + makeWhereString(ids);
- PreparedStatement stmt = null;
- try {
- stmt = con.prepareStatement(str);
- logWarnings(con.getWarnings());
- stmt.setInt(1,
- StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_RELEASED));
- logWarnings(stmt.getWarnings());
-
- stmt.setInt(2,
- StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_SUCCESS));
- logWarnings(stmt.getWarnings());
-
- log.trace("BoL CHUNK DAO - transitSRM_SUCCESStoSRM_RELEASED: {}",
- stmt.toString());
- int count = stmt.executeUpdate();
- logWarnings(stmt.getWarnings());
- if (count == 0) {
- log.trace("BoL CHUNK DAO! No chunk of BoL request "
- + "was transited from SRM_SUCCESS to SRM_RELEASED.");
- } else {
- log.info("BoL CHUNK DAO! {} chunks of BoL requests were transited "
- + "from SRM_SUCCESS to SRM_RELEASED.", count);
- }
- } catch (SQLException e) {
- log.error("BoL CHUNK DAO! Unable to transit chunks from SRM_SUCCESS "
- + "to SRM_RELEASED! {}", e.getMessage(), e);
- } finally {
- close(stmt);
- }
- }
-
- public synchronized void transitSRM_SUCCESStoSRM_RELEASED(long[] ids,
- TRequestToken token) {
-
- if (token == null) {
- transitSRM_SUCCESStoSRM_RELEASED(ids);
- } else {
- /*
- * If a request token has been specified, only the related BoL requests
- * have to be released. This is done adding the r.r_token="..." clause in
- * the where subquery.
- */
- if (!checkConnection()) {
- log.error("BoL CHUNK DAO: transitSRM_SUCCESStoSRM_RELEASED - unable to get a valid connection!");
- return;
- }
- String str = "UPDATE "
- + "status_BoL sb JOIN (request_BoL rb, request_queue rq) ON sb.request_BoLID=rb.ID AND rb.request_queueID=rq.ID "
- + "SET sb.statusCode=? " + "WHERE sb.statusCode=? AND rq.r_token='"
- + token.toString() + "' AND rb.ID IN " + makeWhereString(ids);
- PreparedStatement stmt = null;
- try {
- stmt = con.prepareStatement(str);
- logWarnings(con.getWarnings());
- stmt.setInt(1,
- StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_RELEASED));
- logWarnings(stmt.getWarnings());
-
- stmt.setInt(2,
- StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_SUCCESS));
- logWarnings(stmt.getWarnings());
-
- log.trace("BoL CHUNK DAO - transitSRM_SUCCESStoSRM_RELEASED: {}",
- stmt.toString());
- int count = stmt.executeUpdate();
- logWarnings(stmt.getWarnings());
- if (count == 0) {
- log.trace("BoL CHUNK DAO! No chunk of BoL request was "
- + "transited from SRM_SUCCESS to SRM_RELEASED.");
- } else {
- log.info("BoL CHUNK DAO! {} chunks of BoL requests were transited "
- + "from SRM_SUCCESS to SRM_RELEASED.", count);
- }
- } catch (SQLException e) {
- log.error("BoL CHUNK DAO! Unable to transit chunks "
- + "from SRM_SUCCESS to SRM_RELEASED! {}", e.getMessage(), e);
- } finally {
- close(stmt);
- }
- }
- }
-
- /**
- * Auxiliary method used to close a ResultSet
- */
- private void close(ResultSet rset) {
-
- if (rset != null) {
- try {
- rset.close();
- } catch (Exception e) {
- log.error("BoL CHUNK DAO! Unable to close ResultSet! Exception: " + e);
- }
- }
- }
-
- /**
- * Auxiliary method used to close a Statement
- */
- private void close(Statement stmt) {
-
- if (stmt != null) {
- try {
- stmt.close();
- } catch (Exception e) {
- log.error("BoL CHUNK DAO! Unable to close Statement {} - Exception: {}",
- stmt.toString(), e.getMessage(), e);
- }
- }
- }
-
- private void commit(Connection con) {
-
- if (con != null) {
- try {
- con.commit();
- con.setAutoCommit(true);
- } catch (SQLException e) {
- log.error("BoL, SQL EXception {}", e.getMessage(), e);
- }
- }
- }
-
- /**
- * Auxiliary method used to roll back a failed transaction
- */
- private void rollback(Connection con) {
-
- if (con != null) {
- try {
- con.rollback();
- con.setAutoCommit(true);
- log.error("BoL CHUNK DAO: roll back successful!");
- } catch (SQLException e2) {
- log.error("BoL CHUNK DAO: roll back failed! {}", e2.getMessage(), e2);
- }
- }
- }
-
- /**
- * Private method that returns the generated ID: it throws an exception in
- * case of any problem!
- */
- private int extractID(ResultSet rs) throws Exception {
-
- if (rs == null) {
- throw new Exception("BoL CHUNK DAO! Null ResultSet!");
- }
- if (rs.next()) {
- return rs.getInt(1);
- }
- log.error("BoL CHUNK DAO! It was not possible to establish "
- + "the assigned autoincrement primary key!");
- throw new Exception(
- "BoL CHUNK DAO! It was not possible to establish the assigned autoincrement primary key!");
- }
-
- /**
- * Auxiliary private method that logs all SQL warnings.
- */
- private void logWarnings(SQLWarning w) {
-
- if (w != null) {
- log.debug("BoL CHUNK DAO: {}", w.toString());
- while ((w = w.getNextWarning()) != null) {
- log.debug("BoL CHUNK DAO: {}", w.toString());
- }
- }
- }
-
- /**
- * Method that returns a String containing all IDs.
- */
- private String makeWhereString(long[] rowids) {
-
- StringBuilder sb = new StringBuilder("(");
- int n = rowids.length;
- for (int i = 0; i < n; i++) {
- sb.append(rowids[i]);
- if (i < (n - 1)) {
- sb.append(",");
- }
- }
- sb.append(")");
- return sb.toString();
- }
-
- /**
- * Method that returns a String containing all Surl's IDs.
- */
- private String makeSURLUniqueIDWhere(int[] surlUniqueIDs) {
-
- StringBuilder sb = new StringBuilder("(");
- for (int i = 0; i < surlUniqueIDs.length; i++) {
- if (i > 0) {
- sb.append(",");
- }
- sb.append(surlUniqueIDs[i]);
- }
- sb.append(")");
- return sb.toString();
- }
-
- /**
- * Method that returns a String containing all Surls.
- */
- private String makeSurlString(String[] surls) {
-
- StringBuilder sb = new StringBuilder("(");
- int n = surls.length;
-
- for (int i = 0; i < n; i++) {
-
- SURL requestedSURL;
-
- try {
- requestedSURL = SURL.makeSURLfromString(surls[i]);
- } catch (NamespaceException e) {
- log.error(e.getMessage());
- log.debug("Skip '{}' during query creation", surls[i]);
- continue;
- }
-
- sb.append("'");
- sb.append(requestedSURL.getNormalFormAsString());
- sb.append("','");
- sb.append(requestedSURL.getQueryFormAsString());
- sb.append("'");
-
- if (i < (n - 1)) {
- sb.append(",");
- }
- }
-
- sb.append(")");
- return sb.toString();
- }
-
- /**
- * Auxiliary method that sets up the connection to the DB, as well as the
- * prepared statement.
- */
- private boolean setUpConnection() {
-
- boolean response = false;
- try {
- Class.forName(driver);
- con = DriverManager.getConnection(url, name, password);
- logWarnings(con.getWarnings());
- response = con.isValid(0);
- } catch (ClassNotFoundException | SQLException e) {
- log.error("BoL CHUNK DAO! Exception in setUpConnection! {}", e.getMessage(), e);
- }
- return response;
- }
-
- /**
- * Auxiliary method that checks if time for resetting the connection has come,
- * and eventually takes it down and up back again.
- */
- private boolean checkConnection() {
-
- boolean response = true;
- if (reconnect) {
- log.debug("BoL CHUNK DAO! Reconnecting to DB! ");
- takeDownConnection();
- response = setUpConnection();
- if (response) {
- reconnect = false;
- }
- }
- return response;
- }
-
- /**
- * Auxiliary method that tales down a connection to the DB.
- */
- private void takeDownConnection() {
-
- if (con != null) {
- try {
- con.close();
- } catch (SQLException e) {
- log.error("BoL CHUNK DAO! Exception in takeDownConnection method: {}",
- e.getMessage(), e);
- }
- }
- }
-
- public synchronized void updateStatusOnMatchingStatus(
- TRequestToken requestToken, TStatusCode expectedStatusCode,
- TStatusCode newStatusCode, String explanation) {
-
- if (requestToken == null || requestToken.getValue().trim().isEmpty()
- || explanation == null) {
- throw new IllegalArgumentException(
- "Unable to perform the updateStatusOnMatchingStatus, "
- + "invalid arguments: requestToken=" + requestToken + " explanation="
- + explanation);
- }
- doUpdateStatusOnMatchingStatus(requestToken, null, null,
- expectedStatusCode, newStatusCode, explanation, true, false, true);
- }
-
- public synchronized void updateStatusOnMatchingStatus(
- TRequestToken requestToken, int[] surlsUniqueIDs, String[] surls,
- TStatusCode expectedStatusCode, TStatusCode newStatusCode)
- throws IllegalArgumentException {
-
- if (requestToken == null || requestToken.getValue().trim().isEmpty()
- || surlsUniqueIDs == null || surls == null || surlsUniqueIDs.length == 0
- || surls.length == 0 || surlsUniqueIDs.length != surls.length) {
- throw new IllegalArgumentException(
- "Unable to perform the updateStatusOnMatchingStatus, "
- + "invalid arguments: requestToken=" + requestToken
- + "surlsUniqueIDs=" + surlsUniqueIDs + " surls=" + surls);
- }
- doUpdateStatusOnMatchingStatus(requestToken, surlsUniqueIDs, surls,
- expectedStatusCode, newStatusCode, null, true, true, false);
- }
-
- public synchronized void doUpdateStatusOnMatchingStatus(
- TRequestToken requestToken, int[] surlUniqueIDs, String[] surls,
- TStatusCode expectedStatusCode, TStatusCode newStatusCode,
- String explanation, boolean withRequestToken, boolean withSurls,
- boolean withExplanation) throws IllegalArgumentException {
-
- if ((withRequestToken && requestToken == null)
- || (withExplanation && explanation == null)
- || (withSurls && (surlUniqueIDs == null || surls == null))) {
- throw new IllegalArgumentException(
- "Unable to perform the doUpdateStatusOnMatchingStatus, "
- + "invalid arguments: withRequestToken=" + withRequestToken
- + " requestToken=" + requestToken + " withSurls=" + withSurls
- + " surlUniqueIDs=" + surlUniqueIDs + " surls=" + surls
- + " withExplaination=" + withExplanation + " explanation="
- + explanation);
- }
- if (!checkConnection()) {
- log
- .error("BOL CHUNK DAO: updateStatusOnMatchingStatus - unable to get a valid connection!");
- return;
- }
- String str = "UPDATE status_BoL sb JOIN (request_BoL rb, request_queue rq) "
- + "ON sb.request_BoLID=rb.ID AND rb.request_queueID=rq.ID "
- + "SET sb.statusCode=? ";
- if (withExplanation) {
- str += " , " + buildExpainationSet(explanation);
- }
- str += " WHERE sb.statusCode=? ";
- if (withRequestToken) {
- str += " AND " + buildTokenWhereClause(requestToken);
- }
- if (withSurls) {
- str += " AND " + buildSurlsWhereClause(surlUniqueIDs, surls);
- }
- PreparedStatement stmt = null;
- try {
- stmt = con.prepareStatement(str);
- logWarnings(con.getWarnings());
- stmt.setInt(1, StatusCodeConverter.getInstance().toDB(newStatusCode));
- logWarnings(stmt.getWarnings());
-
- stmt
- .setInt(2, StatusCodeConverter.getInstance().toDB(expectedStatusCode));
- logWarnings(stmt.getWarnings());
-
- log.trace("BOL CHUNK DAO - updateStatusOnMatchingStatus: {}", stmt.toString());
- int count = stmt.executeUpdate();
- logWarnings(stmt.getWarnings());
- if (count == 0) {
- log.trace("BOL CHUNK DAO! No chunk of BOL request was updated from {} "
- + "to {}.", expectedStatusCode, newStatusCode);
- } else {
- log.debug("BOL CHUNK DAO! {} chunks of BOL requests were updated "
- + "from {} to {}.", count, expectedStatusCode, newStatusCode);
- }
- } catch (SQLException e) {
- log.error("BOL CHUNK DAO! Unable to updated from {} to {}!",
- expectedStatusCode, newStatusCode, e);
- } finally {
- close(stmt);
- }
- }
-
- public Collection find(int[] surlsUniqueIDs,
- String[] surlsArray, String dn) throws IllegalArgumentException {
-
- if (surlsUniqueIDs == null || surlsUniqueIDs.length == 0
- || surlsArray == null || surlsArray.length == 0 || dn == null) {
- throw new IllegalArgumentException("Unable to perform the find, "
- + "invalid arguments: surlsUniqueIDs=" + surlsUniqueIDs
- + " surlsArray=" + surlsArray + " dn=" + dn);
- }
- return find(surlsUniqueIDs, surlsArray, dn, true);
- }
-
- public Collection find(int[] surlsUniqueIDs,
- String[] surlsArray) throws IllegalArgumentException {
-
- if (surlsUniqueIDs == null || surlsUniqueIDs.length == 0
- || surlsArray == null || surlsArray.length == 0) {
- throw new IllegalArgumentException("Unable to perform the find, "
- + "invalid arguments: surlsUniqueIDs=" + surlsUniqueIDs
- + " surlsArray=" + surlsArray);
- }
- return find(surlsUniqueIDs, surlsArray, null, false);
- }
-
- private synchronized Collection find(int[] surlsUniqueIDs,
- String[] surlsArray, String dn, boolean withDn)
- throws IllegalArgumentException {
-
- if ((withDn && dn == null) || surlsUniqueIDs == null
- || surlsUniqueIDs.length == 0 || surlsArray == null
- || surlsArray.length == 0) {
- throw new IllegalArgumentException("Unable to perform the find, "
- + "invalid arguments: surlsUniqueIDs=" + surlsUniqueIDs
- + " surlsArray=" + surlsArray + " withDn=" + withDn + " dn=" + dn);
- }
- if (!checkConnection()) {
- log.error("BoL CHUNK DAO: find - unable to get a valid connection!");
- return new ArrayList();
- }
- PreparedStatement find = null;
- ResultSet rs = null;
- try {
- // get chunks of the request
- String str = "SELECT rq.ID, rq.r_token, sb.statusCode, rq.timeStamp, rq.pinLifetime, "
- + "rq.deferredStartTime, rb.ID, rb.sourceSURL, rb.normalized_sourceSURL_StFN, "
- + "rb.sourceSURL_uniqueID, d.isSourceADirectory, d.allLevelRecursive, d.numOfLevels "
- + "FROM request_queue rq JOIN (request_BoL rb, status_BoL sb) "
- + "ON (rb.request_queueID=rq.ID AND sb.request_BoLID=rb.ID) "
- + "LEFT JOIN request_DirOption d ON rb.request_DirOptionID=d.ID "
- + "WHERE ( rb.sourceSURL_uniqueID IN "
- + makeSURLUniqueIDWhere(surlsUniqueIDs)
- + " AND rb.sourceSURL IN "
- + makeSurlString(surlsArray) + " )";
- if (withDn) {
- str += " AND rq.client_dn=\'" + dn + "\'";
- }
- find = con.prepareStatement(str);
- logWarnings(con.getWarnings());
-
- List list = new ArrayList();
-
- log.trace("BOL CHUNK DAO - find method: {}", find.toString());
- rs = find.executeQuery();
- logWarnings(find.getWarnings());
- BoLChunkDataTO chunkDataTO = null;
- while (rs.next()) {
-
- chunkDataTO = new BoLChunkDataTO();
- chunkDataTO.setStatus(rs.getInt("sb.statusCode"));
- chunkDataTO.setLifeTime(rs.getInt("rq.pinLifetime"));
- chunkDataTO.setDeferredStartTime(rs.getInt("rq.deferredStartTime"));
- chunkDataTO.setRequestToken(rs.getString("rq.r_token"));
- chunkDataTO.setTimeStamp(rs.getTimestamp("rq.timeStamp"));
- chunkDataTO.setPrimaryKey(rs.getLong("rb.ID"));
- chunkDataTO.setFromSURL(rs.getString("rb.sourceSURL"));
-
- chunkDataTO.setNormalizedStFN(rs
- .getString("rb.normalized_sourceSURL_StFN"));
- int uniqueID = rs.getInt("rb.sourceSURL_uniqueID");
- if (!rs.wasNull()) {
- chunkDataTO.setSurlUniqueID(new Integer(uniqueID));
- }
-
- chunkDataTO.setDirOption(rs.getBoolean("d.isSourceADirectory"));
- chunkDataTO.setAllLevelRecursive(rs.getBoolean("d.allLevelRecursive"));
- chunkDataTO.setNumLevel(rs.getInt("d.numOfLevels"));
-
- list.add(chunkDataTO);
- }
- return list;
- } catch (SQLException e) {
- log.error("BOL CHUNK DAO: {}", e.getMessage(), e);
- /* return empty Collection! */
- return new ArrayList();
- } finally {
- close(rs);
- close(find);
- }
- }
-
- private String buildExpainationSet(String explanation) {
-
- return " sb.explanation='" + explanation + "' ";
- }
-
- private String buildTokenWhereClause(TRequestToken requestToken) {
-
- return " rq.r_token='" + requestToken.toString() + "' ";
- }
-
- private String buildSurlsWhereClause(int[] surlsUniqueIDs, String[] surls) {
-
- return " ( rb.sourceSURL_uniqueID IN "
- + makeSURLUniqueIDWhere(surlsUniqueIDs) + " AND rb.sourceSURL IN "
- + makeSurlString(surls) + " ) ";
- }
-
-}
diff --git a/src/main/java/it/grid/storm/catalogs/BoLChunkDataTO.java b/src/main/java/it/grid/storm/catalogs/BoLChunkDataTO.java
deleted file mode 100644
index 3503e1c7a..000000000
--- a/src/main/java/it/grid/storm/catalogs/BoLChunkDataTO.java
+++ /dev/null
@@ -1,269 +0,0 @@
-/*
- *
- * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010.
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not
- * use this file except in compliance with the License. You may obtain a copy of
- * the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package it.grid.storm.catalogs;
-
-import it.grid.storm.common.types.TURLPrefix;
-import it.grid.storm.namespace.model.Protocol;
-import it.grid.storm.srm.types.TStatusCode;
-
-import java.sql.Timestamp;
-import java.util.List;
-
-/**
- * Class that represents a row in the Persistence Layer: this is all raw data
- * referring to the BoLChunkData proper, that is, String and primitive types.
- *
- * Each field is initialized with default values as per SRM 2.2 specification:
- * protocolList GSIFTP dirOption false status SRM_REQUEST_QUEUED
- *
- * All other fields are 0 if int, or a white space if String.
- *
- * @author CNAF
- * @version 1.0
- * @date Aug 2009
- */
-public class BoLChunkDataTO {
-
- /* Database table request_Bol fields BEGIN */
- private long primaryKey = -1; // ID primary key of record in DB
- private String fromSURL = " ";
- private boolean dirOption; // initialised in constructor
- private String normalizedStFN = null;
- private Integer surlUniqueID = null;
- /* Database table request_Get fields END */
-
- private String requestToken = " ";
- private int lifetime = 0;
- private boolean allLevelRecursive; // initialised in constructor
- private int numLevel; // initialised in constructor
- private List protocolList = null; // initialised in constructor
- private long filesize = 0;
- private int status; // initialised in constructor
- private String errString = " ";
- private int deferredStartTime = -1;
- private Timestamp timeStamp = null;
-
- public BoLChunkDataTO() {
-
- TURLPrefix protocolPreferences = new TURLPrefix();
- protocolPreferences.addProtocol(Protocol.GSIFTP);
- this.protocolList = TransferProtocolListConverter.toDB(protocolPreferences);
- this.status = StatusCodeConverter.getInstance().toDB(
- TStatusCode.SRM_REQUEST_QUEUED);
- this.dirOption = false;
- this.allLevelRecursive = false;
- this.numLevel = 0;
- }
-
- public boolean getAllLevelRecursive() {
-
- return allLevelRecursive;
- }
-
- public int getDeferredStartTime() {
-
- return deferredStartTime;
- }
-
- public boolean getDirOption() {
-
- return dirOption;
- }
-
- public String getErrString() {
-
- return errString;
- }
-
- public long getFileSize() {
-
- return filesize;
- }
-
- public String getFromSURL() {
-
- return fromSURL;
- }
-
- public int getLifeTime() {
-
- return lifetime;
- }
-
- public int getNumLevel() {
-
- return numLevel;
- }
-
- public long getPrimaryKey() {
-
- return primaryKey;
- }
-
- public List getProtocolList() {
-
- return protocolList;
- }
-
- public String getRequestToken() {
-
- return requestToken;
- }
-
- public Timestamp getTimeStamp() {
-
- return timeStamp;
- }
-
- public int getStatus() {
-
- return status;
- }
-
- public void setAllLevelRecursive(boolean b) {
-
- allLevelRecursive = b;
- }
-
- public void setDeferredStartTime(int deferredStartTime) {
-
- this.deferredStartTime = deferredStartTime;
- }
-
- public void setDirOption(boolean b) {
-
- dirOption = b;
- }
-
- public void setErrString(String s) {
-
- errString = s;
- }
-
- public void setFileSize(long n) {
-
- filesize = n;
- }
-
- public void setFromSURL(String s) {
-
- fromSURL = s;
- }
-
- public void setLifeTime(int n) {
-
- lifetime = n;
- }
-
- public void setNumLevel(int n) {
-
- numLevel = n;
- }
-
- public void setPrimaryKey(long n) {
-
- primaryKey = n;
- }
-
- public void setProtocolList(List l) {
-
- if ((l != null) && (!l.isEmpty())) {
- protocolList = l;
- }
- }
-
- public void setRequestToken(String s) {
-
- requestToken = s;
- }
-
- public void setTimeStamp(Timestamp timeStamp) {
-
- this.timeStamp = timeStamp;
- }
-
- public void setStatus(int n) {
-
- status = n;
- }
-
- /**
- * @param normalizedStFN
- * the normalizedStFN to set
- */
- public void setNormalizedStFN(String normalizedStFN) {
-
- this.normalizedStFN = normalizedStFN;
- }
-
- /**
- * @return the normalizedStFN
- */
- public String normalizedStFN() {
-
- return normalizedStFN;
- }
-
- /**
- * @param surlUniqueID
- * the sURLUniqueID to set
- */
- public void setSurlUniqueID(Integer surlUniqueID) {
-
- this.surlUniqueID = surlUniqueID;
- }
-
- /**
- * @return the sURLUniqueID
- */
- public Integer sulrUniqueID() {
-
- return surlUniqueID;
- }
-
- public String toString() {
-
- StringBuilder sb = new StringBuilder();
- sb.append(primaryKey);
- sb.append(" ");
- sb.append(requestToken);
- sb.append(" ");
- sb.append(fromSURL);
- sb.append(" ");
- sb.append(normalizedStFN);
- sb.append(" ");
- sb.append(surlUniqueID);
- sb.append(" ");
- sb.append(lifetime);
- sb.append(" ");
- sb.append(dirOption);
- sb.append(" ");
- sb.append(allLevelRecursive);
- sb.append(" ");
- sb.append(numLevel);
- sb.append(" ");
- sb.append(protocolList);
- sb.append(" ");
- sb.append(filesize);
- sb.append(" ");
- sb.append(status);
- sb.append(" ");
- sb.append(errString);
- return sb.toString();
- }
-}
diff --git a/src/main/java/it/grid/storm/catalogs/BoLData.java b/src/main/java/it/grid/storm/catalogs/BoLData.java
deleted file mode 100644
index a96bd1d6f..000000000
--- a/src/main/java/it/grid/storm/catalogs/BoLData.java
+++ /dev/null
@@ -1,157 +0,0 @@
-/*
- *
- * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010.
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not
- * use this file except in compliance with the License. You may obtain a copy of
- * the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package it.grid.storm.catalogs;
-
-import it.grid.storm.common.types.TURLPrefix;
-import it.grid.storm.common.types.TimeUnit;
-import it.grid.storm.srm.types.TDirOption;
-import it.grid.storm.srm.types.TLifeTimeInSeconds;
-import it.grid.storm.srm.types.TReturnStatus;
-import it.grid.storm.srm.types.TSURL;
-import it.grid.storm.srm.types.TSizeInBytes;
-import it.grid.storm.srm.types.TStatusCode;
-import it.grid.storm.srm.types.TTURL;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * This class represents a BringOnLineChunkData, that is part of a multifile
- * BringOnLine srm request. It contains data about: the requestToken, the
- * fromSURL, the requested lifeTime of pinning, the TDirOption which tells
- * whether the requested SURL is a directory and if it must be recursed at all
- * levels, as well as the desired number of levels to recurse, the desired
- * transferProtocols in order of preference, the fileSize, and the transferURL
- * for the supplied SURL.
- *
- * @author CNAF
- * @version 1.0
- * @date Aug 2009
- */
-public class BoLData extends AnonymousFileTransferData {
-
- private static final Logger log = LoggerFactory.getLogger(BoLData.class);
-
- /**
- * requested lifetime of TURL: it is the pin time!
- */
- private TLifeTimeInSeconds lifeTime;
-
- /**
- * specifies if the request regards a directory and related info
- */
- private TDirOption dirOption;
-
- /**
- * size of file
- */
- private TSizeInBytes fileSize;
-
- /**
- * how many seconds to wait before to make the lifeTime start consuming
- */
- private int deferredStartTime = 0;
-
- public BoLData(TSURL fromSURL, TLifeTimeInSeconds lifeTime,
- TDirOption dirOption, TURLPrefix desiredProtocols, TSizeInBytes fileSize,
- TReturnStatus status, TTURL transferURL, int deferredStartTime)
- throws InvalidFileTransferDataAttributesException,
- InvalidBoLDataAttributesException,
- InvalidSurlRequestDataAttributesException {
-
- super(fromSURL, desiredProtocols, status, transferURL);
- if (lifeTime == null || dirOption == null || fileSize == null) {
- throw new InvalidBoLDataAttributesException(fromSURL, lifeTime,
- dirOption, desiredProtocols, fileSize, status, transferURL);
- }
- this.lifeTime = lifeTime;
- this.dirOption = dirOption;
- this.fileSize = fileSize;
- this.deferredStartTime = deferredStartTime;
- }
-
- /**
- * Method that sets the status of this request to SRM_FILE_PINNED; it needs
- * the explanation String which describes the situation in greater detail; if
- * a null is passed, then an empty String is used as explanation.
- */
- public void changeStatusSRM_FILE_PINNED(String explanation) {
-
- setStatus(TStatusCode.SRM_FILE_PINNED, explanation);
- }
-
- public int getDeferredStartTime() {
-
- return deferredStartTime;
- }
-
- /**
- * Method that returns the dirOption specified in the srm request.
- */
- public TDirOption getDirOption() {
-
- return dirOption;
- }
-
- /**
- * Method that returns the file size for this chunk of the srm request.
- */
- public TSizeInBytes getFileSize() {
-
- return fileSize;
- }
-
- /**
- * Method that returns the requested pin life time for this chunk of the srm
- * request.
- */
- public TLifeTimeInSeconds getLifeTime() {
-
- return lifeTime;
- }
-
- public void setDeferredStartTime(int deferredStartTime) {
-
- this.deferredStartTime = deferredStartTime;
- }
-
- /**
- * Method used to set the size of the file corresponding to the requested
- * SURL. If the supplied TSizeInByte is null, then nothing gets set!
- */
- public void setFileSize(TSizeInBytes size) {
-
- if (size != null) {
- fileSize = size;
- }
- }
-
- public void setLifeTime(long lifeTimeInSeconds) {
-
- TLifeTimeInSeconds lifeTime;
- try {
- lifeTime = TLifeTimeInSeconds.make(lifeTimeInSeconds, TimeUnit.SECONDS);
- } catch (IllegalArgumentException e) {
- log.error(e.getMessage(), e);
- return;
- }
-
- this.lifeTime = lifeTime;
- }
-
-}
diff --git a/src/main/java/it/grid/storm/catalogs/BoLPersistentChunkData.java b/src/main/java/it/grid/storm/catalogs/BoLPersistentChunkData.java
deleted file mode 100644
index 651686cba..000000000
--- a/src/main/java/it/grid/storm/catalogs/BoLPersistentChunkData.java
+++ /dev/null
@@ -1,114 +0,0 @@
-/*
- *
- * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010.
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not
- * use this file except in compliance with the License. You may obtain a copy of
- * the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package it.grid.storm.catalogs;
-
-import it.grid.storm.common.types.TURLPrefix;
-import it.grid.storm.srm.types.TDirOption;
-import it.grid.storm.srm.types.TLifeTimeInSeconds;
-import it.grid.storm.srm.types.TRequestToken;
-import it.grid.storm.srm.types.TReturnStatus;
-import it.grid.storm.srm.types.TSURL;
-import it.grid.storm.srm.types.TSizeInBytes;
-import it.grid.storm.srm.types.TTURL;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * This class represents a BringOnLineChunkData, that is part of a multifile
- * BringOnLine srm request. It contains data about: the requestToken, the
- * fromSURL, the requested lifeTime of pinning, the TDirOption which tells
- * whether the requested SURL is a directory and if it must be recursed at all
- * levels, as well as the desired number of levels to recurse, the desired
- * transferProtocols in order of preference, the fileSize, and the transferURL
- * for the supplied SURL.
- *
- * @author CNAF
- * @version 1.0
- * @date Aug 2009
- */
-public class BoLPersistentChunkData extends BoLData implements
- PersistentChunkData {
-
- private static final Logger log = LoggerFactory
- .getLogger(BoLPersistentChunkData.class);
-
- /**
- * long representing the primary key for the persistence layer, in the
- * status_Put table
- */
- private long primaryKey = -1;
-
- /**
- * This is the requestToken of the multifile srm request to which this chunk
- * belongs
- */
- private final TRequestToken requestToken;
-
- public BoLPersistentChunkData(TRequestToken requestToken, TSURL fromSURL,
- TLifeTimeInSeconds lifeTime, TDirOption dirOption,
- TURLPrefix desiredProtocols, TSizeInBytes fileSize, TReturnStatus status,
- TTURL transferURL, int deferredStartTime)
- throws InvalidBoLPersistentChunkDataAttributesException,
- InvalidFileTransferDataAttributesException,
- InvalidBoLDataAttributesException,
- InvalidSurlRequestDataAttributesException {
-
- super(fromSURL, lifeTime, dirOption, desiredProtocols, fileSize, status,
- transferURL, deferredStartTime);
- if (requestToken == null) {
- log.debug("BoLPersistentChunkData: requestToken is null!");
- throw new InvalidBoLPersistentChunkDataAttributesException(requestToken,
- fromSURL, lifeTime, dirOption, desiredProtocols, fileSize, status,
- transferURL);
- }
- this.requestToken = requestToken;
- }
-
- /**
- * Method that returns the requestToken of the srm request to which this chunk
- * belongs.
- */
- public TRequestToken getRequestToken() {
-
- return requestToken;
- }
-
- /**
- * Method used to get the primary key used in the persistence layer!
- */
- @Override
- public long getPrimaryKey() {
-
- return primaryKey;
- }
-
- /**
- * Method used to set the primary key to be used in the persistence layer!
- */
- public void setPrimaryKey(long l) {
-
- primaryKey = l;
- }
-
- @Override
- public long getIdentifier() {
-
- return getPrimaryKey();
- }
-}
diff --git a/src/main/java/it/grid/storm/catalogs/ChunkDAOUtils.java b/src/main/java/it/grid/storm/catalogs/ChunkDAOUtils.java
index d7775310f..e8be1daf4 100644
--- a/src/main/java/it/grid/storm/catalogs/ChunkDAOUtils.java
+++ b/src/main/java/it/grid/storm/catalogs/ChunkDAOUtils.java
@@ -1,30 +1,9 @@
package it.grid.storm.catalogs;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.sql.SQLWarning;
-
public class ChunkDAOUtils {
- private static final Logger log = LoggerFactory.getLogger(ChunkDAOUtils.class);
-
protected ChunkDAOUtils() {}
- public static void printWarnings(SQLWarning warning) {
-
- if (warning != null) {
- log.warn("---Warning---");
-
- while (warning != null) {
- log.warn("Message: {}", warning.getMessage());
- log.warn("SQLState: {}", warning.getSQLState());
- log.warn("Vendor error code: {}", warning.getErrorCode());
- warning = warning.getNextWarning();
- }
- }
- }
-
public static String buildInClauseForArray(int size) {
StringBuilder b = new StringBuilder();
for (int i=1; i<=size; i++) {
diff --git a/src/main/java/it/grid/storm/catalogs/ChunkData.java b/src/main/java/it/grid/storm/catalogs/ChunkData.java
deleted file mode 100644
index c79c4f406..000000000
--- a/src/main/java/it/grid/storm/catalogs/ChunkData.java
+++ /dev/null
@@ -1,11 +0,0 @@
-package it.grid.storm.catalogs;
-
-public interface ChunkData extends RequestData {
-
- /**
- * Method that returns the primary key in persistence, associated with This
- * Chunk.
- */
- public long getIdentifier();
-
-}
diff --git a/src/main/java/it/grid/storm/catalogs/CopyChunkCatalog.java b/src/main/java/it/grid/storm/catalogs/CopyChunkCatalog.java
deleted file mode 100644
index e09798ce8..000000000
--- a/src/main/java/it/grid/storm/catalogs/CopyChunkCatalog.java
+++ /dev/null
@@ -1,489 +0,0 @@
-/*
- *
- * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010.
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not
- * use this file except in compliance with the License. You may obtain a copy of
- * the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package it.grid.storm.catalogs;
-
-import it.grid.storm.common.types.TimeUnit;
-import it.grid.storm.griduser.GridUserInterface;
-// import it.grid.storm.namespace.SurlStatusStore;
-import it.grid.storm.srm.types.InvalidTRequestTokenAttributesException;
-import it.grid.storm.srm.types.InvalidTSURLAttributesException;
-import it.grid.storm.srm.types.InvalidTSpaceTokenAttributesException;
-import it.grid.storm.srm.types.TFileStorageType;
-import it.grid.storm.srm.types.TLifeTimeInSeconds;
-import it.grid.storm.srm.types.TOverwriteMode;
-import it.grid.storm.srm.types.TRequestToken;
-import it.grid.storm.srm.types.TReturnStatus;
-import it.grid.storm.srm.types.TSURL;
-import it.grid.storm.srm.types.TSpaceToken;
-import it.grid.storm.srm.types.TStatusCode;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.List;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Class that represents StoRMs CopyChunkCatalog: it collects CopyChunkData and
- * provides methods for looking up a CopyChunkData based on TRequestToken, as
- * well as for updating an existing one.
- *
- * @author EGRID - ICTP Trieste
- * @date september, 2005
- * @version 2.0
- */
-public class CopyChunkCatalog {
-
- private static final Logger log = LoggerFactory
- .getLogger(CopyChunkCatalog.class);
-
- /* only instance of CopyChunkCatalog present in StoRM! */
- private static final CopyChunkCatalog cat = new CopyChunkCatalog();
- /* WARNING!!! TO BE MODIFIED WITH FACTORY!!! */
- private CopyChunkDAO dao = CopyChunkDAO.getInstance();
-
- private CopyChunkCatalog() {
-
- }
-
- /**
- * Method that returns the only instance of PtPChunkCatalog available.
- */
- public static CopyChunkCatalog getInstance() {
-
- return cat;
- }
-
- /**
- * Method used to update into Persistence a retrieved CopyChunkData. In case
- * any error occurs, the operation does not proceed and no Exception is
- * thrown.
- *
- * Beware that the only fields updated into persistence are the StatusCode and
- * the errorString.
- */
- synchronized public void update(CopyPersistentChunkData cd) {
-
- CopyChunkDataTO to = new CopyChunkDataTO();
- /* primary key needed by DAO Object */
- to.setPrimaryKey(cd.getPrimaryKey());
- to.setLifeTime(FileLifetimeConverter.getInstance().toDB(
- cd.getLifetime().value()));
- to.setStatus(StatusCodeConverter.getInstance().toDB(
- cd.getStatus().getStatusCode()));
- to.setErrString(cd.getStatus().getExplanation());
- to.setFileStorageType(FileStorageTypeConverter.getInstance().toDB(
- cd.getFileStorageType()));
- to.setOverwriteOption(OverwriteModeConverter.getInstance().toDB(
- cd.getOverwriteOption()));
- to.setNormalizedSourceStFN(cd.getSURL().normalizedStFN());
- to.setSourceSurlUniqueID(new Integer(cd.getSURL().uniqueId()));
- to.setNormalizedTargetStFN(cd.getDestinationSURL().normalizedStFN());
- to.setTargetSurlUniqueID(new Integer(cd.getDestinationSURL().uniqueId()));
-
- dao.update(to);
- }
-
- /**
- * Method that returns a Collection of CopyChunkData Objects matching the
- * supplied TRequestToken.
- *
- * If any of the data associated to the TRequestToken is not well formed and
- * so does not allow a CopyChunkData Object to be created, then that part of
- * the request is dropped and gets logged, and the processing continues with
- * the next part. All valid chunks get returned: the others get dropped.
- *
- * If there are no chunks to process then an empty Collection is returned, and
- * a messagge gets logged.
- */
- synchronized public Collection lookup(
- TRequestToken rt) {
-
- Collection chunkDataTOs = dao.find(rt);
- log.debug("COPY CHUNK CATALOG: retrieved data {}", chunkDataTOs);
- return buildChunkDataList(chunkDataTOs, rt);
- }
-
- private Collection buildChunkDataList(
- Collection chunkDataTOs, TRequestToken rt) {
-
- ArrayList list = new ArrayList();
- CopyPersistentChunkData chunk;
- for (CopyChunkDataTO chunkTO : chunkDataTOs) {
- chunk = makeOne(chunkTO, rt);
- if (chunk == null) {
- continue;
- }
- list.add(chunk);
- if (isComplete(chunkTO)) {
- continue;
- }
- try {
- dao.updateIncomplete(completeTO(chunkTO, chunk));
- } catch (InvalidReducedCopyChunkDataAttributesException e) {
- log.warn("COPY CHUNK CATALOG! unable to add missing informations on "
- + "DB to the request: {}", e.getMessage());
- }
- }
- log.debug("COPY CHUNK CATALOG: returning {}\n\n", list);
- return list;
- }
-
- private Collection buildChunkDataList(
- Collection chunkDataTOs) {
-
- ArrayList list = new ArrayList();
- CopyPersistentChunkData chunk;
- for (CopyChunkDataTO chunkTO : chunkDataTOs) {
- chunk = makeOne(chunkTO);
- if (chunk == null) {
- continue;
- }
- list.add(chunk);
- if (isComplete(chunkTO)) {
- continue;
- }
- try {
- dao.updateIncomplete(completeTO(chunkTO, chunk));
- } catch (InvalidReducedCopyChunkDataAttributesException e) {
- log.warn("COPY CHUNK CATALOG! unable to add missing informations on DB "
- + "to the request: {}", e.getMessage());
- }
- }
- log.debug("COPY CHUNK CATALOG: returning {}\n\n", list);
- return list;
- }
-
- public Collection lookupCopyChunkData(
- TRequestToken requestToken, Collection surls) {
-
- int[] surlsUniqueIDs = new int[surls.size()];
- String[] surlsArray = new String[surls.size()];
- int index = 0;
- for (TSURL tsurl : surls) {
- surlsUniqueIDs[index] = tsurl.uniqueId();
- surlsArray[index] = tsurl.rawSurl();
- index++;
- }
- Collection chunkDataTOs = dao.find(requestToken,
- surlsUniqueIDs, surlsArray);
- return buildChunkDataList(chunkDataTOs, requestToken);
- }
-
- public Collection lookupCopyChunkData(TSURL surl,
- GridUserInterface user) {
-
- return lookupCopyChunkData(Arrays.asList(new TSURL[] { surl }), user);
- }
-
- public Collection lookupCopyChunkData(TSURL surl) {
-
- return lookupCopyChunkData(Arrays.asList(new TSURL[] { surl }));
- }
-
- private Collection lookupCopyChunkData(
- List surls, GridUserInterface user) {
-
- int[] surlsUniqueIDs = new int[surls.size()];
- String[] surlsArray = new String[surls.size()];
- int index = 0;
- for (TSURL tsurl : surls) {
- surlsUniqueIDs[index] = tsurl.uniqueId();
- surlsArray[index] = tsurl.rawSurl();
- index++;
- }
- Collection chunkDataTOs = dao.find(surlsUniqueIDs,
- surlsArray, user.getDn());
- return buildChunkDataList(chunkDataTOs);
- }
-
- public Collection lookupCopyChunkData(
- List surls) {
-
- int[] surlsUniqueIDs = new int[surls.size()];
- String[] surlsArray = new String[surls.size()];
- int index = 0;
- for (TSURL tsurl : surls) {
- surlsUniqueIDs[index] = tsurl.uniqueId();
- surlsArray[index] = tsurl.rawSurl();
- index++;
- }
- Collection chunkDataTOs = dao.find(surlsUniqueIDs,
- surlsArray);
- return buildChunkDataList(chunkDataTOs);
- }
-
- private CopyPersistentChunkData makeOne(CopyChunkDataTO chunkTO) {
-
- try {
- return makeOne(chunkTO,
- new TRequestToken(chunkTO.requestToken(), chunkTO.timeStamp()));
- } catch (InvalidTRequestTokenAttributesException e) {
- throw new IllegalStateException(
- "Unexpected InvalidTRequestTokenAttributesException in TRequestToken: "
- + e);
- }
- }
-
- /**
- * Generates a CopyChunkData from the received CopyChunkDataTO
- *
- * @param chunkDataTO
- * @param rt
- * @return
- */
- private CopyPersistentChunkData makeOne(CopyChunkDataTO chunkDataTO,
- TRequestToken rt) {
-
- StringBuilder errorSb = new StringBuilder();
- // fromSURL
- TSURL fromSURL = null;
- try {
- fromSURL = TSURL.makeFromStringValidate(chunkDataTO.fromSURL());
- } catch (InvalidTSURLAttributesException e) {
- errorSb.append(e);
- }
- if (chunkDataTO.normalizedSourceStFN() != null) {
- fromSURL.setNormalizedStFN(chunkDataTO.normalizedSourceStFN());
- }
- if (chunkDataTO.sourceSurlUniqueID() != null) {
- fromSURL.setUniqueID(chunkDataTO.sourceSurlUniqueID().intValue());
- }
- // toSURL
- TSURL toSURL = null;
- try {
- toSURL = TSURL.makeFromStringValidate(chunkDataTO.toSURL());
- } catch (InvalidTSURLAttributesException e) {
- errorSb.append(e);
- }
- if (chunkDataTO.normalizedTargetStFN() != null) {
- toSURL.setNormalizedStFN(chunkDataTO.normalizedTargetStFN());
- }
- if (chunkDataTO.targetSurlUniqueID() != null) {
- toSURL.setUniqueID(chunkDataTO.targetSurlUniqueID().intValue());
- }
- // lifeTime
- TLifeTimeInSeconds lifeTime = null;
- try {
- lifeTime = TLifeTimeInSeconds.make(FileLifetimeConverter.getInstance()
- .toStoRM(chunkDataTO.lifeTime()), TimeUnit.SECONDS);
- } catch (IllegalArgumentException e) {
- errorSb.append("\n");
- errorSb.append(e);
- }
- // fileStorageType
- TFileStorageType fileStorageType = FileStorageTypeConverter.getInstance()
- .toSTORM(chunkDataTO.fileStorageType());
- if (fileStorageType == TFileStorageType.EMPTY) {
- log.error("\nTFileStorageType could not be translated from its String "
- + "representation! String: {}", chunkDataTO.fileStorageType());
- // fail creation of PtPChunk!
- fileStorageType = null;
- }
- // spaceToken!
- //
- // WARNING! Although this field is in common between StoRM and DPM, a
- // converter is still used
- // because DPM logic for NULL/EMPTY is not known. StoRM model does not
- // allow for null, so it must
- // be taken care of!
- TSpaceToken spaceToken = null;
- TSpaceToken emptyToken = TSpaceToken.makeEmpty();
- // convert empty string representation of DPM into StoRM representation;
- String spaceTokenTranslation = SpaceTokenStringConverter.getInstance()
- .toStoRM(chunkDataTO.spaceToken());
- if (emptyToken.toString().equals(spaceTokenTranslation)) {
- spaceToken = emptyToken;
- } else {
- try {
- spaceToken = TSpaceToken.make(spaceTokenTranslation);
- } catch (InvalidTSpaceTokenAttributesException e) {
- errorSb.append("\n");
- errorSb.append(e);
- }
- }
- // overwriteOption!
- TOverwriteMode globalOverwriteOption = OverwriteModeConverter.getInstance()
- .toSTORM(chunkDataTO.overwriteOption());
- if (globalOverwriteOption == TOverwriteMode.EMPTY) {
- errorSb.append("\nTOverwriteMode could not be "
- + "translated from its String representation! String: "
- + chunkDataTO.overwriteOption());
- globalOverwriteOption = null;
- }
- // status
- TReturnStatus status = null;
- TStatusCode code = StatusCodeConverter.getInstance().toSTORM(
- chunkDataTO.status());
- if (code == TStatusCode.EMPTY) {
- errorSb.append("\nRetrieved StatusCode was not recognised: "
- + chunkDataTO.status());
- } else {
- status = new TReturnStatus(code, chunkDataTO.errString());
- }
- // make CopyChunkData
- CopyPersistentChunkData aux = null;
- try {
- aux = new CopyPersistentChunkData(rt, fromSURL, toSURL, lifeTime,
- fileStorageType, spaceToken, globalOverwriteOption, status);
- aux.setPrimaryKey(chunkDataTO.primaryKey());
- } catch (InvalidSurlRequestDataAttributesException e) {
- dao.signalMalformedCopyChunk(chunkDataTO);
- log.warn("COPY CHUNK CATALOG! Retrieved malformed Copy"
- + " chunk data from persistence. Dropping chunk from request: {}", rt);
- log.warn(e.getMessage());
- log.warn(errorSb.toString());
- }
- // end...
- return aux;
- }
-
- /**
- *
- * Adds to the received CopyChunkDataTO the normalized StFN and the SURL
- * unique ID taken from the CopyChunkData
- *
- * @param chunkTO
- * @param chunk
- */
- private void completeTO(ReducedCopyChunkDataTO chunkTO,
- final ReducedCopyChunkData chunk) {
-
- chunkTO.setNormalizedSourceStFN(chunk.fromSURL().normalizedStFN());
- chunkTO.setSourceSurlUniqueID(new Integer(chunk.fromSURL().uniqueId()));
- chunkTO.setNormalizedTargetStFN(chunk.toSURL().normalizedStFN());
- chunkTO.setTargetSurlUniqueID(new Integer(chunk.toSURL().uniqueId()));
- }
-
- /**
- *
- * Creates a ReducedCopyChunkDataTO from the received CopyChunkDataTO and
- * completes it with the normalized StFN and the SURL unique ID taken from the
- * PtGChunkData
- *
- * @param chunkTO
- * @param chunk
- * @return
- * @throws InvalidReducedCopyChunkDataAttributesException
- */
- private ReducedCopyChunkDataTO completeTO(CopyChunkDataTO chunkTO,
- final CopyPersistentChunkData chunk)
- throws InvalidReducedCopyChunkDataAttributesException {
-
- ReducedCopyChunkDataTO reducedChunkTO = this.reduce(chunkTO);
- this.completeTO(reducedChunkTO, this.reduce(chunk));
- return reducedChunkTO;
- }
-
- /**
- * Creates a ReducedCopyChunkData from the data contained in the received
- * CopyChunkData
- *
- * @param chunk
- * @return
- * @throws InvalidReducedPtGChunkDataAttributesException
- */
- private ReducedCopyChunkData reduce(CopyPersistentChunkData chunk)
- throws InvalidReducedCopyChunkDataAttributesException {
-
- ReducedCopyChunkData reducedChunk = new ReducedCopyChunkData(
- chunk.getSURL(), chunk.getDestinationSURL(), chunk.getStatus());
- reducedChunk.setPrimaryKey(chunk.getPrimaryKey());
- return reducedChunk;
- }
-
- /**
- * Creates a ReducedCopyChunkDataTO from the data contained in the received
- * CopyChunkDataTO
- *
- * @param chunkTO
- * @return
- */
- private ReducedCopyChunkDataTO reduce(CopyChunkDataTO chunkTO) {
-
- ReducedCopyChunkDataTO reducedChunkTO = new ReducedCopyChunkDataTO();
- reducedChunkTO.setPrimaryKey(chunkTO.primaryKey());
- reducedChunkTO.setFromSURL(chunkTO.fromSURL());
- reducedChunkTO.setNormalizedSourceStFN(chunkTO.normalizedSourceStFN());
- reducedChunkTO.setSourceSurlUniqueID(chunkTO.sourceSurlUniqueID());
- reducedChunkTO.setToSURL(chunkTO.toSURL());
- reducedChunkTO.setNormalizedTargetStFN(chunkTO.normalizedTargetStFN());
- reducedChunkTO.setTargetSurlUniqueID(chunkTO.targetSurlUniqueID());
- reducedChunkTO.setStatus(chunkTO.status());
- reducedChunkTO.setErrString(chunkTO.errString());
- return reducedChunkTO;
- }
-
- /**
- * Checks if the received CopyChunkDataTO contains the fields not set by the
- * front end but required
- *
- * @param chunkTO
- * @return
- */
- private boolean isComplete(CopyChunkDataTO chunkTO) {
-
- return (chunkTO.normalizedSourceStFN() != null)
- && (chunkTO.sourceSurlUniqueID() != null && chunkTO
- .normalizedTargetStFN() != null)
- && (chunkTO.targetSurlUniqueID() != null);
- }
-
- /**
- * Checks if the received ReducedPtGChunkDataTO contains the fields not set by
- * the front end but required
- *
- * @param reducedChunkTO
- * @return
- */
- @SuppressWarnings("unused")
- private boolean isComplete(ReducedCopyChunkDataTO reducedChunkTO) {
-
- return (reducedChunkTO.normalizedSourceStFN() != null)
- && (reducedChunkTO.sourceSurlUniqueID() != null && reducedChunkTO
- .normalizedTargetStFN() != null)
- && (reducedChunkTO.targetSurlUniqueID() != null);
- }
-
- public void updateFromPreviousStatus(TRequestToken requestToken,
- TStatusCode expectedStatusCode, TStatusCode newStatusCode,
- String explanation) {
-
- dao.updateStatusOnMatchingStatus(requestToken, expectedStatusCode,
- newStatusCode, explanation);
- }
-
- public void updateFromPreviousStatus(TRequestToken requestToken,
- List surlList, TStatusCode expectedStatusCode,
- TStatusCode newStatusCode) {
-
- int[] surlsUniqueIDs = new int[surlList.size()];
- String[] surls = new String[surlList.size()];
- int index = 0;
- for (TSURL tsurl : surlList) {
- surlsUniqueIDs[index] = tsurl.uniqueId();
- surls[index] = tsurl.rawSurl();
- index++;
- }
- dao.updateStatusOnMatchingStatus(requestToken, surlsUniqueIDs, surls,
- expectedStatusCode, newStatusCode);
- }
-
-}
diff --git a/src/main/java/it/grid/storm/catalogs/CopyChunkDAO.java b/src/main/java/it/grid/storm/catalogs/CopyChunkDAO.java
deleted file mode 100644
index 4e55e5446..000000000
--- a/src/main/java/it/grid/storm/catalogs/CopyChunkDAO.java
+++ /dev/null
@@ -1,786 +0,0 @@
-/*
- *
- * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010.
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not
- * use this file except in compliance with the License. You may obtain a copy of
- * the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package it.grid.storm.catalogs;
-
-import it.grid.storm.config.Configuration;
-import it.grid.storm.srm.types.TRequestToken;
-import it.grid.storm.srm.types.TStatusCode;
-
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.PreparedStatement;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.SQLWarning;
-import java.sql.Statement;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.List;
-import java.util.Timer;
-import java.util.TimerTask;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * DAO class for PtPChunkCatalog. This DAO is specifically designed to connect
- * to a MySQL DB. The raw data found in those tables is pre-treated in order to
- * turn it into the Object Model of StoRM. See Method comments for further info.
- *
- * BEWARE! DAO Adjusts for extra fields in the DB that are not present in the
- * object model.
- *
- * @author EGRID - ICTP Trieste
- * @version 2.0
- * @date September 2005
- */
-public class CopyChunkDAO {
-
- private static final Logger log = LoggerFactory.getLogger(CopyChunkDAO.class);
-
- /* String with the name of the class for the DB driver */
- private final String driver = Configuration.getInstance().getDBDriver();
- /* String referring to the URL of the DB */
- private final String url = Configuration.getInstance().getDBURL();
- /* String with the password for the DB */
- private final String password = Configuration.getInstance().getDBPassword();
- /* String with the name for the DB */
- private final String name = Configuration.getInstance().getDBUserName();
-
- /* Connection to DB - WARNING!!! It is kept open all the time! */
- private Connection con = null;
- /* boolean that tells whether reconnection is needed because of MySQL bug! */
- private boolean reconnect = false;
-
- /* Singleton instance */
- private final static CopyChunkDAO dao = new CopyChunkDAO();
-
- /* timer thread that will run a task to alert when reconnecting is necessary! */
- private Timer clock = null;
- /*
- * timer task that will update the boolean signaling that a reconnection is
- * needed!
- */
- private TimerTask clockTask = null;
- /* milliseconds that must pass before reconnecting to DB */
- private final long period = Configuration.getInstance().getDBReconnectPeriod() * 1000;
- /* initial delay in milliseconds before starting timer */
- private final long delay = Configuration.getInstance().getDBReconnectDelay() * 1000;
-
- private CopyChunkDAO() {
-
- setUpConnection();
- clock = new Timer();
- clockTask = new TimerTask() {
-
- @Override
- public void run() {
-
- reconnect = true;
- }
- }; // clock task
- clock.scheduleAtFixedRate(clockTask, delay, period);
- }
-
- /**
- * Method that returns the only instance of the CopyChunkDAO.
- */
- public static CopyChunkDAO getInstance() {
-
- return dao;
- }
-
- /**
- * Method used to save the changes made to a retrieved CopyChunkDataTO, back
- * into the MySQL DB.
- *
- * Only statusCode and explanation, of status_Copy table get written to the
- * DB. Likewise for fileLifetime of request_queue table.
- *
- * In case of any error, an error messagge gets logged but no exception is
- * thrown.
- */
- public synchronized void update(CopyChunkDataTO to) {
-
- if (!checkConnection()) {
- log.error("COPY CHUNK DAO: update - unable to get a valid connection!");
- return;
- }
- PreparedStatement updateFileReq = null;
- try {
- // ready updateFileReq...
- updateFileReq = con
- .prepareStatement("UPDATE request_queue rq JOIN (status_Copy sc, request_Copy rc) "
- + "ON (rq.ID=rc.request_queueID AND sc.request_CopyID=rc.ID) "
- + "SET sc.statusCode=?, sc.explanation=?, rq.fileLifetime=?, rq.config_FileStorageTypeID=?, rq.config_OverwriteID=?, "
- + "rc.normalized_sourceSURL_StFN=?, rc.sourceSURL_uniqueID=?, rc.normalized_targetSURL_StFN=?, rc.targetSURL_uniqueID=? "
- + "WHERE rc.ID=?");
- logWarnings(con.getWarnings());
-
- updateFileReq.setInt(1, to.status());
- logWarnings(updateFileReq.getWarnings());
-
- updateFileReq.setString(2, to.errString());
- logWarnings(updateFileReq.getWarnings());
-
- updateFileReq.setInt(3, to.lifeTime());
- logWarnings(updateFileReq.getWarnings());
-
- updateFileReq.setString(4, to.fileStorageType());
- logWarnings(updateFileReq.getWarnings());
-
- updateFileReq.setString(5, to.overwriteOption());
- logWarnings(updateFileReq.getWarnings());
-
- updateFileReq.setString(6, to.normalizedSourceStFN());
- logWarnings(updateFileReq.getWarnings());
-
- updateFileReq.setInt(7, to.sourceSurlUniqueID());
- logWarnings(updateFileReq.getWarnings());
-
- updateFileReq.setString(8, to.normalizedTargetStFN());
- logWarnings(updateFileReq.getWarnings());
-
- updateFileReq.setInt(9, to.targetSurlUniqueID());
- logWarnings(updateFileReq.getWarnings());
-
- updateFileReq.setLong(10, to.primaryKey());
- logWarnings(updateFileReq.getWarnings());
-
- // run updateFileReq
- updateFileReq.executeUpdate();
- logWarnings(updateFileReq.getWarnings());
- } catch (SQLException e) {
- log.error("COPY CHUNK DAO: Unable to complete update! {}",
- e.getMessage(), e);
- } finally {
- close(updateFileReq);
- }
- }
-
- /**
- * Updates the request_Get represented by the received ReducedPtGChunkDataTO
- * by setting its normalized_sourceSURL_StFN and sourceSURL_uniqueID
- *
- * @param chunkTO
- */
- public synchronized void updateIncomplete(ReducedCopyChunkDataTO chunkTO) {
-
- if (!checkConnection()) {
- log
- .error("COPY CHUNK DAO: updateIncomplete - unable to get a valid connection!");
- return;
- }
- String str = "UPDATE request_Copy SET normalized_sourceSURL_StFN=?, sourceSURL_uniqueID=?, normalized_targetSURL_StFN=?, targetSURL_uniqueID=? "
- + "WHERE ID=?";
- PreparedStatement stmt = null;
- try {
- stmt = con.prepareStatement(str);
- logWarnings(con.getWarnings());
-
- stmt.setString(1, chunkTO.normalizedSourceStFN());
- logWarnings(stmt.getWarnings());
-
- stmt.setInt(2, chunkTO.sourceSurlUniqueID());
- logWarnings(stmt.getWarnings());
-
- stmt.setString(3, chunkTO.normalizedTargetStFN());
- logWarnings(stmt.getWarnings());
-
- stmt.setInt(4, chunkTO.targetSurlUniqueID());
- logWarnings(stmt.getWarnings());
-
- stmt.setLong(5, chunkTO.primaryKey());
- logWarnings(stmt.getWarnings());
-
- log.trace("COPY CHUNK DAO - update incomplete: {}", stmt.toString());
- stmt.executeUpdate();
- logWarnings(stmt.getWarnings());
- } catch (SQLException e) {
- log.error("COPY CHUNK DAO: Unable to complete update incomplete! {}",
- e.getMessage(), e);
- } finally {
- close(stmt);
- }
- }
-
- /**
- * Method that queries the MySQL DB to find all entries matching the supplied
- * TRequestToken. The Collection contains the corresponding CopyChunkDataTO
- * objects.
- *
- * A complex query establishes all chunks associated with the request token,
- * by properly joining request_queue, request_Copy and status_Copy. The
- * considered fields are:
- *
- * (1) From status_Copy: the ID field which becomes the TOs primary key, and
- * statusCode.
- *
- * (2) From request_Copy: targetSURL and sourceSURL.
- *
- * (3) From request_queue: fileLifetime, config_FileStorageTypeID, s_token,
- * config_OverwriteID.
- *
- * In case of any error, a log gets written and an empty collection is
- * returned. No exception is returned.
- *
- * NOTE! Chunks in SRM_ABORTED status are NOT returned!
- */
- public synchronized Collection find(
- TRequestToken requestToken) {
-
- if (!checkConnection()) {
- log.error("COPY CHUNK DAO: find - unable to get a valid connection!");
- return new ArrayList();
- }
- String strToken = requestToken.toString();
- String str = null;
- PreparedStatement find = null;
- ResultSet rs = null;
- try {
- /* get chunks of the request */
- str = "SELECT rq.s_token, rq.config_FileStorageTypeID, rq.config_OverwriteID, rq.fileLifetime, rc.ID, rc.sourceSURL, rc.targetSURL, rc.normalized_sourceSURL_StFN, rc.sourceSURL_uniqueID, rc.normalized_targetSURL_StFN, rc.targetSURL_uniqueID, d.isSourceADirectory, d.allLevelRecursive, d.numOfLevels "
- + "FROM request_queue rq JOIN (request_Copy rc, status_Copy sc) "
- + "ON (rc.request_queueID=rq.ID AND sc.request_CopyID=rc.ID) "
- + "LEFT JOIN request_DirOption d ON rc.request_DirOptionID=d.ID "
- + "WHERE rq.r_token=? AND sc.statusCode<>?";
-
- find = con.prepareStatement(str);
- logWarnings(con.getWarnings());
-
- ArrayList list = new ArrayList();
- find.setString(1, strToken);
- logWarnings(find.getWarnings());
-
- find.setInt(2,
- StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_ABORTED));
- logWarnings(find.getWarnings());
-
- log.debug("COPY CHUNK DAO: find method; " + find.toString());
- rs = find.executeQuery();
- logWarnings(find.getWarnings());
-
- CopyChunkDataTO chunkDataTO;
- while (rs.next()) {
- chunkDataTO = new CopyChunkDataTO();
- chunkDataTO.setRequestToken(strToken);
- chunkDataTO.setSpaceToken(rs.getString("rq.s_token"));
- chunkDataTO.setFileStorageType(rs
- .getString("rq.config_FileStorageTypeID"));
- chunkDataTO.setOverwriteOption(rs.getString("rq.config_OverwriteID"));
- chunkDataTO.setTimeStamp(rs.getTimestamp("rq.timeStamp"));
- chunkDataTO.setLifeTime(rs.getInt("rq.fileLifetime"));
- chunkDataTO.setPrimaryKey(rs.getLong("rc.ID"));
- chunkDataTO.setFromSURL(rs.getString("rc.sourceSURL"));
- chunkDataTO.setNormalizedSourceStFN(rs
- .getString("rc.normalized_sourceSURL_StFN"));
- int uniqueID = rs.getInt("rc.sourceSURL_uniqueID");
- if (!rs.wasNull()) {
- chunkDataTO.setSourceSurlUniqueID(new Integer(uniqueID));
- }
-
- chunkDataTO.setToSURL(rs.getString("rc.targetSURL"));
- chunkDataTO.setNormalizedTargetStFN(rs
- .getString("rc.normalized_sourceSURL_StFN"));
- uniqueID = rs.getInt("rc.sourceSURL_uniqueID");
- if (!rs.wasNull()) {
- chunkDataTO.setTargetSurlUniqueID(new Integer(uniqueID));
- }
-
- list.add(chunkDataTO);
- }
- return list;
- } catch (SQLException e) {
- log.error("COPY CHUNK DAO: {}", e.getMessage(), e);
- /* return empty Collection! */
- return new ArrayList();
- } finally {
- close(rs);
- close(find);
- }
-
- }
-
- public synchronized Collection find(
- TRequestToken requestToken, int[] surlUniqueIDs, String[] surls) {
-
- if (!checkConnection()) {
- log.error("COPY CHUNK DAO: find - unable to get a valid connection!");
- return new ArrayList();
- }
- String strToken = requestToken.toString();
- String str = null;
- PreparedStatement find = null;
- ResultSet rs = null;
- try {
- /* get chunks of the request */
- str = "SELECT rq.s_token, rq.config_FileStorageTypeID, rq.config_OverwriteID, rq.fileLifetime, rc.ID, rc.sourceSURL, rc.targetSURL, rc.normalized_sourceSURL_StFN, rc.sourceSURL_uniqueID, rc.normalized_targetSURL_StFN, rc.targetSURL_uniqueID, d.isSourceADirectory, d.allLevelRecursive, d.numOfLevels "
- + "FROM request_queue rq JOIN (request_Copy rc, status_Copy sc) "
- + "ON (rc.request_queueID=rq.ID AND sc.request_CopyID=rc.ID) "
- + "LEFT JOIN request_DirOption d ON rc.request_DirOptionID=d.ID "
- + "WHERE rq.r_token=? AND ( rc.sourceSURL_uniqueID IN "
- + makeSURLUniqueIDWhere(surlUniqueIDs)
- + " AND rc.sourceSURL IN "
- + makeSurlString(surls) + " ) ";
-
- find = con.prepareStatement(str);
- logWarnings(con.getWarnings());
-
- ArrayList list = new ArrayList();
- find.setString(1, strToken);
- logWarnings(find.getWarnings());
-
- log.debug("COPY CHUNK DAO: find method; {}", find.toString());
- rs = find.executeQuery();
- logWarnings(find.getWarnings());
-
- CopyChunkDataTO chunkDataTO;
- while (rs.next()) {
- chunkDataTO = new CopyChunkDataTO();
- chunkDataTO.setRequestToken(strToken);
- chunkDataTO.setSpaceToken(rs.getString("rq.s_token"));
- chunkDataTO.setFileStorageType(rs
- .getString("rq.config_FileStorageTypeID"));
- chunkDataTO.setOverwriteOption(rs.getString("rq.config_OverwriteID"));
- chunkDataTO.setTimeStamp(rs.getTimestamp("rq.timeStamp"));
- chunkDataTO.setLifeTime(rs.getInt("rq.fileLifetime"));
- chunkDataTO.setPrimaryKey(rs.getLong("rc.ID"));
- chunkDataTO.setFromSURL(rs.getString("rc.sourceSURL"));
- chunkDataTO.setNormalizedSourceStFN(rs
- .getString("rc.normalized_sourceSURL_StFN"));
- int uniqueID = rs.getInt("rc.sourceSURL_uniqueID");
- if (!rs.wasNull()) {
- chunkDataTO.setSourceSurlUniqueID(new Integer(uniqueID));
- }
-
- chunkDataTO.setToSURL(rs.getString("rc.targetSURL"));
- chunkDataTO.setNormalizedTargetStFN(rs
- .getString("rc.normalized_sourceSURL_StFN"));
- uniqueID = rs.getInt("rc.sourceSURL_uniqueID");
- if (!rs.wasNull()) {
- chunkDataTO.setTargetSurlUniqueID(new Integer(uniqueID));
- }
-
- list.add(chunkDataTO);
- }
- return list;
- } catch (SQLException e) {
- log.error("COPY CHUNK DAO: {}", e.getMessage(), e);
- /* return empty Collection! */
- return new ArrayList();
- } finally {
- close(rs);
- close(find);
- }
-
- }
-
- /**
- * Method used in extraordinary situations to signal that data retrieved from
- * the DB was malformed and could not be translated into the StoRM object
- * model.
- *
- * This method attempts to change the status of the request to SRM_FAILURE and
- * record it in the DB.
- *
- * This operation could potentially fail because the source of the malformed
- * problems could be a problematic DB; indeed, initially only log messagges
- * where recorded.
- *
- * Yet it soon became clear that the source of malformed data were the clients
- * and/or FE recording info in the DB. In these circumstances the client would
- * its request as being in the SRM_IN_PROGRESS state for ever. Hence the
- * pressing need to inform it of the encountered problems.
- */
- public synchronized void signalMalformedCopyChunk(CopyChunkDataTO auxTO) {
-
- if (!checkConnection()) {
- log
- .error("COPY CHUNK DAO: signalMalformedCopyChunk - unable to get a valid connection!");
- return;
- }
- String signalSQL = "UPDATE status_Copy SET statusCode="
- + StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_FAILURE)
- + ", explanation=? WHERE request_CopyID=" + auxTO.primaryKey();
-
- PreparedStatement signal = null;
- try {
- /* update storm_put_filereq */
- signal = con.prepareStatement(signalSQL);
- logWarnings(con.getWarnings());
-
- /* Prepared statement spares DB-specific String notation! */
- signal.setString(1, "Request is malformed!");
- logWarnings(signal.getWarnings());
-
- signal.executeUpdate();
- logWarnings(signal.getWarnings());
- } catch (SQLException e) {
- log.error("CopyChunkDAO! Unable to signal in DB that the request was "
- + "malformed! Request: {}; Error: {}", auxTO.toString(),
- e.getMessage(), e);
- } finally {
- close(signal);
- }
- }
-
- /**
- * Auxiliary method used to close a Statement
- */
- private void close(Statement stmt) {
-
- if (stmt != null) {
- try {
- stmt.close();
- } catch (Exception e) {
- log.error("COPY CHUNK DAO! Unable to close Statement {} - Error: {}",
- stmt.toString(), e.getMessage(), e);
- }
- }
- }
-
- /**
- * Auxiliary method used to close a ResultSet
- */
- private void close(ResultSet rset) {
-
- if (rset != null) {
- try {
- rset.close();
- } catch (Exception e) {
- log.error("COPY CHUNK DAO! Unable to close ResultSet! Error: {}",
- e.getMessage(), e);
- }
- }
- }
-
- /**
- * Auxiliary private method that logs all SQL warnings.
- */
- private void logWarnings(SQLWarning w) {
-
- if (w != null) {
- log.debug("COPY CHUNK DAO: {}", w.toString());
- while ((w = w.getNextWarning()) != null) {
- log.debug("COPY CHUNK DAO: {}", w.toString());
- }
- }
- }
-
- /**
- * Auxiliary method that sets up the conenction to the DB.
- */
- private boolean setUpConnection() {
-
- boolean response = false;
- try {
- Class.forName(driver);
- con = DriverManager.getConnection(url, name, password);
- logWarnings(con.getWarnings());
- response = con.isValid(0);
- } catch (SQLException | ClassNotFoundException e) {
- log.error("COPY CHUNK DAO! Exception in setUpConnection! {}", e.getMessage(), e);
- }
- return response;
- }
-
- /**
- * Auxiliary method that checks if time for resetting the connection has come,
- * and eventually takes it down and up back again.
- */
- private synchronized boolean checkConnection() {
-
- boolean response = true;
- if (reconnect) {
- log.debug("COPY CHUNK DAO! Reconnecting to DB! ");
- takeDownConnection();
- response = setUpConnection();
- if (response) {
- reconnect = false;
- }
- }
- return response;
- }
-
- /**
- * Auxiliary method that takes down a conenctin to the DB.
- */
- private void takeDownConnection() {
-
- if (con != null) {
- try {
- con.close();
- } catch (SQLException e) {
- log.error("COPY CHUNK DAO! Exception in takeDownConnection method: {}",
- e.getMessage(), e);
- }
- }
- }
-
- public synchronized void updateStatusOnMatchingStatus(
- TRequestToken requestToken, TStatusCode expectedStatusCode,
- TStatusCode newStatusCode, String explanation) {
-
- if (requestToken == null || requestToken.getValue().trim().isEmpty()
- || explanation == null) {
- throw new IllegalArgumentException(
- "Unable to perform the updateStatusOnMatchingStatus, "
- + "invalid arguments: requestToken=" + requestToken + " explanation="
- + explanation);
- }
- doUpdateStatusOnMatchingStatus(requestToken, null, null,
- expectedStatusCode, newStatusCode, explanation, true, false, true);
- }
-
- public synchronized void updateStatusOnMatchingStatus(
- TRequestToken requestToken, int[] surlsUniqueIDs, String[] surls,
- TStatusCode expectedStatusCode, TStatusCode newStatusCode)
- throws IllegalArgumentException {
-
- if (requestToken == null || requestToken.getValue().trim().isEmpty()
- || surlsUniqueIDs == null || surls == null || surlsUniqueIDs.length == 0
- || surls.length == 0 || surlsUniqueIDs.length != surls.length) {
- throw new IllegalArgumentException(
- "Unable to perform the updateStatusOnMatchingStatus, "
- + "invalid arguments: requestToken=" + requestToken
- + "surlsUniqueIDs=" + surlsUniqueIDs + " surls=" + surls);
- }
- doUpdateStatusOnMatchingStatus(requestToken, surlsUniqueIDs, surls,
- expectedStatusCode, newStatusCode, null, true, true, false);
- }
-
- public synchronized void doUpdateStatusOnMatchingStatus(
- TRequestToken requestToken, int[] surlUniqueIDs, String[] surls,
- TStatusCode expectedStatusCode, TStatusCode newStatusCode,
- String explanation, boolean withRequestToken, boolean withSurls,
- boolean withExplanation) throws IllegalArgumentException {
-
- if ((withRequestToken && requestToken == null)
- || (withExplanation && explanation == null)
- || (withSurls && (surlUniqueIDs == null || surls == null))) {
- throw new IllegalArgumentException(
- "Unable to perform the doUpdateStatusOnMatchingStatus, "
- + "invalid arguments: withRequestToken=" + withRequestToken
- + " requestToken=" + requestToken + " withSurls=" + withSurls
- + " surlUniqueIDs=" + surlUniqueIDs + " surls=" + surls
- + " withExplaination=" + withExplanation + " explanation="
- + explanation);
- }
- if (!checkConnection()) {
- log
- .error("COPY CHUNK DAO: updateStatusOnMatchingStatus - unable to get a valid connection!");
- return;
- }
- String str = "UPDATE request_queue rq JOIN (status_Copy sc, request_Copy rc) "
- + "ON (rq.ID=rc.request_queueID AND sc.request_CopyID=rc.ID) "
- + "SET sc.statusCode=? ";
- if (withExplanation) {
- str += " , " + buildExpainationSet(explanation);
- }
- str += " WHERE sc.statusCode=? ";
- if (withRequestToken) {
- str += " AND " + buildTokenWhereClause(requestToken);
- }
- if (withSurls) {
- str += " AND " + buildSurlsWhereClause(surlUniqueIDs, surls);
- }
- PreparedStatement stmt = null;
- try {
- stmt = con.prepareStatement(str);
- logWarnings(con.getWarnings());
- stmt.setInt(1, StatusCodeConverter.getInstance().toDB(newStatusCode));
- logWarnings(stmt.getWarnings());
-
- stmt
- .setInt(2, StatusCodeConverter.getInstance().toDB(expectedStatusCode));
- logWarnings(stmt.getWarnings());
-
- log.trace("COPY CHUNK DAO - updateStatusOnMatchingStatus: {}", stmt.toString());
- int count = stmt.executeUpdate();
- logWarnings(stmt.getWarnings());
- if (count == 0) {
- log.trace("COPY CHUNK DAO! No chunk of COPY request was updated "
- + "from {} to {}.", expectedStatusCode, newStatusCode);
- } else {
- log.debug("COPY CHUNK DAO! {} chunks of COPY requests were updated "
- + "from {} to {}.", count, expectedStatusCode, newStatusCode);
- }
- } catch (SQLException e) {
- log.error("COPY CHUNK DAO! Unable to updated from {} to {}! {}",
- expectedStatusCode, newStatusCode, e.getMessage(), e);
- } finally {
- close(stmt);
- }
- }
-
- /**
- * Method that returns a String containing all Surl's IDs.
- */
- private String makeSURLUniqueIDWhere(int[] surlUniqueIDs) {
-
- StringBuilder sb = new StringBuilder("(");
- for (int i = 0; i < surlUniqueIDs.length; i++) {
- if (i > 0) {
- sb.append(",");
- }
- sb.append(surlUniqueIDs[i]);
- }
- sb.append(")");
- return sb.toString();
- }
-
- /**
- * Method that returns a String containing all Surls.
- */
- private String makeSurlString(String[] surls) {
-
- StringBuilder sb = new StringBuilder("(");
- int n = surls.length;
- for (int i = 0; i < n; i++) {
- sb.append("'");
- sb.append(surls[i]);
- sb.append("'");
- if (i < (n - 1)) {
- sb.append(",");
- }
- }
- sb.append(")");
- return sb.toString();
- }
-
- public synchronized Collection find(int[] surlsUniqueIDs,
- String[] surlsArray, String dn) throws IllegalArgumentException {
-
- if (surlsUniqueIDs == null || surlsUniqueIDs.length == 0
- || surlsArray == null || surlsArray.length == 0 || dn == null) {
- throw new IllegalArgumentException("Unable to perform the find, "
- + "invalid arguments: surlsUniqueIDs=" + surlsUniqueIDs
- + " surlsArray=" + surlsArray + " dn=" + dn);
- }
- return find(surlsUniqueIDs, surlsArray, dn, true);
- }
-
- public synchronized Collection find(int[] surlsUniqueIDs,
- String[] surlsArray) throws IllegalArgumentException {
-
- if (surlsUniqueIDs == null || surlsUniqueIDs.length == 0
- || surlsArray == null || surlsArray.length == 0) {
- throw new IllegalArgumentException("Unable to perform the find, "
- + "invalid arguments: surlsUniqueIDs=" + surlsUniqueIDs
- + " surlsArray=" + surlsArray);
- }
- return find(surlsUniqueIDs, surlsArray, null, false);
- }
-
- private synchronized Collection find(int[] surlsUniqueIDs,
- String[] surlsArray, String dn, boolean withDn)
- throws IllegalArgumentException {
-
- if ((withDn && dn == null) || surlsUniqueIDs == null
- || surlsUniqueIDs.length == 0 || surlsArray == null
- || surlsArray.length == 0) {
- throw new IllegalArgumentException("Unable to perform the find, "
- + "invalid arguments: surlsUniqueIDs=" + surlsUniqueIDs
- + " surlsArray=" + surlsArray + " withDn=" + withDn + " dn=" + dn);
- }
- if (!checkConnection()) {
- log.error("COPY CHUNK DAO: find - unable to get a valid connection!");
- return new ArrayList();
- }
- PreparedStatement find = null;
- ResultSet rs = null;
- try {
- String str = "SELECT rq.r_token, rq.s_token, rq.config_FileStorageTypeID, rq.config_OverwriteID, "
- + "rq.fileLifetime, rc.ID, rc.sourceSURL, rc.targetSURL, rc.normalized_sourceSURL_StFN, "
- + "rc.sourceSURL_uniqueID, rc.normalized_targetSURL_StFN, rc.targetSURL_uniqueID, d.isSourceADirectory, "
- + "d.allLevelRecursive, d.numOfLevels "
- + "FROM request_queue rq JOIN (request_Copy rc, status_Copy sc) "
- + "ON (rc.request_queueID=rq.ID AND sc.request_CopyID=rc.ID) "
- + "LEFT JOIN request_DirOption d ON rc.request_DirOptionID=d.ID "
- + "WHERE ( rc.sourceSURL_uniqueID IN "
- + makeSURLUniqueIDWhere(surlsUniqueIDs)
- + " AND rc.sourceSURL IN "
- + makeSurlString(surlsArray) + " )";
- if (withDn) {
- str += " AND rq.client_dn=\'" + dn + "\'";
- }
- find = con.prepareStatement(str);
- logWarnings(con.getWarnings());
-
- List list = new ArrayList();
-
- log.trace("COPY CHUNK DAO - find method: {}", find.toString());
- rs = find.executeQuery();
- logWarnings(find.getWarnings());
- CopyChunkDataTO chunkDataTO = null;
- while (rs.next()) {
- chunkDataTO = new CopyChunkDataTO();
- chunkDataTO.setRequestToken(rs.getString("rq.r_token"));
- chunkDataTO.setSpaceToken(rs.getString("rq.s_token"));
- chunkDataTO.setFileStorageType(rs
- .getString("rq.config_FileStorageTypeID"));
- chunkDataTO.setOverwriteOption(rs.getString("rq.config_OverwriteID"));
- chunkDataTO.setTimeStamp(rs.getTimestamp("rq.timeStamp"));
- chunkDataTO.setLifeTime(rs.getInt("rq.fileLifetime"));
- chunkDataTO.setPrimaryKey(rs.getLong("rc.ID"));
- chunkDataTO.setFromSURL(rs.getString("rc.sourceSURL"));
- chunkDataTO.setNormalizedSourceStFN(rs
- .getString("rc.normalized_sourceSURL_StFN"));
- int uniqueID = rs.getInt("rc.sourceSURL_uniqueID");
- if (!rs.wasNull()) {
- chunkDataTO.setSourceSurlUniqueID(new Integer(uniqueID));
- }
-
- chunkDataTO.setToSURL(rs.getString("rc.targetSURL"));
- chunkDataTO.setNormalizedTargetStFN(rs
- .getString("rc.normalized_sourceSURL_StFN"));
- uniqueID = rs.getInt("rc.sourceSURL_uniqueID");
- if (!rs.wasNull()) {
- chunkDataTO.setTargetSurlUniqueID(new Integer(uniqueID));
- }
- list.add(chunkDataTO);
- }
- return list;
- } catch (SQLException e) {
- log.error("COPY CHUNK DAO: {}", e.getMessage(), e);
- /* return empty Collection! */
- return new ArrayList();
- } finally {
- close(rs);
- close(find);
- }
- }
-
- private String buildExpainationSet(String explanation) {
-
- return " sc.explanation='" + explanation + "' ";
- }
-
- private String buildTokenWhereClause(TRequestToken requestToken) {
-
- return " rq.r_token='" + requestToken.toString() + "' ";
- }
-
- private String buildSurlsWhereClause(int[] surlsUniqueIDs, String[] surls) {
-
- return " ( rc.sourceSURL_uniqueID IN "
- + makeSURLUniqueIDWhere(surlsUniqueIDs) + " AND rc.sourceSURL IN "
- + makeSurlString(surls) + " ) ";
- }
-
-}
diff --git a/src/main/java/it/grid/storm/catalogs/CopyChunkDataTO.java b/src/main/java/it/grid/storm/catalogs/CopyChunkDataTO.java
deleted file mode 100644
index 41e197eb1..000000000
--- a/src/main/java/it/grid/storm/catalogs/CopyChunkDataTO.java
+++ /dev/null
@@ -1,290 +0,0 @@
-/*
- *
- * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010.
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not
- * use this file except in compliance with the License. You may obtain a copy of
- * the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package it.grid.storm.catalogs;
-
-import java.sql.Timestamp;
-import it.grid.storm.srm.types.TOverwriteMode;
-import it.grid.storm.srm.types.TFileStorageType;
-import it.grid.storm.srm.types.TStatusCode;
-
-/**
- * Class that represents a row in the Persistence Layer: this is all raw data
- * referring to the CopyChunkData proper, that is, String and primitive types.
- *
- * Each field is initialized with default values as per SRM 2.2 specification:
- * fileStorageType VOLATILE overwriteMode NEVER status SRM_REQUEST_QUEUED
- *
- * All other fields are 0 if int, or a white space if String.
- *
- * @author EGRID ICTP
- * @version 2.0
- * @date Semptember 2005
- */
-public class CopyChunkDataTO {
-
- /* Database table request_Get fields BEGIN */
- private long primaryKey = -1; // ID primary key of record in DB
- private String fromSURL = " ";
- private String toSURL = " ";
- private String normalizedSourceStFN = null;
- private Integer sourceSurlUniqueID = null;
- private String normalizedTargetStFN = null;
- private Integer targetSurlUniqueID = null;
- /* Database table request_Get fields END */
-
- private String requestToken = " ";
- private int lifetime = 0;
- private String fileStorageType = null; // initialised in constructor
- private String spaceToken = " ";
- private String overwriteOption = null; // initialised in constructor
- private int status; // initialised in constructor
- private String errString = " ";
- private Timestamp timeStamp = null;
-
- public CopyChunkDataTO() {
-
- fileStorageType = FileStorageTypeConverter.getInstance().toDB(
- TFileStorageType.VOLATILE);
- overwriteOption = OverwriteModeConverter.getInstance().toDB(
- TOverwriteMode.NEVER);
- status = StatusCodeConverter.getInstance().toDB(
- TStatusCode.SRM_REQUEST_QUEUED);
- }
-
- public long primaryKey() {
-
- return primaryKey;
- }
-
- public void setPrimaryKey(long n) {
-
- primaryKey = n;
- }
-
- public String requestToken() {
-
- return requestToken;
- }
-
- public void setRequestToken(String s) {
-
- requestToken = s;
- }
-
- public Timestamp timeStamp() {
-
- return timeStamp;
- }
-
- public void setTimeStamp(Timestamp timeStamp) {
-
- this.timeStamp = timeStamp;
- }
-
- public String fromSURL() {
-
- return fromSURL;
- }
-
- public void setFromSURL(String s) {
-
- fromSURL = s;
- }
-
- /**
- * @return the normalizedStFN
- */
- public String normalizedSourceStFN() {
-
- return normalizedSourceStFN;
- }
-
- /**
- * @param normalizedStFN
- * the normalizedStFN to set
- */
- public void setNormalizedSourceStFN(String normalizedStFN) {
-
- this.normalizedSourceStFN = normalizedStFN;
- }
-
- /**
- * @return the surlUniqueID
- */
- public Integer sourceSurlUniqueID() {
-
- return sourceSurlUniqueID;
- }
-
- /**
- * @param surlUniqueID
- * the surlUniqueID to set
- */
- public void setSourceSurlUniqueID(Integer surlUniqueID) {
-
- this.sourceSurlUniqueID = surlUniqueID;
- }
-
- /**
- * @return the normalizedStFN
- */
- public String normalizedTargetStFN() {
-
- return normalizedTargetStFN;
- }
-
- /**
- * @param normalizedStFN
- * the normalizedStFN to set
- */
- public void setNormalizedTargetStFN(String normalizedStFN) {
-
- this.normalizedTargetStFN = normalizedStFN;
- }
-
- /**
- * @return the surlUniqueID
- */
- public Integer targetSurlUniqueID() {
-
- return targetSurlUniqueID;
- }
-
- /**
- * @param surlUniqueID
- * the surlUniqueID to set
- */
- public void setTargetSurlUniqueID(Integer surlUniqueID) {
-
- this.targetSurlUniqueID = surlUniqueID;
- }
-
- public String toSURL() {
-
- return toSURL;
- }
-
- public void setToSURL(String s) {
-
- toSURL = s;
- }
-
- public int lifeTime() {
-
- return lifetime;
- }
-
- public void setLifeTime(int n) {
-
- lifetime = n;
- }
-
- public String fileStorageType() {
-
- return fileStorageType;
- }
-
- /**
- * Method used to set the FileStorageType: if s is null nothing gets set; the
- * internal default String is the one relative to Volatile FileStorageType.
- */
- public void setFileStorageType(String s) {
-
- if (s != null)
- fileStorageType = s;
- }
-
- public String spaceToken() {
-
- return spaceToken;
- }
-
- public void setSpaceToken(String s) {
-
- spaceToken = s;
- }
-
- public String overwriteOption() {
-
- return overwriteOption;
- }
-
- /**
- * Method used to set the OverwriteMode: if s is null nothing gets set; the
- * internal default String is the one relative to Never OverwriteMode.
- */
- public void setOverwriteOption(String s) {
-
- if (s != null)
- overwriteOption = s;
- }
-
- public int status() {
-
- return status;
- }
-
- public void setStatus(int n) {
-
- status = n;
- }
-
- public String errString() {
-
- return errString;
- }
-
- public void setErrString(String s) {
-
- errString = s;
- }
-
- public String toString() {
-
- StringBuilder sb = new StringBuilder();
- sb.append(primaryKey);
- sb.append(" ");
- sb.append(requestToken);
- sb.append(" ");
- sb.append(fromSURL);
- sb.append(" ");
- sb.append(normalizedSourceStFN);
- sb.append(" ");
- sb.append(sourceSurlUniqueID);
- sb.append(" ");
- sb.append(toSURL);
- sb.append(" ");
- sb.append(normalizedTargetStFN);
- sb.append(" ");
- sb.append(targetSurlUniqueID);
- sb.append(" ");
- sb.append(lifetime);
- sb.append(" ");
- sb.append(fileStorageType);
- sb.append(" ");
- sb.append(spaceToken);
- sb.append(" ");
- sb.append(overwriteOption);
- sb.append(" ");
- sb.append(status);
- sb.append(" ");
- sb.append(errString);
- sb.append(" ");
- return sb.toString();
- }
-}
diff --git a/src/main/java/it/grid/storm/catalogs/CopyData.java b/src/main/java/it/grid/storm/catalogs/CopyData.java
deleted file mode 100644
index 5d1437fcb..000000000
--- a/src/main/java/it/grid/storm/catalogs/CopyData.java
+++ /dev/null
@@ -1,156 +0,0 @@
-/*
- *
- * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010.
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not
- * use this file except in compliance with the License. You may obtain a copy of
- * the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package it.grid.storm.catalogs;
-
-import it.grid.storm.srm.types.TFileStorageType;
-import it.grid.storm.srm.types.TLifeTimeInSeconds;
-import it.grid.storm.srm.types.TOverwriteMode;
-import it.grid.storm.srm.types.TReturnStatus;
-import it.grid.storm.srm.types.TSURL;
-import it.grid.storm.srm.types.TSpaceToken;
-import it.grid.storm.srm.types.TStatusCode;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * This class represents a CopyChunkData, that is part of a multifile Copy srm
- * request. It contains data about: the requestToken, the fromSURL, the toSURL,
- * the target fileLifeTime, the target fileStorageType and any available target
- * spaceToken, the target overwriteOption to be applied in case the file already
- * exists, the fileSize of the existing file if any, return status of the file
- * together with its error string.
- *
- * @author EGRID - ICTP Trieste
- * @date September, 2005
- * @version 2.0
- */
-public class CopyData extends SurlMultyOperationRequestData {
-
- private static final Logger log = LoggerFactory.getLogger(CopyData.class);
-
- /**
- * SURL to which the srmCopy will put the file
- */
- protected TSURL destinationSURL;
-
- /**
- * requested lifetime - BEWARE!!! It is the fileLifetime at destination in
- * case of Volatile files!
- */
- protected TLifeTimeInSeconds lifetime;
-
- /**
- * TFileStorageType at destination
- */
- protected TFileStorageType fileStorageType;
-
- /**
- * SpaceToken to use for toSURL
- */
- protected TSpaceToken spaceToken;
-
- /**
- * specifies the behaviour in case of existing files for Put part of the copy
- * (could be local or remote!)
- */
- protected TOverwriteMode overwriteOption;
-
- public CopyData(TSURL fromSURL, TSURL destinationSURL,
- TLifeTimeInSeconds lifetime, TFileStorageType fileStorageType,
- TSpaceToken spaceToken, TOverwriteMode overwriteOption, TReturnStatus status)
- throws InvalidCopyDataAttributesException,
- InvalidSurlRequestDataAttributesException {
-
- super(fromSURL, status);
- if (destinationSURL == null || lifetime == null || fileStorageType == null
- || spaceToken == null || overwriteOption == null) {
- throw new InvalidCopyDataAttributesException(fromSURL, destinationSURL,
- lifetime, fileStorageType, spaceToken, overwriteOption, status);
- }
- this.destinationSURL = destinationSURL;
- this.lifetime = lifetime;
- this.fileStorageType = fileStorageType;
- this.spaceToken = spaceToken;
- this.overwriteOption = overwriteOption;
- }
-
- /**
- * Method that returns the toSURL of the srm request to which this chunk
- * belongs.
- */
- public TSURL getDestinationSURL() {
-
- return destinationSURL;
- }
-
- /**
- * Method that returns the requested pin life time for this chunk of the srm
- * request.
- */
- public TLifeTimeInSeconds getLifetime() {
-
- return lifetime;
- }
-
- /**
- * Method that returns the fileStorageType for this chunk of the srm request.
- */
- public TFileStorageType getFileStorageType() {
-
- return fileStorageType;
- }
-
- /**
- * Method that returns the space token supplied for this chunk of the srm
- * request.
- */
- public TSpaceToken getSpaceToken() {
-
- return spaceToken;
- }
-
- /**
- * Method that returns the overwriteOption specified in the srm request.
- */
- public TOverwriteMode getOverwriteOption() {
-
- return overwriteOption;
- }
-
- /**
- * Method that sets the status of this request to SRM_DUPLICATION_ERROR; it
- * needs the explanation String which describes the situation in greater
- * detail; if a null is passed, then an empty String is used as explanation.
- */
- public void changeStatusSRM_DUPLICATION_ERROR(String explanation) {
-
- setStatus(TStatusCode.SRM_DUPLICATION_ERROR, explanation);
- }
-
- /**
- * Method that sets the status of this request to SRM_FATAL_INTERNAL_ERROR; it
- * needs the explanation String which describes the situation in greater
- * detail; if a null is passed, then an empty String is used as explanation.
- */
- public void changeStatusSRM_FATAL_INTERNAL_ERROR(String explanation) {
-
- setStatus(TStatusCode.SRM_FATAL_INTERNAL_ERROR, explanation);
- }
-
-}
diff --git a/src/main/java/it/grid/storm/catalogs/CopyGlobalFlagConverter.java b/src/main/java/it/grid/storm/catalogs/CopyGlobalFlagConverter.java
deleted file mode 100644
index 6f08504f5..000000000
--- a/src/main/java/it/grid/storm/catalogs/CopyGlobalFlagConverter.java
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
- *
- * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010.
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not
- * use this file except in compliance with the License. You may obtain a copy of
- * the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package it.grid.storm.catalogs;
-
-import java.util.Map;
-import java.util.HashMap;
-import java.util.Iterator;
-import it.grid.storm.srm.types.TOverwriteMode;
-
-/**
- * Package private auxiliary class used to convert between DPM and StoRM
- * representation of Copy TOverwriteMode+RemoveSourceFiles global information
- * for the whole request, and Flags in storm_req.
- *
- * @author: EGRID - ICTP Trieste
- * @version: 1.0
- * @date: September 2005
- */
-class CopyGlobalFlagConverter {
-
- private Map DPMtoSTORM = new HashMap();
- private Map STORMtoDPM = new HashMap();
-
- private static CopyGlobalFlagConverter c = new CopyGlobalFlagConverter();
-
- /**
- * Private constructor that fills in the conversion table; in particular, DPM
- * uses int values to represent the pair of values:
- *
- * 0 NEVER + DO NOT RemoveSourceFiles 1 ALWAYS + DO NOT RemoveSourceFiles 2
- * WHENFILESAREDIFFERENT + DO NOT RemoveSourceFiles 4 NEVER +
- * RemoveSourceFiles 5 ALWAYS + RemoveSourceFiles 6 WHENFILESAREDIFFERENT +
- * RemoveSourceFiles
- */
- private CopyGlobalFlagConverter() {
-
- DPMtoSTORM.put(new Integer(0), new Object[] { TOverwriteMode.NEVER,
- new Boolean(false) });
- DPMtoSTORM.put(new Integer(1), new Object[] { TOverwriteMode.ALWAYS,
- new Boolean(false) });
- DPMtoSTORM.put(new Integer(2), new Object[] {
- TOverwriteMode.WHENFILESAREDIFFERENT, new Boolean(false) });
- DPMtoSTORM.put(new Integer(4), new Object[] { TOverwriteMode.NEVER,
- new Boolean(true) });
- DPMtoSTORM.put(new Integer(5), new Object[] { TOverwriteMode.ALWAYS,
- new Boolean(true) });
- DPMtoSTORM.put(new Integer(6), new Object[] {
- TOverwriteMode.WHENFILESAREDIFFERENT, new Boolean(true) });
- Object aux;
- for (Iterator i = DPMtoSTORM.keySet().iterator(); i.hasNext();) {
- aux = i.next();
- STORMtoDPM.put(DPMtoSTORM.get(aux), aux);
- }
- }
-
- /**
- * Method that returns the only instance of OverwriteModeConverter.
- */
- public static CopyGlobalFlagConverter getInstance() {
-
- return c;
- }
-
- /**
- * Method that returns the int used by DPM to represent the given
- * TOverwriteMode and removeSourceFiles boolean. -1 is returned if no match is
- * found.
- */
- public int toDPM(TOverwriteMode om, boolean removeSourceFiles) {
-
- Integer aux = (Integer) STORMtoDPM.get(new Object[] { om,
- new Boolean(removeSourceFiles) });
- if (aux == null)
- return -1;
- return aux.intValue();
- }
-
- /**
- * Method that returns an Object[] containing the TOverwriteMode and the
- * boolean used by StoRM to represent the supplied int representation of DPM.
- * An empty Object[] is returned if no StoRM type is found.
- */
- public Object[] toSTORM(int n) {
-
- Object[] aux = (Object[]) DPMtoSTORM.get(new Integer(n));
- if (aux == null)
- return new Object[] {};
- return aux;
- }
-
- public String toString() {
-
- return "OverWriteModeConverter.\nDPMtoSTORM map:" + DPMtoSTORM
- + "\nSTORMtoDPM map:" + STORMtoDPM;
- }
-}
diff --git a/src/main/java/it/grid/storm/catalogs/CopyPersistentChunkData.java b/src/main/java/it/grid/storm/catalogs/CopyPersistentChunkData.java
deleted file mode 100644
index c9c1185aa..000000000
--- a/src/main/java/it/grid/storm/catalogs/CopyPersistentChunkData.java
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- *
- * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010.
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not
- * use this file except in compliance with the License. You may obtain a copy of
- * the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package it.grid.storm.catalogs;
-
-import it.grid.storm.srm.types.TFileStorageType;
-import it.grid.storm.srm.types.TLifeTimeInSeconds;
-import it.grid.storm.srm.types.TOverwriteMode;
-import it.grid.storm.srm.types.TRequestToken;
-import it.grid.storm.srm.types.TReturnStatus;
-import it.grid.storm.srm.types.TSURL;
-import it.grid.storm.srm.types.TSpaceToken;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * This class represents a CopyChunkData, that is part of a multifile Copy srm
- * request. It contains data about: the requestToken, the fromSURL, the toSURL,
- * the target fileLifeTime, the target fileStorageType and any available target
- * spaceToken, the target overwriteOption to be applied in case the file already
- * exists, the fileSize of the existing file if any, return status of the file
- * together with its error string.
- *
- * @author EGRID - ICTP Trieste
- * @date September, 2005
- * @version 2.0
- */
-public class CopyPersistentChunkData extends CopyData implements
- PersistentChunkData {
-
- private static final Logger log = LoggerFactory
- .getLogger(CopyPersistentChunkData.class);
-
- /**
- * long representing the primary key for the persistence layer!
- */
- private long primaryKey = -1;
-
- /**
- * This is the requestToken of the multifile srm request to which this chunk
- * belongs
- */
- private TRequestToken requestToken;
-
- public CopyPersistentChunkData(TRequestToken requestToken, TSURL fromSURL,
- TSURL destinationSURL, TLifeTimeInSeconds lifetime,
- TFileStorageType fileStorageType, TSpaceToken spaceToken,
- TOverwriteMode overwriteOption, TReturnStatus status)
- throws InvalidCopyPersistentChunkDataAttributesException,
- InvalidCopyDataAttributesException,
- InvalidSurlRequestDataAttributesException {
-
- super(fromSURL, destinationSURL, lifetime, fileStorageType, spaceToken,
- overwriteOption, status);
- if (requestToken == null) {
- log.debug("CopyPersistentChunkData: requestToken is null!");
- throw new InvalidCopyPersistentChunkDataAttributesException(requestToken,
- fromSURL, destinationSURL, lifetime, fileStorageType, spaceToken,
- overwriteOption, status);
- }
- this.requestToken = requestToken;
- }
-
- /**
- * Method used to get the primary key used in the persistence layer!
- */
- public long getPrimaryKey() {
-
- return primaryKey;
- }
-
- /**
- * Method used to set the primary key to be used in the persistence layer!
- */
- public void setPrimaryKey(long l) {
-
- primaryKey = l;
- }
-
- /**
- * Method that returns the requestToken of the srm request to which this chunk
- * belongs.
- */
- public TRequestToken getRequestToken() {
-
- return requestToken;
- }
-
- @Override
- public long getIdentifier() {
-
- return getPrimaryKey();
- }
-
-}
diff --git a/src/main/java/it/grid/storm/catalogs/CopySpecificFlagConverter.java b/src/main/java/it/grid/storm/catalogs/CopySpecificFlagConverter.java
deleted file mode 100644
index b83a7daa7..000000000
--- a/src/main/java/it/grid/storm/catalogs/CopySpecificFlagConverter.java
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
- *
- * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010.
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not
- * use this file except in compliance with the License. You may obtain a copy of
- * the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package it.grid.storm.catalogs;
-
-import java.util.Map;
-import java.util.HashMap;
-import java.util.Iterator;
-import it.grid.storm.srm.types.TOverwriteMode;
-
-/**
- * Package private auxiliary class used to convert between DPM and StoRM
- * representation of Copy TOverwriteMode+TDirOption request specific
- * information, and Flags in storm_copy_filereq.
- *
- * @author: EGRID - ICTP Trieste
- * @version: 1.0
- * @date: September 2005
- */
-class CopySpecificFlagConverter {
-
- private Map DPMtoSTORM = new HashMap();
- private Map STORMtoDPM = new HashMap();
-
- private static CopySpecificFlagConverter c = new CopySpecificFlagConverter();
-
- /**
- * Private constructor that fills in the conversion table; in particular, DPM
- * uses int values to represent the pair of values:
- *
- * 0 NEVER + source NOT directory 1 ALWAYS + source NOT directory 2
- * WHENFILESAREDIFFERENT + source NOT directory 4 NEVER + source is directory
- * 5 ALWAYS + source is directory 6 WHENFILESAREDIFFERENT + source is
- * directory
- */
- private CopySpecificFlagConverter() {
-
- DPMtoSTORM.put(new Integer(0), new Object[] { TOverwriteMode.NEVER,
- new Boolean(false) });
- DPMtoSTORM.put(new Integer(1), new Object[] { TOverwriteMode.ALWAYS,
- new Boolean(false) });
- DPMtoSTORM.put(new Integer(2), new Object[] {
- TOverwriteMode.WHENFILESAREDIFFERENT, new Boolean(false) });
- DPMtoSTORM.put(new Integer(4), new Object[] { TOverwriteMode.NEVER,
- new Boolean(true) });
- DPMtoSTORM.put(new Integer(5), new Object[] { TOverwriteMode.ALWAYS,
- new Boolean(true) });
- DPMtoSTORM.put(new Integer(6), new Object[] {
- TOverwriteMode.WHENFILESAREDIFFERENT, new Boolean(true) });
- Object aux;
- for (Iterator i = DPMtoSTORM.keySet().iterator(); i.hasNext();) {
- aux = i.next();
- STORMtoDPM.put(DPMtoSTORM.get(aux), aux);
- }
- }
-
- /**
- * Method that returns the only instance of CopySpecificFlagConverter.
- */
- public static CopySpecificFlagConverter getInstance() {
-
- return c;
- }
-
- /**
- * Method that returns the int used by DPM to represent the given
- * TOverwriteMode and isSourceADirectory boolean. -1 is returned if no match
- * is found.
- */
- public int toDPM(TOverwriteMode om, boolean isSourceADirectory) {
-
- Integer aux = (Integer) STORMtoDPM.get(new Object[] { om,
- new Boolean(isSourceADirectory) });
- if (aux == null)
- return -1;
- return aux.intValue();
- }
-
- /**
- * Method that returns an Object[] containing the TOverwriteMode and the
- * Boolean used by StoRM to represent the supplied int representation of DPM.
- * An empty Object[] is returned if no StoRM type is found.
- */
- public Object[] toSTORM(int n) {
-
- Object[] aux = (Object[]) DPMtoSTORM.get(new Integer(n));
- if (aux == null)
- return new Object[] {};
- return aux;
- }
-
- public String toString() {
-
- return "OverWriteModeConverter.\nDPMtoSTORM map:" + DPMtoSTORM
- + "\nSTORMtoDPM map:" + STORMtoDPM;
- }
-
-}
diff --git a/src/main/java/it/grid/storm/catalogs/DirOptionConverter.java b/src/main/java/it/grid/storm/catalogs/DirOptionConverter.java
deleted file mode 100644
index 55391d4d7..000000000
--- a/src/main/java/it/grid/storm/catalogs/DirOptionConverter.java
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- *
- * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010.
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not
- * use this file except in compliance with the License. You may obtain a copy of
- * the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package it.grid.storm.catalogs;
-
-/**
- * Package private class that translates between DPM flag for TDirOption and
- * StoRM TDirOption proper.
- *
- * In particular DPM uses the int 1 to denote a recursive call, yet it fails to
- * distinguish between a chosen recursion level; in other words there is no way
- * that DPM specifies the number of levels to recurse: so either you recurse
- * till the end or nothing.
- *
- * @author EGRID - ICTP Trieste
- * @version 1.0
- * @date August, 2005
- */
-class DirOptionConverter {
-
- static private DirOptionConverter converter = null;
-
- private DirOptionConverter() {
-
- }
-
- static public DirOptionConverter getInstance() {
-
- if (converter == null)
- converter = new DirOptionConverter();
- return converter;
- }
-
- /**
- * Method that translates the int used by DPM as flag for TDirOption, into a
- * boolean for isDirOption.
- *
- * 1 causes true to be returned; any other value returns 0.
- */
- public boolean toSTORM(int n) {
-
- return (n == 1);
- }
-
- /**
- * Method used to translate the boolean isDirOption into an int used by DPM to
- * express the same thing.
- *
- * true gets translated into 1; false into 0.
- */
- public int toDPM(boolean isDirOption) {
-
- if (isDirOption)
- return 1;
- return 0;
- }
-}
diff --git a/src/main/java/it/grid/storm/catalogs/FileLifetimeConverter.java b/src/main/java/it/grid/storm/catalogs/FileLifetimeConverter.java
deleted file mode 100644
index d84d199cf..000000000
--- a/src/main/java/it/grid/storm/catalogs/FileLifetimeConverter.java
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- *
- * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010.
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not
- * use this file except in compliance with the License. You may obtain a copy of
- * the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package it.grid.storm.catalogs;
-
-import it.grid.storm.srm.types.TLifeTimeInSeconds;
-import it.grid.storm.config.Configuration;
-
-/**
- * Class that handles DB representation of a pinLifetime as expressed by a
- * TLifetimeInSeconds objects; in particular it takes care of protocol
- * specification:
- *
- * 0/null/negative are translated as default StoRM configurable values. StoRMs
- * Empty TLifeTimeInSeconds is translated as 0.
- *
- * @author EGRID ICTP
- * @version 1.0
- * @date March 2007
- */
-public class FileLifetimeConverter {
-
- private static FileLifetimeConverter stc = new FileLifetimeConverter(); // only
- // instance
-
- private FileLifetimeConverter() {
-
- }
-
- /**
- * Method that returns the only instance of SizeInBytesIntConverter
- */
- public static FileLifetimeConverter getInstance() {
-
- return stc;
- }
-
- /**
- * Method that translates the Empty TLifeTimeInSeconds into the empty
- * representation of DB which is 0. Any other value is left as is.
- */
- public int toDB(long l) {
-
- if (l == TLifeTimeInSeconds.makeEmpty().value())
- return 0;
- return new Long(l).intValue();
- }
-
- /**
- * Method that returns the long corresponding to the int value in the DB,
- * except if it is 0, NULL or negative; a configurable default value is
- * returned instead, corresponding to the getFileLifetimeDefault()
- * Configuration class method.
- */
- public long toStoRM(int s) {
-
- if (s <= 0)
- return Configuration.getInstance().getFileLifetimeDefault();
- return new Integer(s).longValue();
- }
-}
diff --git a/src/main/java/it/grid/storm/catalogs/FileStorageTypeConverter.java b/src/main/java/it/grid/storm/catalogs/FileStorageTypeConverter.java
deleted file mode 100644
index 2e27fdb1a..000000000
--- a/src/main/java/it/grid/storm/catalogs/FileStorageTypeConverter.java
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- *
- * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010.
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not
- * use this file except in compliance with the License. You may obtain a copy of
- * the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package it.grid.storm.catalogs;
-
-import java.util.Map;
-import java.util.HashMap;
-import java.util.Iterator;
-import it.grid.storm.srm.types.TFileStorageType;
-import it.grid.storm.config.Configuration;
-
-/**
- * Package private auxiliary class used to convert between DB raw data and StoRM
- * object model representation of TFileStorageType.
- *
- * @author: EGRID ICTP
- * @version: 2.0
- * @date: June 2005
- */
-class FileStorageTypeConverter {
-
- private Map DBtoSTORM = new HashMap();
- private Map STORMtoDB = new HashMap();
-
- private static FileStorageTypeConverter c = new FileStorageTypeConverter();
-
- /**
- * Private constructor that fills in the conversion tables;
- *
- * V - VOLATILE P - PERMANENT D - DURABLE
- */
- private FileStorageTypeConverter() {
-
- DBtoSTORM.put("V", TFileStorageType.VOLATILE);
- DBtoSTORM.put("P", TFileStorageType.PERMANENT);
- DBtoSTORM.put("D", TFileStorageType.DURABLE);
- String aux;
- for (Iterator i = DBtoSTORM.keySet().iterator(); i.hasNext();) {
- aux = i.next();
- STORMtoDB.put(DBtoSTORM.get(aux), aux);
- }
- }
-
- /**
- * Method that returns the only instance of FileStorageTypeConverter.
- */
- public static FileStorageTypeConverter getInstance() {
-
- return c;
- }
-
- /**
- * Method that returns the String used in the DB to represent the given
- * TFileStorageType. The empty String "" is returned if no match is found.
- */
- public String toDB(TFileStorageType fst) {
-
- String aux = (String) STORMtoDB.get(fst);
- if (aux == null)
- return "";
- return aux;
- }
-
- /**
- * Method that returns the TFileStorageType used by StoRM to represent the
- * supplied String representation in the DB. A configured default
- * TFileStorageType is returned in case no corresponding StoRM type is found.
- * TFileStorageType.EMPTY is returned if there are configuration errors.
- */
- public TFileStorageType toSTORM(String s) {
-
- TFileStorageType aux = DBtoSTORM.get(s);
- if (aux == null)
- // This case is that the String s is different from V,P or D.
- aux = DBtoSTORM.get(Configuration.getInstance()
- .getDefaultFileStorageType());
- if (aux == null)
- // This case should never happen, but in case we prefer ponder PERMANENT.
- return TFileStorageType.EMPTY;
- else
- return aux;
- }
-
- public String toString() {
-
- return "FileStorageTypeConverter.\nDBtoSTORM map:" + DBtoSTORM
- + "\nSTORMtoDB map:" + STORMtoDB;
- }
-
-}
diff --git a/src/main/java/it/grid/storm/catalogs/FileTransferData.java b/src/main/java/it/grid/storm/catalogs/FileTransferData.java
deleted file mode 100644
index 505b7cba0..000000000
--- a/src/main/java/it/grid/storm/catalogs/FileTransferData.java
+++ /dev/null
@@ -1,25 +0,0 @@
-package it.grid.storm.catalogs;
-
-import it.grid.storm.common.types.TURLPrefix;
-import it.grid.storm.srm.types.TTURL;
-
-public interface FileTransferData extends SynchMultyOperationRequestData {
-
- /**
- * Method that returns a TURLPrefix containing the transfer protocols desired
- * for this chunk of the srm request.
- */
- public TURLPrefix getTransferProtocols();
-
- /**
- * Method that returns the TURL for this chunk of the srm request.
- */
- public TTURL getTransferURL();
-
- /**
- * Method used to set the transferURL associated to the SURL of this chunk. If
- * TTURL is null, then nothing gets set!
- */
- public void setTransferURL(final TTURL turl);
-
-}
diff --git a/src/main/java/it/grid/storm/catalogs/IdentityPtGData.java b/src/main/java/it/grid/storm/catalogs/IdentityPtGData.java
deleted file mode 100644
index 7a06f1db9..000000000
--- a/src/main/java/it/grid/storm/catalogs/IdentityPtGData.java
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- *
- * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010.
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not
- * use this file except in compliance with the License. You may obtain a copy of
- * the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package it.grid.storm.catalogs;
-
-import it.grid.storm.common.types.TURLPrefix;
-import it.grid.storm.griduser.GridUserInterface;
-import it.grid.storm.srm.types.TDirOption;
-import it.grid.storm.srm.types.TLifeTimeInSeconds;
-import it.grid.storm.srm.types.TReturnStatus;
-import it.grid.storm.srm.types.TSURL;
-import it.grid.storm.srm.types.TSizeInBytes;
-import it.grid.storm.srm.types.TTURL;
-import it.grid.storm.synchcall.data.IdentityInputData;
-
-public class IdentityPtGData extends AnonymousPtGData implements
- IdentityInputData {
-
- private final GridUserInterface auth;
-
- /**
- * @param requestToken
- * @param fromSURL
- * @param lifeTime
- * @param dirOption
- * @param desiredProtocols
- * @param fileSize
- * @param status
- * @param transferURL
- * @throws InvalidPtGDataAttributesException
- */
- public IdentityPtGData(GridUserInterface auth, TSURL SURL,
- TLifeTimeInSeconds lifeTime, TDirOption dirOption,
- TURLPrefix desiredProtocols, TSizeInBytes fileSize, TReturnStatus status,
- TTURL transferURL) throws InvalidPtGDataAttributesException,
- InvalidFileTransferDataAttributesException,
- InvalidSurlRequestDataAttributesException, IllegalArgumentException {
-
- super(SURL, lifeTime, dirOption, desiredProtocols, fileSize, status,
- transferURL);
- if (auth == null) {
- throw new IllegalArgumentException(
- "Unable to create the object, invalid arguments: auth=" + auth);
- }
- this.auth = auth;
- }
-
- @Override
- public GridUserInterface getUser() {
-
- return auth;
- }
-
- @Override
- public String getPrincipal() {
-
- return this.auth.getDn();
- }
-}
diff --git a/src/main/java/it/grid/storm/catalogs/IdentityPtPData.java b/src/main/java/it/grid/storm/catalogs/IdentityPtPData.java
deleted file mode 100644
index af35bc9bb..000000000
--- a/src/main/java/it/grid/storm/catalogs/IdentityPtPData.java
+++ /dev/null
@@ -1,68 +0,0 @@
-/**
- *
- */
-package it.grid.storm.catalogs;
-
-import it.grid.storm.common.types.TURLPrefix;
-import it.grid.storm.griduser.GridUserInterface;
-import it.grid.storm.srm.types.TFileStorageType;
-import it.grid.storm.srm.types.TLifeTimeInSeconds;
-import it.grid.storm.srm.types.TOverwriteMode;
-import it.grid.storm.srm.types.TReturnStatus;
-import it.grid.storm.srm.types.TSURL;
-import it.grid.storm.srm.types.TSizeInBytes;
-import it.grid.storm.srm.types.TSpaceToken;
-import it.grid.storm.srm.types.TTURL;
-import it.grid.storm.synchcall.data.IdentityInputData;
-
-/**
- * @author Michele Dibenedetto
- *
- */
-public class IdentityPtPData extends AnonymousPtPData implements
- IdentityInputData {
-
- private final GridUserInterface auth;
-
- /**
- * @param requestToken
- * @param fromSURL
- * @param lifeTime
- * @param dirOption
- * @param desiredProtocols
- * @param fileSize
- * @param status
- * @param transferURL
- * @throws InvalidPtGDataAttributesException
- */
- public IdentityPtPData(GridUserInterface auth, TSURL SURL,
- TLifeTimeInSeconds pinLifetime, TLifeTimeInSeconds fileLifetime,
- TFileStorageType fileStorageType, TSpaceToken spaceToken,
- TSizeInBytes expectedFileSize, TURLPrefix transferProtocols,
- TOverwriteMode overwriteOption, TReturnStatus status, TTURL transferURL)
- throws InvalidPtPDataAttributesException,
- InvalidFileTransferDataAttributesException,
- InvalidSurlRequestDataAttributesException, IllegalArgumentException {
-
- super(SURL, pinLifetime, fileLifetime, fileStorageType, spaceToken,
- expectedFileSize, transferProtocols, overwriteOption, status, transferURL);
- if (auth == null) {
- throw new IllegalArgumentException(
- "Unable to create the object, invalid arguments: auth=" + auth);
- }
- this.auth = auth;
- }
-
- @Override
- public GridUserInterface getUser() {
-
- return auth;
- }
-
- @Override
- public String getPrincipal() {
-
- return this.auth.getDn();
- }
-
-}
diff --git a/src/main/java/it/grid/storm/catalogs/InvalidBoLChunkDataAttributesException.java b/src/main/java/it/grid/storm/catalogs/InvalidBoLChunkDataAttributesException.java
deleted file mode 100644
index 5e782876e..000000000
--- a/src/main/java/it/grid/storm/catalogs/InvalidBoLChunkDataAttributesException.java
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- *
- * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010.
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not
- * use this file except in compliance with the License. You may obtain a copy of
- * the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package it.grid.storm.catalogs;
-
-import it.grid.storm.srm.types.TRequestToken;
-import it.grid.storm.srm.types.TSURL;
-import it.grid.storm.srm.types.TLifeTimeInSeconds;
-import it.grid.storm.srm.types.TDirOption;
-import it.grid.storm.common.types.TURLPrefix;
-import it.grid.storm.srm.types.TSizeInBytes;
-import it.grid.storm.srm.types.TReturnStatus;
-import it.grid.storm.srm.types.TTURL;
-
-/**
- * This class represents an exceptin thrown when the attributes supplied to the
- * constructor of BoLChunkData are invalid, that is if any of the following is
- * _null_: requestToken, fromSURL, lifeTime, numOfLevels, transferProtocols,
- * fileSize, status, transferURL.
- *
- * @author CNAF
- * @date Aug 2009
- * @version 1.0
- */
-public class InvalidBoLChunkDataAttributesException extends Exception {
-
- private static final long serialVersionUID = 5657310881067434280L;
-
- // booleans that indicate whether the corresponding variable is null
- private boolean nullRequestToken;
- private boolean nullFromSURL;
- private boolean nullLifeTime;
- private boolean nullDirOption;
- private boolean nullTransferProtocols;
- private boolean nullFileSize;
- private boolean nullStatus;
- private boolean nullTransferURL;
-
- /**
- * Constructor that requires the attributes that caused the exception to be
- * thrown.
- */
- public InvalidBoLChunkDataAttributesException(TRequestToken requestToken,
- TSURL fromSURL, TLifeTimeInSeconds lifeTime, TDirOption dirOption,
- TURLPrefix transferProtocols, TSizeInBytes fileSize, TReturnStatus status,
- TTURL transferURL) {
-
- nullRequestToken = requestToken == null;
- nullFromSURL = fromSURL == null;
- nullLifeTime = lifeTime == null;
- nullDirOption = dirOption == null;
- nullTransferProtocols = transferProtocols == null;
- nullFileSize = fileSize == null;
- nullStatus = status == null;
- nullTransferURL = transferURL == null;
- }
-
- public String toString() {
-
- StringBuilder sb = new StringBuilder();
- sb.append("Invalid BoLChunkData attributes: null-requestToken=");
- sb.append(nullRequestToken);
- sb.append("; nul-fromSURL=");
- sb.append(nullFromSURL);
- sb.append("; null-lifeTime=");
- sb.append(nullLifeTime);
- sb.append("; null-dirOption=");
- sb.append(nullDirOption);
- sb.append("; null-transferProtocols=");
- sb.append(nullTransferProtocols);
- sb.append("; null-fileSize=");
- sb.append(nullFileSize);
- sb.append("; null-status=");
- sb.append(nullStatus);
- sb.append("; null-transferURL=");
- sb.append(nullTransferURL);
- sb.append(".");
- return sb.toString();
- }
-}
diff --git a/src/main/java/it/grid/storm/catalogs/InvalidBoLDataAttributesException.java b/src/main/java/it/grid/storm/catalogs/InvalidBoLDataAttributesException.java
deleted file mode 100644
index 41a9a9afc..000000000
--- a/src/main/java/it/grid/storm/catalogs/InvalidBoLDataAttributesException.java
+++ /dev/null
@@ -1,94 +0,0 @@
-package it.grid.storm.catalogs;
-
-import it.grid.storm.common.types.TURLPrefix;
-import it.grid.storm.srm.types.TDirOption;
-import it.grid.storm.srm.types.TLifeTimeInSeconds;
-import it.grid.storm.srm.types.TReturnStatus;
-import it.grid.storm.srm.types.TSURL;
-import it.grid.storm.srm.types.TSizeInBytes;
-import it.grid.storm.srm.types.TTURL;
-
-/**
- * @author Michele Dibenedetto
- *
- */
-public class InvalidBoLDataAttributesException extends
- InvalidFileTransferDataAttributesException {
-
- private static final long serialVersionUID = 8113403994527678088L;
- // booleans that indicate whether the corresponding variable is null
- protected boolean nullLifeTime;
- protected boolean nullDirOption;
- protected boolean nullFileSize;
-
- public InvalidBoLDataAttributesException(TSURL fromSURL,
- TLifeTimeInSeconds lifeTime, TDirOption dirOption,
- TURLPrefix transferProtocols, TSizeInBytes fileSize, TReturnStatus status,
- TTURL transferURL) {
-
- super(fromSURL, transferProtocols, status, transferURL);
- init(lifeTime, dirOption, fileSize);
- }
-
- public InvalidBoLDataAttributesException(TSURL fromSURL,
- TLifeTimeInSeconds lifeTime, TDirOption dirOption,
- TURLPrefix transferProtocols, TSizeInBytes fileSize, TReturnStatus status,
- TTURL transferURL, String message) {
-
- super(fromSURL, transferProtocols, status, transferURL, message);
- init(lifeTime, dirOption, fileSize);
- }
-
- public InvalidBoLDataAttributesException(TSURL fromSURL,
- TLifeTimeInSeconds lifeTime, TDirOption dirOption,
- TURLPrefix transferProtocols, TSizeInBytes fileSize, TReturnStatus status,
- TTURL transferURL, Throwable cause) {
-
- super(fromSURL, transferProtocols, status, transferURL, cause);
- init(lifeTime, dirOption, fileSize);
- }
-
- public InvalidBoLDataAttributesException(TSURL fromSURL,
- TLifeTimeInSeconds lifeTime, TDirOption dirOption,
- TURLPrefix transferProtocols, TSizeInBytes fileSize, TReturnStatus status,
- TTURL transferURL, String message, Throwable cause) {
-
- super(fromSURL, transferProtocols, status, transferURL, message, cause);
- init(lifeTime, dirOption, fileSize);
- }
-
- private void init(TLifeTimeInSeconds lifeTime, TDirOption dirOption,
- TSizeInBytes fileSize) {
-
- nullLifeTime = lifeTime == null;
- nullDirOption = dirOption == null;
- nullFileSize = fileSize == null;
- }
-
- /*
- * (non-Javadoc)
- *
- * @see java.lang.Object#toString()
- */
- @Override
- public String toString() {
-
- StringBuilder builder = new StringBuilder();
- builder.append("InvalidBoLDataAttributesException [nullLifeTime=");
- builder.append(nullLifeTime);
- builder.append(", nullDirOption=");
- builder.append(nullDirOption);
- builder.append(", nullFileSize=");
- builder.append(nullFileSize);
- builder.append(", nullSURL=");
- builder.append(nullSURL);
- builder.append(", nullTransferProtocols=");
- builder.append(nullTransferProtocols);
- builder.append(", nullStatus=");
- builder.append(nullStatus);
- builder.append(", nullTransferURL=");
- builder.append(nullTransferURL);
- builder.append("]");
- return builder.toString();
- }
-}
diff --git a/src/main/java/it/grid/storm/catalogs/InvalidBoLPersistentChunkDataAttributesException.java b/src/main/java/it/grid/storm/catalogs/InvalidBoLPersistentChunkDataAttributesException.java
deleted file mode 100644
index a47e5433c..000000000
--- a/src/main/java/it/grid/storm/catalogs/InvalidBoLPersistentChunkDataAttributesException.java
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010.
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not
- * use this file except in compliance with the License. You may obtain a copy of
- * the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by
- * applicable law or agreed to in writing, software distributed under the
- * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
- * OF ANY KIND, either express or implied. See the License for the specific
- * language governing permissions and limitations under the License.
- */
-
-package it.grid.storm.catalogs;
-
-import it.grid.storm.srm.types.TDirOption;
-import it.grid.storm.srm.types.TRequestToken;
-import it.grid.storm.srm.types.TSURL;
-import it.grid.storm.srm.types.TLifeTimeInSeconds;
-import it.grid.storm.common.types.TURLPrefix;
-import it.grid.storm.srm.types.TSizeInBytes;
-import it.grid.storm.srm.types.TReturnStatus;
-import it.grid.storm.srm.types.TTURL;
-
-/**
- * This class represents an exceptin thrown when the attributes supplied to the
- * constructor of PtPChunkData are invalid, that is if any of the following is
- * _null_: requestToken, toSURL, lifetime, fileStorageType, spaceToken,
- * knownSizeOfThisFile, TURLPrefix transferProtocols, overwriteOption, fileSize,
- * status, transferURL.
- *
- * @author EGRID - ICTP Trieste
- * @date June, 2005
- * @version 2.0
- */
-public class InvalidBoLPersistentChunkDataAttributesException extends
- InvalidBoLDataAttributesException {
-
- private static final long serialVersionUID = -5117535717125685975L;
- /**
- * booleans that indicate whether the corresponding variable is null
- */
- boolean nullRequestToken;
-
- /**
- * Constructor that requires the attributes that caused the exception to be
- * thrown.
- */
- public InvalidBoLPersistentChunkDataAttributesException(
- TRequestToken requestToken, TSURL fromSURL, TLifeTimeInSeconds lifeTime,
- TDirOption dirOption, TURLPrefix desiredProtocols, TSizeInBytes fileSize,
- TReturnStatus status, TTURL transferURL) {
-
- super(fromSURL, lifeTime, dirOption, desiredProtocols, fileSize, status,
- transferURL);
- nullRequestToken = requestToken == null;
- }
-
- /*
- * (non-Javadoc)
- *
- * @see java.lang.Object#toString()
- */
- @Override
- public String toString() {
-
- StringBuilder builder = new StringBuilder();
- builder
- .append("InvalidBoLPersistentChunkDataAttributesException [nullRequestToken=");
- builder.append(nullRequestToken);
- builder.append(", nullLifeTime=");
- builder.append(nullLifeTime);
- builder.append(", nullDirOption=");
- builder.append(nullDirOption);
- builder.append(", nullFileSize=");
- builder.append(nullFileSize);
- builder.append(", nullSURL=");
- builder.append(nullSURL);
- builder.append(", nullTransferProtocols=");
- builder.append(nullTransferProtocols);
- builder.append(", nullStatus=");
- builder.append(nullStatus);
- builder.append(", nullTransferURL=");
- builder.append(nullTransferURL);
- builder.append("]");
- return builder.toString();
- }
-
-}
diff --git a/src/main/java/it/grid/storm/catalogs/InvalidCopyChunkDataAttributesException.java b/src/main/java/it/grid/storm/catalogs/InvalidCopyChunkDataAttributesException.java
deleted file mode 100644
index 01363fd8e..000000000
--- a/src/main/java/it/grid/storm/catalogs/InvalidCopyChunkDataAttributesException.java
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- *
- * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010.
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not
- * use this file except in compliance with the License. You may obtain a copy of
- * the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package it.grid.storm.catalogs;
-
-import it.grid.storm.srm.types.TRequestToken;
-import it.grid.storm.srm.types.TSURL;
-import it.grid.storm.srm.types.TLifeTimeInSeconds;
-import it.grid.storm.srm.types.TFileStorageType;
-import it.grid.storm.srm.types.TSpaceToken;
-import it.grid.storm.srm.types.TOverwriteMode;
-import it.grid.storm.srm.types.TReturnStatus;
-
-/**
- * This class represents an exceptin thrown when the attributes supplied to the
- * constructor of CopyChunkData are invalid, that is if any of the following is
- * _null_: requestToken, fromsURL, toSURL, lifetime, fileStorageType,
- * spaceToken, overwriteOption, status.
- *
- * @author EGRID - ICTP Trieste
- * @date September, 2005
- * @version 2.0
- */
-public class InvalidCopyChunkDataAttributesException extends Exception {
-
- private static final long serialVersionUID = 6786154038995023512L;
-
- // booleans that indicate whether the corresponding variable is null
- private boolean nullRequestToken;
- private boolean nullFromSURL;
- private boolean nullToSURL;
- private boolean nullLifetime;
- private boolean nullFileStorageType;
- private boolean nullSpaceToken;
- private boolean nullOverwriteOption;
- private boolean nullStatus;
-
- /**
- * Constructor that requires the attributes that caused the exception to be
- * thrown.
- */
- public InvalidCopyChunkDataAttributesException(TRequestToken requestToken,
- TSURL fromSURL, TSURL toSURL, TLifeTimeInSeconds lifetime,
- TFileStorageType fileStorageType, TSpaceToken spaceToken,
- TOverwriteMode overwriteOption, TReturnStatus status) {
-
- nullRequestToken = requestToken == null;
- nullFromSURL = fromSURL == null;
- nullToSURL = toSURL == null;
- nullLifetime = lifetime == null;
- nullFileStorageType = fileStorageType == null;
- nullSpaceToken = spaceToken == null;
- nullOverwriteOption = overwriteOption == null;
- nullStatus = status == null;
- }
-
- public String toString() {
-
- StringBuilder sb = new StringBuilder();
- sb.append("Invalid CopyChunkData attributes: null-requestToken=");
- sb.append(nullRequestToken);
- sb.append("; null-fromSURL=");
- sb.append(nullFromSURL);
- sb.append("; null-toSURL=");
- sb.append(nullToSURL);
- sb.append("; null-lifetime=");
- sb.append(nullLifetime);
- sb.append("; null-filestorageType=");
- sb.append(nullFileStorageType);
- sb.append("; null-spaceToken=");
- sb.append(nullSpaceToken);
- sb.append("; null-overwriteOption=");
- sb.append(nullOverwriteOption);
- sb.append("; null-status=");
- sb.append(nullStatus);
- sb.append(".");
- return sb.toString();
- }
-}
diff --git a/src/main/java/it/grid/storm/catalogs/InvalidCopyDataAttributesException.java b/src/main/java/it/grid/storm/catalogs/InvalidCopyDataAttributesException.java
deleted file mode 100644
index 8af415056..000000000
--- a/src/main/java/it/grid/storm/catalogs/InvalidCopyDataAttributesException.java
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- *
- * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010.
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not
- * use this file except in compliance with the License. You may obtain a copy of
- * the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package it.grid.storm.catalogs;
-
-import it.grid.storm.srm.types.TFileStorageType;
-import it.grid.storm.srm.types.TLifeTimeInSeconds;
-import it.grid.storm.srm.types.TOverwriteMode;
-import it.grid.storm.srm.types.TReturnStatus;
-import it.grid.storm.srm.types.TSURL;
-import it.grid.storm.srm.types.TSpaceToken;
-
-/**
- * @author Michele Dibenedetto
- *
- */
-public class InvalidCopyDataAttributesException extends
- InvalidSurlRequestDataAttributesException {
-
- private static final long serialVersionUID = -1217486426437414490L;
- protected boolean nullDestinationSURL;
- protected boolean nullLifetime;
- protected boolean nullFileStorageType;
- protected boolean nullSpaceToken;
- protected boolean nullOverwriteOption;
-
- public InvalidCopyDataAttributesException(TSURL SURL, TSURL destinationSURL,
- TLifeTimeInSeconds lifetime, TFileStorageType fileStorageType,
- TSpaceToken spaceToken, TOverwriteMode overwriteOption, TReturnStatus status) {
-
- super(SURL, status);
- init(destinationSURL, lifetime, fileStorageType, spaceToken,
- overwriteOption);
- }
-
- public InvalidCopyDataAttributesException(TSURL SURL, TSURL destinationSURL,
- TLifeTimeInSeconds lifetime, TFileStorageType fileStorageType,
- TSpaceToken spaceToken, TOverwriteMode overwriteOption,
- TReturnStatus status, String message) {
-
- super(SURL, status, message);
- init(destinationSURL, lifetime, fileStorageType, spaceToken,
- overwriteOption);
- }
-
- public InvalidCopyDataAttributesException(TSURL SURL, TSURL destinationSURL,
- TLifeTimeInSeconds lifetime, TFileStorageType fileStorageType,
- TSpaceToken spaceToken, TOverwriteMode overwriteOption,
- TReturnStatus status, Throwable cause) {
-
- super(SURL, status, cause);
- init(destinationSURL, lifetime, fileStorageType, spaceToken,
- overwriteOption);
- }
-
- public InvalidCopyDataAttributesException(TSURL SURL, TSURL destinationSURL,
- TLifeTimeInSeconds lifetime, TFileStorageType fileStorageType,
- TSpaceToken spaceToken, TOverwriteMode overwriteOption,
- TReturnStatus status, String message, Throwable cause) {
-
- super(SURL, status, message, cause);
- init(destinationSURL, lifetime, fileStorageType, spaceToken,
- overwriteOption);
- }
-
- private void init(TSURL destinationSURL, TLifeTimeInSeconds lifetime,
- TFileStorageType fileStorageType, TSpaceToken spaceToken,
- TOverwriteMode overwriteOption) {
-
- nullDestinationSURL = destinationSURL == null;
- nullLifetime = lifetime == null;
- nullFileStorageType = fileStorageType == null;
- nullSpaceToken = spaceToken == null;
- nullOverwriteOption = overwriteOption == null;
- }
-
- /*
- * (non-Javadoc)
- *
- * @see java.lang.Object#toString()
- */
- @Override
- public String toString() {
-
- StringBuilder builder = new StringBuilder();
- builder.append("InvalidCopyDataAttributesException [nullDestinationSURL=");
- builder.append(nullDestinationSURL);
- builder.append(", nullLifetime=");
- builder.append(nullLifetime);
- builder.append(", nullFileStorageType=");
- builder.append(nullFileStorageType);
- builder.append(", nullSpaceToken=");
- builder.append(nullSpaceToken);
- builder.append(", nullOverwriteOption=");
- builder.append(nullOverwriteOption);
- builder.append(", nullSURL=");
- builder.append(nullSURL);
- builder.append(", nullStatus=");
- builder.append(nullStatus);
- builder.append("]");
- return builder.toString();
- }
-}
diff --git a/src/main/java/it/grid/storm/catalogs/InvalidCopyPersistentChunkDataAttributesException.java b/src/main/java/it/grid/storm/catalogs/InvalidCopyPersistentChunkDataAttributesException.java
deleted file mode 100644
index 77cdb8dcd..000000000
--- a/src/main/java/it/grid/storm/catalogs/InvalidCopyPersistentChunkDataAttributesException.java
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
- * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010.
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not
- * use this file except in compliance with the License. You may obtain a copy of
- * the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by
- * applicable law or agreed to in writing, software distributed under the
- * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
- * OF ANY KIND, either express or implied. See the License for the specific
- * language governing permissions and limitations under the License.
- */
-
-package it.grid.storm.catalogs;
-
-import it.grid.storm.srm.types.TFileStorageType;
-import it.grid.storm.srm.types.TLifeTimeInSeconds;
-import it.grid.storm.srm.types.TOverwriteMode;
-import it.grid.storm.srm.types.TRequestToken;
-import it.grid.storm.srm.types.TReturnStatus;
-import it.grid.storm.srm.types.TSURL;
-import it.grid.storm.srm.types.TSpaceToken;
-
-/**
- * @author Michele Dibenedetto
- *
- */
-public class InvalidCopyPersistentChunkDataAttributesException extends
- InvalidCopyDataAttributesException {
-
- /**
- *
- */
- private static final long serialVersionUID = 1266996505954208061L;
- private boolean nullRequestToken;
-
- public InvalidCopyPersistentChunkDataAttributesException(
- TRequestToken requestToken, TSURL SURL, TSURL destinationSURL,
- TLifeTimeInSeconds lifetime, TFileStorageType fileStorageType,
- TSpaceToken spaceToken, TOverwriteMode overwriteOption, TReturnStatus status) {
-
- super(SURL, destinationSURL, lifetime, fileStorageType, spaceToken,
- overwriteOption, status);
- init(requestToken);
- }
-
- public InvalidCopyPersistentChunkDataAttributesException(
- TRequestToken requestToken, TSURL SURL, TSURL destinationSURL,
- TLifeTimeInSeconds lifetime, TFileStorageType fileStorageType,
- TSpaceToken spaceToken, TOverwriteMode overwriteOption,
- TReturnStatus status, String message) {
-
- super(SURL, destinationSURL, lifetime, fileStorageType, spaceToken,
- overwriteOption, status, message);
- init(requestToken);
- }
-
- public InvalidCopyPersistentChunkDataAttributesException(
- TRequestToken requestToken, TSURL SURL, TSURL destinationSURL,
- TLifeTimeInSeconds lifetime, TFileStorageType fileStorageType,
- TSpaceToken spaceToken, TOverwriteMode overwriteOption,
- TReturnStatus status, Throwable cause) {
-
- super(SURL, destinationSURL, lifetime, fileStorageType, spaceToken,
- overwriteOption, status, cause);
- init(requestToken);
- }
-
- public InvalidCopyPersistentChunkDataAttributesException(
- TRequestToken requestToken, TSURL SURL, TSURL destinationSURL,
- TLifeTimeInSeconds lifetime, TFileStorageType fileStorageType,
- TSpaceToken spaceToken, TOverwriteMode overwriteOption,
- TReturnStatus status, String message, Throwable cause) {
-
- super(SURL, destinationSURL, lifetime, fileStorageType, spaceToken,
- overwriteOption, status, message, cause);
- init(requestToken);
- }
-
- private void init(TRequestToken requestToken) {
-
- nullRequestToken = requestToken == null;
- }
-
- /*
- * (non-Javadoc)
- *
- * @see java.lang.Object#toString()
- */
- @Override
- public String toString() {
-
- StringBuilder builder = new StringBuilder();
- builder
- .append("InvalidCopyPersistentChunkDataAttributesException [nullRequestToken=");
- builder.append(nullRequestToken);
- builder.append(", nullDestinationSURL=");
- builder.append(nullDestinationSURL);
- builder.append(", nullLifetime=");
- builder.append(nullLifetime);
- builder.append(", nullFileStorageType=");
- builder.append(nullFileStorageType);
- builder.append(", nullSpaceToken=");
- builder.append(nullSpaceToken);
- builder.append(", nullOverwriteOption=");
- builder.append(nullOverwriteOption);
- builder.append(", nullSURL=");
- builder.append(nullSURL);
- builder.append(", nullStatus=");
- builder.append(nullStatus);
- builder.append("]");
- return builder.toString();
- }
-}
diff --git a/src/main/java/it/grid/storm/catalogs/InvalidFileTransferDataAttributesException.java b/src/main/java/it/grid/storm/catalogs/InvalidFileTransferDataAttributesException.java
deleted file mode 100644
index fc28c0743..000000000
--- a/src/main/java/it/grid/storm/catalogs/InvalidFileTransferDataAttributesException.java
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010.
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not
- * use this file except in compliance with the License. You may obtain a copy of
- * the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by
- * applicable law or agreed to in writing, software distributed under the
- * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
- * OF ANY KIND, either express or implied. See the License for the specific
- * language governing permissions and limitations under the License.
- */
-
-package it.grid.storm.catalogs;
-
-import it.grid.storm.common.types.TURLPrefix;
-import it.grid.storm.srm.types.TReturnStatus;
-import it.grid.storm.srm.types.TSURL;
-import it.grid.storm.srm.types.TTURL;
-
-/**
- * @author Michele Dibenedetto
- */
-public class InvalidFileTransferDataAttributesException extends
- InvalidSurlRequestDataAttributesException {
-
- private static final long serialVersionUID = 4416318501544415810L;
- protected boolean nullTransferProtocols;
- protected boolean nullTransferURL;
-
- public InvalidFileTransferDataAttributesException(TSURL SURL,
- TURLPrefix transferProtocols, TReturnStatus status, TTURL transferURL) {
-
- super(SURL, status);
- init(transferProtocols, transferURL);
- }
-
- public InvalidFileTransferDataAttributesException(TSURL SURL,
- TURLPrefix transferProtocols, TReturnStatus status, TTURL transferURL,
- String message) {
-
- super(SURL, status, message);
- init(transferProtocols, transferURL);
- }
-
- public InvalidFileTransferDataAttributesException(TSURL SURL,
- TURLPrefix transferProtocols, TReturnStatus status, TTURL transferURL,
- Throwable cause) {
-
- super(SURL, status, cause);
- init(transferProtocols, transferURL);
- }
-
- public InvalidFileTransferDataAttributesException(TSURL SURL,
- TURLPrefix transferProtocols, TReturnStatus status, TTURL transferURL,
- String message, Throwable cause) {
-
- super(SURL, status, message, cause);
- init(transferProtocols, transferURL);
- }
-
- private void init(TURLPrefix transferProtocols, TTURL transferURL) {
-
- nullTransferProtocols = transferProtocols == null;
- nullTransferURL = transferURL == null;
- }
-
- /*
- * (non-Javadoc)
- *
- * @see java.lang.Object#toString()
- */
- @Override
- public String toString() {
-
- StringBuilder builder = new StringBuilder();
- builder
- .append("InvalidFileTransferDataAttributesException [nullTransferProtocols=");
- builder.append(nullTransferProtocols);
- builder.append(", nullTransferURL=");
- builder.append(nullTransferURL);
- builder.append(", nullSURL=");
- builder.append(nullSURL);
- builder.append(", nullStatus=");
- builder.append(nullStatus);
- builder.append("]");
- return builder.toString();
- }
-}
diff --git a/src/main/java/it/grid/storm/catalogs/InvalidPtGDataAttributesException.java b/src/main/java/it/grid/storm/catalogs/InvalidPtGDataAttributesException.java
deleted file mode 100644
index 42ed5c4eb..000000000
--- a/src/main/java/it/grid/storm/catalogs/InvalidPtGDataAttributesException.java
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- *
- * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010.
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not
- * use this file except in compliance with the License. You may obtain a copy of
- * the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package it.grid.storm.catalogs;
-
-import it.grid.storm.srm.types.TSURL;
-import it.grid.storm.srm.types.TLifeTimeInSeconds;
-import it.grid.storm.srm.types.TDirOption;
-import it.grid.storm.common.types.TURLPrefix;
-import it.grid.storm.srm.types.TSizeInBytes;
-import it.grid.storm.srm.types.TReturnStatus;
-import it.grid.storm.srm.types.TTURL;
-
-/**
- * This class represents an exceptin thrown when the attributes supplied to the
- * constructor of PtGChunkData are invalid, that is if any of the following is
- * _null_: requestToken, fromSURL, lifeTime, numOfLevels, transferProtocols,
- * fileSize, status, transferURL.
- *
- * @author EGRID - ICTP Trieste
- * @date March 23rd, 2005
- * @version 3.0
- */
-public class InvalidPtGDataAttributesException extends
- InvalidFileTransferDataAttributesException {
-
- private static final long serialVersionUID = -3484929474636108262L;
- // booleans that indicate whether the corresponding variable is null
- protected boolean nullLifeTime;
- protected boolean nullDirOption;
- protected boolean nullFileSize;
-
- /**
- * Constructor that requires the attributes that caused the exception to be
- * thrown.
- */
- public InvalidPtGDataAttributesException(TSURL fromSURL,
- TLifeTimeInSeconds lifeTime, TDirOption dirOption,
- TURLPrefix transferProtocols, TSizeInBytes fileSize, TReturnStatus status,
- TTURL transferURL) {
-
- super(fromSURL, transferProtocols, status, transferURL);
- nullLifeTime = lifeTime == null;
- nullDirOption = dirOption == null;
- nullFileSize = fileSize == null;
- }
-
- /*
- * (non-Javadoc)
- *
- * @see java.lang.Object#toString()
- */
- @Override
- public String toString() {
-
- StringBuilder builder = new StringBuilder();
- builder.append("InvalidPtGChunkDataAttributesException [nullLifeTime=");
- builder.append(nullLifeTime);
- builder.append(", nullDirOption=");
- builder.append(nullDirOption);
- builder.append(", nullFileSize=");
- builder.append(nullFileSize);
- builder.append(", nullSURL=");
- builder.append(nullSURL);
- builder.append(", nullTransferProtocols=");
- builder.append(nullTransferProtocols);
- builder.append(", nullStatus=");
- builder.append(nullStatus);
- builder.append(", nullTransferURL=");
- builder.append(nullTransferURL);
- builder.append("]");
- return builder.toString();
- }
-}
diff --git a/src/main/java/it/grid/storm/catalogs/InvalidPtGPersistentChunkDataAttributesException.java b/src/main/java/it/grid/storm/catalogs/InvalidPtGPersistentChunkDataAttributesException.java
deleted file mode 100644
index 9ab9dcadb..000000000
--- a/src/main/java/it/grid/storm/catalogs/InvalidPtGPersistentChunkDataAttributesException.java
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010.
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not
- * use this file except in compliance with the License. You may obtain a copy of
- * the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by
- * applicable law or agreed to in writing, software distributed under the
- * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
- * OF ANY KIND, either express or implied. See the License for the specific
- * language governing permissions and limitations under the License.
- */
-
-package it.grid.storm.catalogs;
-
-import it.grid.storm.srm.types.TDirOption;
-import it.grid.storm.srm.types.TRequestToken;
-import it.grid.storm.srm.types.TSURL;
-import it.grid.storm.srm.types.TLifeTimeInSeconds;
-import it.grid.storm.common.types.TURLPrefix;
-import it.grid.storm.srm.types.TSizeInBytes;
-import it.grid.storm.srm.types.TReturnStatus;
-import it.grid.storm.srm.types.TTURL;
-
-/**
- * This class represents an exceptin thrown when the attributes supplied to the
- * constructor of PtPChunkData are invalid, that is if any of the following is
- * _null_: requestToken, toSURL, lifetime, fileStorageType, spaceToken,
- * knownSizeOfThisFile, TURLPrefix transferProtocols, overwriteOption, fileSize,
- * status, transferURL.
- *
- * @author EGRID - ICTP Trieste
- * @date June, 2005
- * @version 2.0
- */
-public class InvalidPtGPersistentChunkDataAttributesException extends
- InvalidPtGDataAttributesException {
-
- private static final long serialVersionUID = -5117535717125685975L;
- /**
- * booleans that indicate whether the corresponding variable is null
- */
- boolean nullRequestToken;
-
- /**
- * Constructor that requires the attributes that caused the exception to be
- * thrown.
- */
- public InvalidPtGPersistentChunkDataAttributesException(
- TRequestToken requestToken, TSURL fromSURL, TLifeTimeInSeconds lifeTime,
- TDirOption dirOption, TURLPrefix transferProtocols, TSizeInBytes fileSize,
- TReturnStatus status, TTURL transferURL) {
-
- super(fromSURL, lifeTime, dirOption, transferProtocols, fileSize, status,
- transferURL);
- nullRequestToken = requestToken == null;
- }
-
- /*
- * (non-Javadoc)
- *
- * @see java.lang.Object#toString()
- */
- @Override
- public String toString() {
-
- StringBuilder builder = new StringBuilder();
- builder
- .append("InvalidPtGPersistentChunkDataAttributesException [nullRequestToken=");
- builder.append(nullRequestToken);
- builder.append(", nullLifeTime=");
- builder.append(nullLifeTime);
- builder.append(", nullDirOption=");
- builder.append(nullDirOption);
- builder.append(", nullFileSize=");
- builder.append(nullFileSize);
- builder.append(", nullSURL=");
- builder.append(nullSURL);
- builder.append(", nullTransferProtocols=");
- builder.append(nullTransferProtocols);
- builder.append(", nullStatus=");
- builder.append(nullStatus);
- builder.append(", nullTransferURL=");
- builder.append(nullTransferURL);
- builder.append("]");
- return builder.toString();
- }
-}
diff --git a/src/main/java/it/grid/storm/catalogs/InvalidPtPDataAttributesException.java b/src/main/java/it/grid/storm/catalogs/InvalidPtPDataAttributesException.java
deleted file mode 100644
index 55d445e35..000000000
--- a/src/main/java/it/grid/storm/catalogs/InvalidPtPDataAttributesException.java
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
- * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010.
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not
- * use this file except in compliance with the License. You may obtain a copy of
- * the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by
- * applicable law or agreed to in writing, software distributed under the
- * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
- * OF ANY KIND, either express or implied. See the License for the specific
- * language governing permissions and limitations under the License.
- */
-
-package it.grid.storm.catalogs;
-
-import it.grid.storm.common.types.TURLPrefix;
-import it.grid.storm.srm.types.TFileStorageType;
-import it.grid.storm.srm.types.TLifeTimeInSeconds;
-import it.grid.storm.srm.types.TOverwriteMode;
-import it.grid.storm.srm.types.TReturnStatus;
-import it.grid.storm.srm.types.TSURL;
-import it.grid.storm.srm.types.TSizeInBytes;
-import it.grid.storm.srm.types.TSpaceToken;
-import it.grid.storm.srm.types.TTURL;
-
-/**
- * @author Michele Dibenedetto
- */
-public class InvalidPtPDataAttributesException extends
- InvalidFileTransferDataAttributesException {
-
- /**
- *
- */
- private static final long serialVersionUID = 1051060981188652979L;
- protected boolean nullSpaceToken;
- protected boolean nullPinLifetime;
- protected boolean nullFileLifetime;
- protected boolean nullFileStorageType;
- protected boolean nullKnownSizeOfThisFile;
- protected boolean nullOverwriteOption;
-
- public InvalidPtPDataAttributesException(TSURL toSURL,
- TLifeTimeInSeconds fileLifetime, TLifeTimeInSeconds pinLifetime,
- TFileStorageType fileStorageType, TSpaceToken spaceToken,
- TSizeInBytes knownSizeOfThisFile, TURLPrefix transferProtocols,
- TOverwriteMode overwriteOption, TReturnStatus status, TTURL transferURL) {
-
- super(toSURL, transferProtocols, status, transferURL);
- init(spaceToken, fileLifetime, pinLifetime, fileStorageType,
- knownSizeOfThisFile, overwriteOption);
- }
-
- public InvalidPtPDataAttributesException(TSURL toSURL,
- TLifeTimeInSeconds fileLifetime, TLifeTimeInSeconds pinLifetime,
- TFileStorageType fileStorageType, TSpaceToken spaceToken,
- TSizeInBytes knownSizeOfThisFile, TURLPrefix transferProtocols,
- TOverwriteMode overwriteOption, TReturnStatus status, TTURL transferURL,
- String message) {
-
- super(toSURL, transferProtocols, status, transferURL, message);
- init(spaceToken, fileLifetime, pinLifetime, fileStorageType,
- knownSizeOfThisFile, overwriteOption);
- }
-
- public InvalidPtPDataAttributesException(TSURL toSURL,
- TLifeTimeInSeconds fileLifetime, TLifeTimeInSeconds pinLifetime,
- TFileStorageType fileStorageType, TSpaceToken spaceToken,
- TSizeInBytes knownSizeOfThisFile, TURLPrefix transferProtocols,
- TOverwriteMode overwriteOption, TReturnStatus status, TTURL transferURL,
- Throwable cause) {
-
- super(toSURL, transferProtocols, status, transferURL, cause);
- init(spaceToken, fileLifetime, pinLifetime, fileStorageType,
- knownSizeOfThisFile, overwriteOption);
- }
-
- public InvalidPtPDataAttributesException(TSURL toSURL,
- TLifeTimeInSeconds fileLifetime, TLifeTimeInSeconds pinLifetime,
- TFileStorageType fileStorageType, TSpaceToken spaceToken,
- TSizeInBytes knownSizeOfThisFile, TURLPrefix transferProtocols,
- TOverwriteMode overwriteOption, TReturnStatus status, TTURL transferURL,
- String message, Throwable cause) {
-
- super(toSURL, transferProtocols, status, transferURL, message, cause);
- init(spaceToken, fileLifetime, pinLifetime, fileStorageType,
- knownSizeOfThisFile, overwriteOption);
- }
-
- private void init(TSpaceToken spaceToken, TLifeTimeInSeconds fileLifetime,
- TLifeTimeInSeconds pinLifetime, TFileStorageType fileStorageType,
- TSizeInBytes knownSizeOfThisFile, TOverwriteMode overwriteOption) {
-
- nullSpaceToken = spaceToken == null;
- nullPinLifetime = pinLifetime == null;
- nullFileLifetime = fileLifetime == null;
- nullFileStorageType = fileStorageType == null;
- nullKnownSizeOfThisFile = knownSizeOfThisFile == null;
- nullOverwriteOption = overwriteOption == null;
- }
-
- /*
- * (non-Javadoc)
- *
- * @see java.lang.Object#toString()
- */
- @Override
- public String toString() {
-
- StringBuilder builder = new StringBuilder();
- builder.append("InvalidPtPDataAttributesException [nullSpaceToken=");
- builder.append(nullSpaceToken);
- builder.append(", nullPinLifetime=");
- builder.append(nullPinLifetime);
- builder.append(", nullFileLifetime=");
- builder.append(nullFileLifetime);
- builder.append(", nullFileStorageType=");
- builder.append(nullFileStorageType);
- builder.append(", nullKnownSizeOfThisFile=");
- builder.append(nullKnownSizeOfThisFile);
- builder.append(", nullOverwriteOption=");
- builder.append(nullOverwriteOption);
- builder.append(", nullSURL=");
- builder.append(nullSURL);
- builder.append(", nullTransferProtocols=");
- builder.append(nullTransferProtocols);
- builder.append(", nullStatus=");
- builder.append(nullStatus);
- builder.append(", nullTransferURL=");
- builder.append(nullTransferURL);
- builder.append("]");
- return builder.toString();
- }
-}
diff --git a/src/main/java/it/grid/storm/catalogs/InvalidPtPPersistentChunkDataAttributesException.java b/src/main/java/it/grid/storm/catalogs/InvalidPtPPersistentChunkDataAttributesException.java
deleted file mode 100644
index cb191a997..000000000
--- a/src/main/java/it/grid/storm/catalogs/InvalidPtPPersistentChunkDataAttributesException.java
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010.
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not
- * use this file except in compliance with the License. You may obtain a copy of
- * the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by
- * applicable law or agreed to in writing, software distributed under the
- * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
- * OF ANY KIND, either express or implied. See the License for the specific
- * language governing permissions and limitations under the License.
- */
-
-package it.grid.storm.catalogs;
-
-import it.grid.storm.srm.types.TRequestToken;
-import it.grid.storm.srm.types.TSURL;
-import it.grid.storm.srm.types.TLifeTimeInSeconds;
-import it.grid.storm.srm.types.TFileStorageType;
-import it.grid.storm.srm.types.TSpaceToken;
-import it.grid.storm.srm.types.TOverwriteMode;
-import it.grid.storm.common.types.TURLPrefix;
-import it.grid.storm.srm.types.TSizeInBytes;
-import it.grid.storm.srm.types.TReturnStatus;
-import it.grid.storm.srm.types.TTURL;
-
-/**
- * This class represents an exceptin thrown when the attributes supplied to the
- * constructor of PtPChunkData are invalid, that is if any of the following is
- * _null_: requestToken, toSURL, lifetime, fileStorageType, spaceToken,
- * knownSizeOfThisFile, TURLPrefix transferProtocols, overwriteOption, fileSize,
- * status, transferURL.
- *
- * @author EGRID - ICTP Trieste
- * @date June, 2005
- * @version 2.0
- */
-public class InvalidPtPPersistentChunkDataAttributesException extends
- InvalidPtPDataAttributesException {
-
- private static final long serialVersionUID = -5117535717125685975L;
- /**
- * booleans that indicate whether the corresponding variable is null
- */
- boolean nullRequestToken;
-
- /**
- * Constructor that requires the attributes that caused the exception to be
- * thrown.
- */
- public InvalidPtPPersistentChunkDataAttributesException(
- TRequestToken requestToken, TSURL toSURL, TLifeTimeInSeconds fileLifetime,
- TLifeTimeInSeconds pinLifetime, TFileStorageType fileStorageType,
- TSpaceToken spaceToken, TSizeInBytes knownSizeOfThisFile,
- TURLPrefix transferProtocols, TOverwriteMode overwriteOption,
- TReturnStatus status, TTURL transferURL) {
-
- super(toSURL, fileLifetime, pinLifetime, fileStorageType, spaceToken,
- knownSizeOfThisFile, transferProtocols, overwriteOption, status,
- transferURL);
- nullRequestToken = requestToken == null;
- }
-
- /*
- * (non-Javadoc)
- *
- * @see java.lang.Object#toString()
- */
- @Override
- public String toString() {
-
- StringBuilder builder = new StringBuilder();
- builder
- .append("InvalidPtPPersistentChunkDataAttributesException [nullRequestToken=");
- builder.append(nullRequestToken);
- builder.append(", nullSpaceToken=");
- builder.append(nullSpaceToken);
- builder.append(", nullPinLifetime=");
- builder.append(nullPinLifetime);
- builder.append(", nullFileLifetime=");
- builder.append(nullFileLifetime);
- builder.append(", nullFileStorageType=");
- builder.append(nullFileStorageType);
- builder.append(", nullKnownSizeOfThisFile=");
- builder.append(nullKnownSizeOfThisFile);
- builder.append(", nullOverwriteOption=");
- builder.append(nullOverwriteOption);
- builder.append(", nullSURL=");
- builder.append(nullSURL);
- builder.append(", nullTransferProtocols=");
- builder.append(nullTransferProtocols);
- builder.append(", nullStatus=");
- builder.append(nullStatus);
- builder.append(", nullTransferURL=");
- builder.append(nullTransferURL);
- builder.append("]");
- return builder.toString();
- }
-}
diff --git a/src/main/java/it/grid/storm/catalogs/InvalidReducedCopyChunkDataAttributesException.java b/src/main/java/it/grid/storm/catalogs/InvalidReducedCopyChunkDataAttributesException.java
deleted file mode 100644
index bd1e35c03..000000000
--- a/src/main/java/it/grid/storm/catalogs/InvalidReducedCopyChunkDataAttributesException.java
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- *
- * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010.
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not
- * use this file except in compliance with the License. You may obtain a copy of
- * the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package it.grid.storm.catalogs;
-
-import it.grid.storm.srm.types.TSURL;
-import it.grid.storm.srm.types.TReturnStatus;
-
-/**
- * This class represents an exception thrown when the attributes supplied to the
- * constructor of ReducedCopyChunkData are invalid, that is if any of the
- * following is _null_: fromsURL, toSURL, status.
- *
- * @author Michele Dibenedetto
- */
-@SuppressWarnings("serial")
-public class InvalidReducedCopyChunkDataAttributesException extends Exception {
-
- // booleans that indicate whether the corresponding variable is null
- private boolean nullFromSURL;
- private boolean nullToSURL;
- private boolean nullStatus;
-
- /**
- * Constructor that requires the attributes that caused the exception to be
- * thrown.
- */
- public InvalidReducedCopyChunkDataAttributesException(TSURL fromSURL,
- TSURL toSURL, TReturnStatus status) {
-
- nullFromSURL = fromSURL == null;
- nullToSURL = toSURL == null;
- nullStatus = status == null;
- }
-
- @Override
- public String toString() {
-
- StringBuilder sb = new StringBuilder();
- sb.append("Invalid CopyChunkData attributes: null-requestToken=");
- sb.append("; null-fromSURL=");
- sb.append(nullFromSURL);
- sb.append("; null-toSURL=");
- sb.append(nullToSURL);
- sb.append("; null-status=");
- sb.append(nullStatus);
- sb.append(".");
- return sb.toString();
- }
-}
diff --git a/src/main/java/it/grid/storm/catalogs/InvalidReducedPtPChunkDataAttributesException.java b/src/main/java/it/grid/storm/catalogs/InvalidReducedPtPChunkDataAttributesException.java
deleted file mode 100644
index 9b2847b73..000000000
--- a/src/main/java/it/grid/storm/catalogs/InvalidReducedPtPChunkDataAttributesException.java
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- *
- * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010.
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not
- * use this file except in compliance with the License. You may obtain a copy of
- * the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package it.grid.storm.catalogs;
-
-import it.grid.storm.srm.types.TSURL;
-import it.grid.storm.srm.types.TReturnStatus;
-import it.grid.storm.srm.types.TFileStorageType;
-import it.grid.storm.srm.types.TLifeTimeInSeconds;
-
-/**
- * This class represents an exceptin thrown when the attributes supplied to the
- * constructor of ReducedPtPChunkData are invalid, that is if any is _null_.
- *
- * @author EGRID - ICTP Trieste
- * @date January, 2007
- * @version 1.0
- */
-public class InvalidReducedPtPChunkDataAttributesException extends Exception {
-
- private static final long serialVersionUID = 4945626188325362854L;
-
- // booleans that indicate whether the corresponding variable is null
- private boolean nullToSURL;
- private boolean nullStatus;
- private boolean nullFileStorageType;
- private boolean nullFileLifetime;
-
- /**
- * Constructor that requires the attributes that caused the exception to be
- * thrown.
- */
- public InvalidReducedPtPChunkDataAttributesException(TSURL toSURL,
- TReturnStatus status, TFileStorageType fileStorageType,
- TLifeTimeInSeconds fileLifetime) {
-
- nullFileStorageType = fileStorageType == null;
- nullToSURL = toSURL == null;
- nullStatus = status == null;
- nullFileLifetime = fileLifetime == null;
- }
-
- @Override
- public String toString() {
-
- StringBuilder sb = new StringBuilder();
- sb.append("Invalid PtPChunkData attributes: null-toSURL=");
- sb.append(nullToSURL);
- sb.append("; null-status=");
- sb.append(nullStatus);
- sb.append("; null-fileStorageType=");
- sb.append(nullFileStorageType);
- sb.append("; null-fileLifetime=");
- sb.append(nullFileLifetime);
- sb.append(".");
- return sb.toString();
- }
-}
diff --git a/src/main/java/it/grid/storm/catalogs/InvalidRequestSummaryDataAttributesException.java b/src/main/java/it/grid/storm/catalogs/InvalidRequestSummaryDataAttributesException.java
deleted file mode 100644
index 6021de690..000000000
--- a/src/main/java/it/grid/storm/catalogs/InvalidRequestSummaryDataAttributesException.java
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- *
- * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010.
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not
- * use this file except in compliance with the License. You may obtain a copy of
- * the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package it.grid.storm.catalogs;
-
-import it.grid.storm.srm.types.TRequestType;
-import it.grid.storm.srm.types.TRequestToken;
-import it.grid.storm.griduser.GridUserInterface;
-
-/**
- * This class represents an Exception thrown when a RequestSummaryData object is
- * created with any invalid attributes: null TRequestType, null TRequestToken,
- * null VomsGridUser.
- *
- * @author EGRID - ICTP Trieste
- * @date March 18th, 2005
- * @version 3.0
- */
-public class InvalidRequestSummaryDataAttributesException extends Exception {
-
- private static final long serialVersionUID = -7729349713696058669L;
-
- // booleans true if the corresponding variablesare null or negative
- private boolean nullRequestType = true;
- private boolean nullRequestToken = true;
- private boolean nullVomsGridUser = true;
-
- /**
- * Constructor that requires the attributes that caused the exception to be
- * thrown.
- */
- public InvalidRequestSummaryDataAttributesException(TRequestType requestType,
- TRequestToken requestToken, GridUserInterface gu) {
-
- nullRequestType = (requestType == null);
- nullRequestToken = (requestToken == null);
- nullVomsGridUser = (gu == null);
- }
-
- @Override
- public String toString() {
-
- StringBuilder sb = new StringBuilder();
- sb.append("Invalid RequestSummaryData attributes exception: ");
- sb.append("nullRequestType=");
- sb.append(nullRequestType);
- sb.append("; nullRequestToken=");
- sb.append(nullRequestToken);
- sb.append("; nullVomsGridUser=");
- sb.append(nullVomsGridUser);
- return sb.toString();
- }
-}
diff --git a/src/main/java/it/grid/storm/catalogs/InvalidRetrievedDataException.java b/src/main/java/it/grid/storm/catalogs/InvalidRetrievedDataException.java
deleted file mode 100644
index 5a252b65f..000000000
--- a/src/main/java/it/grid/storm/catalogs/InvalidRetrievedDataException.java
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- *
- * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010.
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not
- * use this file except in compliance with the License. You may obtain a copy of
- * the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package it.grid.storm.catalogs;
-
-/**
- * Class that represents an Exception thrown by the ReservedSpaceCatalog when it
- * is asked to retrieve info from the persistence but the raw data is invalid
- * and does not allow a well-formed domain obejcts to be created.
- *
- * @author: EGRID ICTP
- * @version: 1.0
- * @date: June 2005
- */
-public class InvalidRetrievedDataException extends Exception {
-
- private static final long serialVersionUID = -3645913441787012438L;
-
- private String requestToken;
- private String requestType;
- private int totalFilesInThisRequest;
- private int numOfQueuedRequests;
- private int numOfProgressing;
- private int numFinished;
- private boolean isSuspended;
-
- /**
- * Constructor that requires the attributes that caused the exception to be
- * thrown.
- */
- public InvalidRetrievedDataException(String requestToken, String requestType,
- int totalFilesInThisRequest, int numOfQueuedRequests,
- int numOfProgressingRequests, int numFinished, boolean isSuspended) {
-
- this.requestToken = requestToken;
- this.requestType = requestType;
- this.totalFilesInThisRequest = totalFilesInThisRequest;
- this.numOfQueuedRequests = numOfQueuedRequests;
- this.numOfProgressing = numOfProgressingRequests;
- this.numFinished = numFinished;
- this.isSuspended = isSuspended;
- }
-
- public String toString() {
-
- return "InvalidRetrievedDataException: token=" + requestToken + " type="
- + requestType + " total-files=" + totalFilesInThisRequest + " queued="
- + numOfQueuedRequests + " progressing=" + numOfProgressing + " finished="
- + numFinished + " isSusp=" + isSuspended;
- }
-
-}
diff --git a/src/main/java/it/grid/storm/catalogs/InvalidSurlRequestDataAttributesException.java b/src/main/java/it/grid/storm/catalogs/InvalidSurlRequestDataAttributesException.java
deleted file mode 100644
index 363d6d895..000000000
--- a/src/main/java/it/grid/storm/catalogs/InvalidSurlRequestDataAttributesException.java
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- *
- * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010.
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not
- * use this file except in compliance with the License. You may obtain a copy of
- * the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package it.grid.storm.catalogs;
-
-import it.grid.storm.srm.types.TReturnStatus;
-import it.grid.storm.srm.types.TSURL;
-
-/**
- * @author Michele Dibenedetto
- *
- */
-public class InvalidSurlRequestDataAttributesException extends Exception {
-
- private static final long serialVersionUID = -8636768167720753989L;
- protected boolean nullSURL;
- protected boolean nullStatus;
-
- public InvalidSurlRequestDataAttributesException(TSURL SURL,
- TReturnStatus status) {
-
- super();
- init(SURL, status);
- }
-
- public InvalidSurlRequestDataAttributesException(TSURL SURL,
- TReturnStatus status, String message) {
-
- super(message);
- init(SURL, status);
- }
-
- public InvalidSurlRequestDataAttributesException(TSURL SURL,
- TReturnStatus status, Throwable cause) {
-
- super(cause);
- init(SURL, status);
- }
-
- public InvalidSurlRequestDataAttributesException(TSURL SURL,
- TReturnStatus status, String message, Throwable cause) {
-
- super(message, cause);
- init(SURL, status);
- }
-
- private void init(TSURL SURL, TReturnStatus status) {
-
- nullSURL = SURL == null;
- nullStatus = status == null;
- }
-
- /*
- * (non-Javadoc)
- *
- * @see java.lang.Object#toString()
- */
- @Override
- public String toString() {
-
- StringBuilder builder = new StringBuilder();
- builder.append("InvalidSurlRequestDataAttributesException [nullSURL=");
- builder.append(nullSURL);
- builder.append(", nullStatus=");
- builder.append(nullStatus);
- builder.append("]");
- return builder.toString();
- }
-
-}
diff --git a/src/main/java/it/grid/storm/catalogs/JiTData.java b/src/main/java/it/grid/storm/catalogs/JiTData.java
deleted file mode 100644
index 4c3e4eaee..000000000
--- a/src/main/java/it/grid/storm/catalogs/JiTData.java
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- *
- * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010.
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not
- * use this file except in compliance with the License. You may obtain a copy of
- * the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package it.grid.storm.catalogs;
-
-/**
- * Class that represents data associated to JiT entries. It contains a String
- * representing the file, an int representing the ACL, an int representing the
- * user UID, an int representing the user GID.
- *
- * @author EGRID - ICTP Trieste
- * @version 1.0
- * @date November 2006
- */
-public class JiTData {
-
- private String file = "";
- private int uid = -1;
- private int gid = -1;
- private int acl = -1;
-
- /**
- * Constructor requiring the complete name of the file as String, the acl as
- * int, the uid and primary gid of the LocalUser bith as int.
- */
- public JiTData(String file, int acl, int uid, int gid) {
-
- this.file = file;
- this.acl = acl;
- this.uid = uid;
- this.gid = gid;
- }
-
- public String pfn() {
-
- return file;
- }
-
- public int acl() {
-
- return acl;
- }
-
- public int uid() {
-
- return uid;
- }
-
- public int gid() {
-
- return gid;
- }
-
- public String toString() {
-
- return "file=" + file + " acl=" + acl + " uid=" + uid + " gid=" + gid;
- }
-}
diff --git a/src/main/java/it/grid/storm/catalogs/MultipleDataEntriesException.java b/src/main/java/it/grid/storm/catalogs/MultipleDataEntriesException.java
deleted file mode 100644
index 134b13ff2..000000000
--- a/src/main/java/it/grid/storm/catalogs/MultipleDataEntriesException.java
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- *
- * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010.
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not
- * use this file except in compliance with the License. You may obtain a copy of
- * the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package it.grid.storm.catalogs;
-
-import it.grid.storm.srm.types.TRequestToken;
-
-/**
- * Class that represents an Exception thrown by the ReservedSpaceCatalog when it
- * finds more than one row of data for the specified request.
- *
- * @author: EGRID ICTP
- * @version: 1.0
- * @date: June 2005
- */
-public class MultipleDataEntriesException extends Exception {
-
- private static final long serialVersionUID = 427636739469695868L;
-
- private TRequestToken requestToken;
-
- /**
- * Constructor tha trequires the attributes that caused the exception to be
- * thrown.
- */
- public MultipleDataEntriesException(TRequestToken requestToken) {
-
- this.requestToken = requestToken;
- }
-
- public String toString() {
-
- return "MultipleDataEntriesException: requestToken=" + requestToken;
- }
-
-}
diff --git a/src/main/java/it/grid/storm/catalogs/NoDataFoundException.java b/src/main/java/it/grid/storm/catalogs/NoDataFoundException.java
deleted file mode 100644
index bc44544a9..000000000
--- a/src/main/java/it/grid/storm/catalogs/NoDataFoundException.java
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- *
- * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010.
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not
- * use this file except in compliance with the License. You may obtain a copy of
- * the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package it.grid.storm.catalogs;
-
-import it.grid.storm.srm.types.TRequestToken;
-
-/**
- * Class that represents an Exception thrown by the ReservedSpaceCatalog when it
- * finds no data for the specified request.
- *
- * @author: EGRID ICTP
- * @version: 1.0
- * @date: June 2005
- */
-public class NoDataFoundException extends Exception {
-
- private static final long serialVersionUID = -718255813130266566L;
-
- private TRequestToken requestToken;
-
- /**
- * Constructor tha trequires the attributes that caused the exception to be
- * thrown.
- */
- public NoDataFoundException(TRequestToken requestToken) {
-
- this.requestToken = requestToken;
- }
-
- public String toString() {
-
- return "NoDataFoundException: requestToken=" + requestToken;
- }
-
-}
diff --git a/src/main/java/it/grid/storm/catalogs/OverwriteModeConverter.java b/src/main/java/it/grid/storm/catalogs/OverwriteModeConverter.java
deleted file mode 100644
index ddcf6eda6..000000000
--- a/src/main/java/it/grid/storm/catalogs/OverwriteModeConverter.java
+++ /dev/null
@@ -1,103 +0,0 @@
-/*
- *
- * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010.
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not
- * use this file except in compliance with the License. You may obtain a copy of
- * the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package it.grid.storm.catalogs;
-
-import java.util.Map;
-import java.util.HashMap;
-import java.util.Iterator;
-import it.grid.storm.srm.types.TOverwriteMode;
-import it.grid.storm.config.Configuration;
-
-/**
- * Package private auxiliary class used to convert between DB and StoRM object
- * model representation of TOverwriteMode.
- *
- * @author: EGRID ICTP
- * @version: 2.0
- * @date: June 2005
- */
-public class OverwriteModeConverter {
-
- private Map DBtoSTORM = new HashMap();
- private Map STORMtoDB = new HashMap();
-
- private static OverwriteModeConverter c = new OverwriteModeConverter();
-
- /**
- * Private constructor that fills in the conversion table; in particular, DB
- * uses String values to represent TOverwriteMode:
- *
- * N NEVER A ALWAYS D WHENFILESAREDIFFERENT
- */
- private OverwriteModeConverter() {
-
- DBtoSTORM.put("N", TOverwriteMode.NEVER);
- DBtoSTORM.put("A", TOverwriteMode.ALWAYS);
- DBtoSTORM.put("D", TOverwriteMode.WHENFILESAREDIFFERENT);
- Object aux;
- for (Iterator i = DBtoSTORM.keySet().iterator(); i.hasNext();) {
- aux = i.next();
- STORMtoDB.put(DBtoSTORM.get(aux), aux);
- }
- }
-
- /**
- * Method that returns the only instance of OverwriteModeConverter.
- */
- public static OverwriteModeConverter getInstance() {
-
- return c;
- }
-
- /**
- * Method that returns the int used by DPM to represent the given
- * TOverwriteMode. "" is returned if no match is found.
- */
- public String toDB(TOverwriteMode om) {
-
- String aux = (String) STORMtoDB.get(om);
- if (aux == null)
- return "";
- return aux;
- }
-
- /**
- * Method that returns the TOverwriteMode used by StoRM to represent the
- * supplied String representation of DPM. A configured default TOverwriteMode
- * is returned in case no corresponding StoRM type is found.
- * TOverwriteMode.EMPTY is returned if there are configuration errors.
- */
- public TOverwriteMode toSTORM(String s) {
-
- TOverwriteMode aux = (TOverwriteMode) DBtoSTORM.get(s);
- if (aux == null)
- aux = (TOverwriteMode) DBtoSTORM.get(Configuration.getInstance()
- .getDefaultOverwriteMode());
- if (aux == null)
- return TOverwriteMode.EMPTY;
- else
- return aux;
- }
-
- public String toString() {
-
- return "OverWriteModeConverter.\nDBtoSTORM map:" + DBtoSTORM
- + "\nSTORMtoDB map:" + STORMtoDB;
- }
-
-}
diff --git a/src/main/java/it/grid/storm/catalogs/PersistentChunkData.java b/src/main/java/it/grid/storm/catalogs/PersistentChunkData.java
deleted file mode 100644
index 59f59d81e..000000000
--- a/src/main/java/it/grid/storm/catalogs/PersistentChunkData.java
+++ /dev/null
@@ -1,11 +0,0 @@
-package it.grid.storm.catalogs;
-
-public interface PersistentChunkData extends ChunkData {
-
- /**
- * Method that returns the primary key in persistence, associated with This
- * Chunk.
- */
- public long getPrimaryKey();
-
-}
diff --git a/src/main/java/it/grid/storm/catalogs/PinLifetimeConverter.java b/src/main/java/it/grid/storm/catalogs/PinLifetimeConverter.java
deleted file mode 100644
index 1904e57a2..000000000
--- a/src/main/java/it/grid/storm/catalogs/PinLifetimeConverter.java
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- *
- * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010.
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not
- * use this file except in compliance with the License. You may obtain a copy of
- * the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package it.grid.storm.catalogs;
-
-import it.grid.storm.srm.types.TLifeTimeInSeconds;
-import it.grid.storm.config.Configuration;
-
-/**
- * Class that handles DB representation of a TLifetimeInSeconds, in particular
- * it takes care of protocol specification:
- *
- * 0/null/negative are translated as default StoRM configurable values. StoRMs
- * Empty TLifeTimeInSeconds is translated as 0.
- *
- * @author EGRID ICTP
- * @version 1.0
- * @date March 2007
- */
-public class PinLifetimeConverter {
-
- private static PinLifetimeConverter stc = new PinLifetimeConverter(); // only
- // instance
-
- private PinLifetimeConverter() {
-
- }
-
- /**
- * Method that returns the only instance of SizeInBytesIntConverter
- */
- public static PinLifetimeConverter getInstance() {
-
- return stc;
- }
-
- /**
- * Method that translates the Empty TLifeTimeInSeconds into the empty
- * representation of DB which is 0. Any other value is left as is.
- */
- public int toDB(long l) {
-
- if (l == TLifeTimeInSeconds.makeEmpty().value())
- return 0;
- return new Long(l).intValue();
- }
-
- /**
- * Method that returns the long corresponding to the int value in the DB,
- * except if it is 0, NULL or negative; a configurable default value is
- * returned instead, corresponding to the getPinLifetimeMinimum()
- * Configuration class method.
- */
- public long toStoRM(int s) {
-
- if (s == 0) {
- return Configuration.getInstance().getPinLifetimeDefault();
- } else if (s < 0) {
- // The default is used also as a Minimum
- return Configuration.getInstance().getPinLifetimeDefault();
- }
- return new Integer(s).longValue();
- }
-
- public long toStoRM(long s) {
-
- if (s == 0) {
- return Configuration.getInstance().getPinLifetimeDefault();
- } else if (s < 0) {
- // The default is used also as a Minimum
- return Configuration.getInstance().getPinLifetimeDefault();
- }
- return s;
- }
-}
diff --git a/src/main/java/it/grid/storm/catalogs/PtGChunkCatalog.java b/src/main/java/it/grid/storm/catalogs/PtGChunkCatalog.java
index 307ed1a19..91d35564b 100644
--- a/src/main/java/it/grid/storm/catalogs/PtGChunkCatalog.java
+++ b/src/main/java/it/grid/storm/catalogs/PtGChunkCatalog.java
@@ -17,6 +17,12 @@
package it.grid.storm.catalogs;
+import java.util.ArrayList;
+import java.util.Collection;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
import it.grid.storm.common.types.SizeUnit;
import it.grid.storm.common.types.TURLPrefix;
import it.grid.storm.common.types.TimeUnit;
@@ -24,11 +30,21 @@
import it.grid.storm.griduser.AbstractGridUser;
import it.grid.storm.griduser.GridUserInterface;
import it.grid.storm.griduser.GridUserManager;
+import it.grid.storm.persistence.converter.PinLifetimeConverter;
+import it.grid.storm.persistence.converter.StatusCodeConverter;
+import it.grid.storm.persistence.converter.TURLConverter;
+import it.grid.storm.persistence.converter.TransferProtocolListConverter;
+import it.grid.storm.persistence.dao.PtGChunkDAO;
+import it.grid.storm.persistence.exceptions.InvalidReducedPtGChunkDataAttributesException;
+import it.grid.storm.persistence.exceptions.InvalidSurlRequestDataAttributesException;
+import it.grid.storm.persistence.impl.mysql.PtGChunkDAOMySql;
+import it.grid.storm.persistence.model.PtGChunkDataTO;
+import it.grid.storm.persistence.model.PtGPersistentChunkData;
+import it.grid.storm.persistence.model.ReducedPtGChunkData;
+import it.grid.storm.persistence.model.ReducedPtGChunkDataTO;
import it.grid.storm.srm.types.InvalidTDirOptionAttributesException;
-import it.grid.storm.srm.types.InvalidTRequestTokenAttributesException;
import it.grid.storm.srm.types.InvalidTSURLAttributesException;
import it.grid.storm.srm.types.InvalidTSizeAttributesException;
-import it.grid.storm.srm.types.InvalidTTURLAttributesException;
import it.grid.storm.srm.types.TDirOption;
import it.grid.storm.srm.types.TLifeTimeInSeconds;
import it.grid.storm.srm.types.TRequestToken;
@@ -38,820 +54,349 @@
import it.grid.storm.srm.types.TStatusCode;
import it.grid.storm.srm.types.TTURL;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.List;
-import java.util.Timer;
-import java.util.TimerTask;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
/**
- * Class that represents StoRMs PtGChunkCatalog: it collects PtGChunkData and
- * provides methods for looking up a PtGChunkData based on TRequestToken, as
- * well as for adding a new entry and removing an existing one.
+ * Class that represents StoRMs PtGChunkCatalog: it collects PtGChunkData and provides methods for
+ * looking up a PtGChunkData based on TRequestToken, as well as for adding a new entry and removing
+ * an existing one.
*
* @author EGRID - ICTP Trieste
* @date April 26th, 2005
* @version 4.0
*/
-@SuppressWarnings("unused")
public class PtGChunkCatalog {
- private static final Logger log = LoggerFactory
- .getLogger(PtGChunkCatalog.class);
-
- /* Only instance of PtGChunkCatalog present in StoRM! */
- private static final PtGChunkCatalog cat = new PtGChunkCatalog();
- private final PtGChunkDAO dao = PtGChunkDAO.getInstance();
-
- /*
- * Timer object in charge of transiting expired requests from SRM_FILE_PINNED
- * to SRM_RELEASED!
- */
- private final Timer transiter = new Timer();
- /* Delay time before starting cleaning thread! */
- private final long delay = Configuration.getInstance()
- .getTransitInitialDelay() * 1000;
- /* Period of execution of cleaning! */
- private final long period = Configuration.getInstance()
- .getTransitTimeInterval() * 1000;
-
- /**
- * Private constructor that starts the internal timer needed to periodically
- * check and transit requests whose pinLifetime has expired and are in
- * SRM_FILE_PINNED, to SRM_RELEASED.
- */
- private PtGChunkCatalog() {
-
- TimerTask transitTask = new TimerTask() {
-
- @Override
- public void run() {
-
- transitExpiredSRM_FILE_PINNED();
- }
- };
- transiter.scheduleAtFixedRate(transitTask, delay, period);
- }
-
- /**
- * Method that returns the only instance of PtGChunkCatalog available.
- */
- public static PtGChunkCatalog getInstance() {
-
- return cat;
- }
-
- /**
- * Method used to update into Persistence a retrieved PtGChunkData. In case
- * any error occurs, the operation does not proceed but no Exception is
- * thrown. Error messages get logged.
- *
- * Only fileSize, StatusCode, errString and transferURL are updated. Likewise
- * for the request pinLifetime.
- */
- synchronized public void update(PtGPersistentChunkData chunkData) {
-
- PtGChunkDataTO to = new PtGChunkDataTO();
- /* Primary key needed by DAO Object */
- to.setPrimaryKey(chunkData.getPrimaryKey());
- to.setFileSize(chunkData.getFileSize().value());
- to.setStatus(StatusCodeConverter.getInstance().toDB(
- chunkData.getStatus().getStatusCode()));
- to.setErrString(chunkData.getStatus().getExplanation());
- to.setTurl(TURLConverter.getInstance().toDB(
- chunkData.getTransferURL().toString()));
- to.setLifeTime(PinLifetimeConverter.getInstance().toDB(
- chunkData.getPinLifeTime().value()));
- to.setNormalizedStFN(chunkData.getSURL().normalizedStFN());
- to.setSurlUniqueID(new Integer(chunkData.getSURL().uniqueId()));
- to.setClientDN(chunkData.getUser().getDn());
- if (chunkData.getUser() instanceof AbstractGridUser) {
- if (((AbstractGridUser) chunkData.getUser()).hasVoms()) {
- to.setVomsAttributes(((AbstractGridUser) chunkData.getUser())
- .getFQANsAsString());
- }
-
- }
- dao.update(to);
- }
-
- /**
- * Refresh method. THIS IS A WORK IN PROGRESS!!!! This method have to synch
- * the ChunkData information with the database status intended as the status
- * code and the TURL
- *
- * @param auxTO
- * @param PtGChunkData
- * inputChunk
- * @return PtGChunkData outputChunk
- */
- synchronized public PtGPersistentChunkData refreshStatus(
- PtGPersistentChunkData inputChunk) {
-
- PtGChunkDataTO chunkDataTO = dao.refresh(inputChunk.getPrimaryKey());
-
- log.debug("PtG CHUNK CATALOG: retrieved data " + chunkDataTO);
- if (chunkDataTO == null) {
- log.warn("PtG CHUNK CATALOG! Empty TO found in persistence for specified "
- + "request: {}", inputChunk.getPrimaryKey());
- return inputChunk;
- }
-
- /*
- * In this first version the only field updated is the Status. Once
- * updated, the new status is rewritten into the input ChunkData
- */
-
- // status
- TReturnStatus status = null;
- TStatusCode code = StatusCodeConverter.getInstance().toSTORM(chunkDataTO.status());
- if (code != TStatusCode.EMPTY) {
- status = new TReturnStatus(code, chunkDataTO.errString());
- }
- inputChunk.setStatus(status);
- TTURL turl = null;
- try {
- turl = TTURL.makeFromString(chunkDataTO.turl());
- } catch (InvalidTTURLAttributesException e) {
- log.info("PtGChunkCatalog (FALSE-ERROR-in-abort-refresh-status?):"
- + " built a TURL with protocol NULL (retrieved from the DB..)");
- }
- inputChunk.setTransferURL(turl);
- return inputChunk;
- }
-
- /**
- * Method that returns a Collection of PtGChunkData Objects matching the
- * supplied TRequestToken.
- *
- * If any of the data associated to the TRequestToken is not well formed and
- * so does not allow a PtGChunkData Object to be created, then that part of
- * the request is dropped and gets logged, and the processing continues with
- * the next part. All valid chunks get returned: the others get dropped.
- *
- * If there are no chunks to process then an empty Collection is returned, and
- * a messagge gets logged.
- */
- synchronized public Collection lookup(TRequestToken rt) {
-
- Collection chunkTOs = dao.find(rt);
- log.debug("PtG CHUNK CATALOG: retrieved data " + chunkTOs);
- ArrayList list = new ArrayList();
- if (chunkTOs.isEmpty()) {
- log.warn("PtG CHUNK CATALOG! No chunks found in persistence for "
- + "specified request: {}", rt);
- return list;
- }
- PtGPersistentChunkData chunk;
- for (PtGChunkDataTO chunkTO : chunkTOs) {
- chunk = makeOne(chunkTO, rt);
- if (chunk == null) {
- continue;
- }
- list.add(chunk);
- if (isComplete(chunkTO)) {
- continue;
- }
- try {
- dao.updateIncomplete(this.completeTO(chunkTO, chunk));
- } catch (InvalidReducedPtGChunkDataAttributesException e) {
- log.warn("PtG CHUNK CATALOG! unable to add missing informations on DB "
- + "to the request: {}", e.getMessage());
- }
- }
- log.debug("PtG CHUNK CATALOG: returning " + list);
- return list;
- }
-
- /**
- * Generates a PtGChunkData from the received PtGChunkDataTO
- *
- * @param chunkDataTO
- * @param rt
- * @return
- */
- private PtGPersistentChunkData makeOne(PtGChunkDataTO chunkDataTO,
- TRequestToken rt) {
-
- StringBuilder errorSb = new StringBuilder();
- TSURL fromSURL = null;
- try {
- fromSURL = TSURL.makeFromStringValidate(chunkDataTO.fromSURL());
- } catch (InvalidTSURLAttributesException e) {
- errorSb.append(e);
- }
- if (chunkDataTO.normalizedStFN() != null) {
- fromSURL.setNormalizedStFN(chunkDataTO.normalizedStFN());
- }
- if (chunkDataTO.surlUniqueID() != null) {
- fromSURL.setUniqueID(chunkDataTO.surlUniqueID().intValue());
- }
- // lifeTime
- TLifeTimeInSeconds lifeTime = null;
- try {
- long pinLifeTime = PinLifetimeConverter.getInstance().toStoRM(
- chunkDataTO.lifeTime());
- // Check for max value allowed
- long max = Configuration.getInstance().getPinLifetimeMaximum();
- if (pinLifeTime > max) {
- log.warn("PinLifeTime is greater than the max value allowed."
- + " Drop the value to the max = {} seconds", max);
- pinLifeTime = max;
- }
- lifeTime = TLifeTimeInSeconds.make((pinLifeTime), TimeUnit.SECONDS);
- } catch (IllegalArgumentException e) {
- errorSb.append("\n");
- errorSb.append(e);
- }
- // dirOption
- TDirOption dirOption = null;
- try {
- dirOption = new TDirOption(chunkDataTO.dirOption(),
- chunkDataTO.allLevelRecursive(), chunkDataTO.numLevel());
- } catch (InvalidTDirOptionAttributesException e) {
- errorSb.append("\n");
- errorSb.append(e);
- }
- // transferProtocols
- TURLPrefix transferProtocols = TransferProtocolListConverter
- .toSTORM(chunkDataTO.protocolList());
- if (transferProtocols.size() == 0) {
- errorSb.append("\nEmpty list of TransferProtocols or could "
- + "not translate TransferProtocols!");
- /* fail construction of PtGChunkData! */
- transferProtocols = null;
- }
- // fileSize
- TSizeInBytes fileSize = null;
- try {
- fileSize = TSizeInBytes.make(chunkDataTO.fileSize(), SizeUnit.BYTES);
- } catch (InvalidTSizeAttributesException e) {
- errorSb.append("\n");
- errorSb.append(e);
- }
- // status
- TReturnStatus status = null;
- TStatusCode code = StatusCodeConverter.getInstance().toSTORM(
- chunkDataTO.status());
- if (code == TStatusCode.EMPTY) {
- errorSb.append("\nRetrieved StatusCode was not recognised: "
- + chunkDataTO.status());
- } else {
- status = new TReturnStatus(code, chunkDataTO.errString());
- }
- GridUserInterface gridUser = null;
- try {
- if (chunkDataTO.vomsAttributes() != null
- && !chunkDataTO.vomsAttributes().trim().equals("")) {
- gridUser = GridUserManager.makeVOMSGridUser(chunkDataTO.clientDN(),
- chunkDataTO.vomsAttributesArray());
- } else {
- gridUser = GridUserManager.makeGridUser(chunkDataTO.clientDN());
- }
-
- } catch (IllegalArgumentException e) {
- log.error("Unexpected error on voms grid user creation."
- + " IllegalArgumentException: {}", e.getMessage(), e);
- }
- // transferURL
- /*
- * whatever is read is just meaningless because PtG will fill it in!!! So
- * create an Empty TTURL by default! Vital to avoid problems with unknown
- * DPM NULL/EMPTY logic policy!
- */
- TTURL transferURL = TTURL.makeEmpty();
- // make PtGChunkData
- PtGPersistentChunkData aux = null;
- try {
- aux = new PtGPersistentChunkData(gridUser, rt, fromSURL, lifeTime,
- dirOption, transferProtocols, fileSize, status, transferURL);
- aux.setPrimaryKey(chunkDataTO.primaryKey());
- } catch (InvalidSurlRequestDataAttributesException e) {
- dao.signalMalformedPtGChunk(chunkDataTO);
- log.warn("PtG CHUNK CATALOG! Retrieved malformed PtG chunk data from "
- + "persistence. Dropping chunk from request {}", rt);
- log.warn(e.getMessage(), e);
- log.warn(errorSb.toString());
- }
- // end...
- return aux;
- }
-
- /**
- *
- * Adds to the received PtGChunkDataTO the normalized StFN and the SURL unique
- * ID taken from the PtGChunkData
- *
- * @param chunkTO
- * @param chunk
- */
- private void completeTO(ReducedPtGChunkDataTO chunkTO,
- final ReducedPtGChunkData chunk) {
-
- chunkTO.setNormalizedStFN(chunk.fromSURL().normalizedStFN());
- chunkTO.setSurlUniqueID(new Integer(chunk.fromSURL().uniqueId()));
- }
-
- /**
- *
- * Creates a ReducedPtGChunkDataTO from the received PtGChunkDataTO and
- * completes it with the normalized StFN and the SURL unique ID taken from the
- * PtGChunkData
- *
- * @param chunkTO
- * @param chunk
- * @return
- * @throws InvalidReducedPtGChunkDataAttributesException
- */
- private ReducedPtGChunkDataTO completeTO(PtGChunkDataTO chunkTO,
- final PtGPersistentChunkData chunk)
- throws InvalidReducedPtGChunkDataAttributesException {
-
- ReducedPtGChunkDataTO reducedChunkTO = this.reduce(chunkTO);
- this.completeTO(reducedChunkTO, this.reduce(chunk));
- return reducedChunkTO;
- }
-
- /**
- * Creates a ReducedPtGChunkData from the data contained in the received
- * PtGChunkData
- *
- * @param chunk
- * @return
- * @throws InvalidReducedPtGChunkDataAttributesException
- */
- private ReducedPtGChunkData reduce(PtGPersistentChunkData chunk)
- throws InvalidReducedPtGChunkDataAttributesException {
-
- ReducedPtGChunkData reducedChunk = new ReducedPtGChunkData(chunk.getSURL(),
- chunk.getStatus());
- reducedChunk.setPrimaryKey(chunk.getPrimaryKey());
- return reducedChunk;
- }
-
- /**
- * Creates a ReducedPtGChunkDataTO from the data contained in the received
- * PtGChunkDataTO
- *
- * @param chunkTO
- * @return
- */
- private ReducedPtGChunkDataTO reduce(PtGChunkDataTO chunkTO) {
-
- ReducedPtGChunkDataTO reducedChunkTO = new ReducedPtGChunkDataTO();
- reducedChunkTO.setPrimaryKey(chunkTO.primaryKey());
- reducedChunkTO.setFromSURL(chunkTO.fromSURL());
- reducedChunkTO.setNormalizedStFN(chunkTO.normalizedStFN());
- reducedChunkTO.setSurlUniqueID(chunkTO.surlUniqueID());
- reducedChunkTO.setStatus(chunkTO.status());
- reducedChunkTO.setErrString(chunkTO.errString());
- return reducedChunkTO;
- }
-
- /**
- * Checks if the received PtGChunkDataTO contains the fields not set by the
- * front end but required
- *
- * @param chunkTO
- * @return
- */
- private boolean isComplete(PtGChunkDataTO chunkTO) {
-
- return (chunkTO.normalizedStFN() != null)
- && (chunkTO.surlUniqueID() != null);
- }
-
- /**
- * Checks if the received ReducedPtGChunkDataTO contains the fields not set by
- * the front end but required
- *
- * @param reducedChunkTO
- * @return
- */
- private boolean isComplete(ReducedPtGChunkDataTO reducedChunkTO) {
-
- return (reducedChunkTO.normalizedStFN() != null)
- && (reducedChunkTO.surlUniqueID() != null);
- }
-
- /**
- * Method that returns a Collection of ReducedPtGChunkData Objects associated
- * to the supplied TRequestToken.
- *
- * If any of the data retrieved for a given chunk is not well formed and so
- * does not allow a ReducedPtGChunkData Object to be created, then that chunk
- * is dropped and gets logged, while processing continues with the next one.
- * All valid chunks get returned: the others get dropped.
- *
- * If there are no chunks associated to the given TRequestToken, then an empty
- * Collection is returned and a message gets logged.
- */
- synchronized public Collection lookupReducedPtGChunkData(
- TRequestToken rt) {
-
- Collection reducedChunkDataTOs = dao.findReduced(rt
- .getValue());
- log.debug("PtG CHUNK CATALOG: retrieved data {}", reducedChunkDataTOs);
- ArrayList list = new ArrayList();
- if (reducedChunkDataTOs.isEmpty()) {
- log.debug("PtG CHUNK CATALOG! No chunks found in persistence for {}", rt);
- } else {
- ReducedPtGChunkData reducedChunkData = null;
- for (ReducedPtGChunkDataTO reducedChunkDataTO : reducedChunkDataTOs) {
- reducedChunkData = makeOneReduced(reducedChunkDataTO);
- if (reducedChunkData != null) {
- list.add(reducedChunkData);
- if (!this.isComplete(reducedChunkDataTO)) {
- this.completeTO(reducedChunkDataTO, reducedChunkData);
- dao.updateIncomplete(reducedChunkDataTO);
- }
- }
- }
- log.debug("PtG CHUNK CATALOG: returning {}", list);
- }
- return list;
- }
-
- public Collection lookupReducedPtGChunkData(
- TRequestToken requestToken, Collection surls) {
-
- int[] surlsUniqueIDs = new int[surls.size()];
- String[] surlsArray = new String[surls.size()];
- int index = 0;
- for (TSURL tsurl : surls) {
- surlsUniqueIDs[index] = tsurl.uniqueId();
- surlsArray[index] = tsurl.rawSurl();
- index++;
- }
- Collection chunkDataTOCollection = dao.findReduced(
- requestToken, surlsUniqueIDs, surlsArray);
- log.debug("PtG CHUNK CATALOG: retrieved data {}", chunkDataTOCollection);
- return buildReducedChunkDataList(chunkDataTOCollection);
- }
-
- public Collection lookupPtGChunkData(TSURL surl,
- GridUserInterface user) {
-
- return lookupPtGChunkData(Arrays.asList(new TSURL[] { surl }), user);
- }
-
- public Collection lookupPtGChunkData(TSURL surl) {
-
- return lookupPtGChunkData(Arrays.asList(new TSURL[] { surl }));
- }
-
- public Collection lookupPtGChunkData(
- List surls, GridUserInterface user) {
-
- int[] surlsUniqueIDs = new int[surls.size()];
- String[] surlsArray = new String[surls.size()];
- int index = 0;
- for (TSURL tsurl : surls) {
- surlsUniqueIDs[index] = tsurl.uniqueId();
- surlsArray[index] = tsurl.rawSurl();
- index++;
- }
- Collection chunkDataTOCollection = dao.find(surlsUniqueIDs,
- surlsArray, user.getDn());
- log.debug("PtG CHUNK CATALOG: retrieved data {}", chunkDataTOCollection);
- return buildChunkDataList(chunkDataTOCollection);
- }
-
- public Collection lookupPtGChunkData(List surls) {
-
- int[] surlsUniqueIDs = new int[surls.size()];
- String[] surlsArray = new String[surls.size()];
- int index = 0;
- for (TSURL tsurl : surls) {
- surlsUniqueIDs[index] = tsurl.uniqueId();
- surlsArray[index] = tsurl.rawSurl();
- index++;
- }
- Collection chunkDataTOCollection = dao.find(surlsUniqueIDs,
- surlsArray);
- log.debug("PtG CHUNK CATALOG: retrieved data {}", chunkDataTOCollection);
- return buildChunkDataList(chunkDataTOCollection);
- }
-
- private Collection buildChunkDataList(
- Collection chunkDataTOCollection) {
-
- ArrayList list = new ArrayList();
- PtGPersistentChunkData chunk;
- for (PtGChunkDataTO chunkTO : chunkDataTOCollection) {
- chunk = makeOne(chunkTO);
- if (chunk == null) {
- continue;
- }
- list.add(chunk);
- if (isComplete(chunkTO)) {
- continue;
- }
- try {
- dao.updateIncomplete(this.completeTO(chunkTO, chunk));
- } catch (InvalidReducedPtGChunkDataAttributesException e) {
- log.warn("PtG CHUNK CATALOG! unable to add missing informations on "
- + "DB to the request: ", e.getMessage());
- }
- }
- return list;
- }
-
- private PtGPersistentChunkData makeOne(PtGChunkDataTO chunkTO) {
-
- try {
- return makeOne(chunkTO,
- new TRequestToken(chunkTO.requestToken(), chunkTO.timeStamp()));
- } catch (InvalidTRequestTokenAttributesException e) {
- throw new IllegalStateException(
- "Unexpected InvalidTRequestTokenAttributesException in TRequestToken: "
- + e);
- }
- }
-
- /**
- * Method that returns a Collection of ReducedPtGChunkData Objects matching
- * the supplied GridUser and Collection of TSURLs. If any of the data
- * retrieved for a given chunk is not well formed and so does not allow a
- * ReducedPtGChunkData Object to be created, then that chunk is dropped and
- * gets logged, while processing continues with the next one. All valid chunks
- * get returned: the others get dropped. If there are no chunks associated to
- * the given GridUser and Collection of TSURLs, then an empty Collection is
- * returned and a message gets logged.
- */
- synchronized public Collection lookupReducedPtGChunkData(
- GridUserInterface gu, Collection tsurlCollection) {
-
- int[] surlsUniqueIDs = new int[tsurlCollection.size()];
- String[] surls = new String[tsurlCollection.size()];
- int index = 0;
- for (TSURL tsurl : tsurlCollection) {
- surlsUniqueIDs[index] = tsurl.uniqueId();
- surls[index] = tsurl.rawSurl();
- index++;
- }
- Collection chunkDataTOCollection = dao.findReduced(
- gu.getDn(), surlsUniqueIDs, surls);
- log.debug("PtG CHUNK CATALOG: retrieved data {}", chunkDataTOCollection);
- return buildReducedChunkDataList(chunkDataTOCollection);
- }
-
- private Collection buildReducedChunkDataList(
- Collection chunkDataTOCollection) {
-
- ArrayList list = new ArrayList();
- ReducedPtGChunkData reducedChunkData;
- for (ReducedPtGChunkDataTO reducedChunkDataTO : chunkDataTOCollection) {
- reducedChunkData = makeOneReduced(reducedChunkDataTO);
- if (reducedChunkData != null) {
- list.add(reducedChunkData);
- if (!isComplete(reducedChunkDataTO)) {
- completeTO(reducedChunkDataTO, reducedChunkData);
- dao.updateIncomplete(reducedChunkDataTO);
- }
- }
- }
- log.debug("PtG CHUNK CATALOG: returning {}",list);
- return list;
- }
-
- /**
- *
- *
- * @param reducedChunkDataTO
- * @return
- */
- private ReducedPtGChunkData makeOneReduced(
- ReducedPtGChunkDataTO reducedChunkDataTO) {
-
- StringBuilder errorSb = new StringBuilder();
- // fromSURL
- TSURL fromSURL = null;
- try {
- fromSURL = TSURL.makeFromStringValidate(reducedChunkDataTO.fromSURL());
- } catch (InvalidTSURLAttributesException e) {
- errorSb.append(e);
- }
- if (reducedChunkDataTO.normalizedStFN() != null) {
- fromSURL.setNormalizedStFN(reducedChunkDataTO.normalizedStFN());
- }
- if (reducedChunkDataTO.surlUniqueID() != null) {
- fromSURL.setUniqueID(reducedChunkDataTO.surlUniqueID().intValue());
- }
- // status
- TReturnStatus status = null;
- TStatusCode code = StatusCodeConverter.getInstance().toSTORM(
- reducedChunkDataTO.status());
- if (code == TStatusCode.EMPTY) {
- errorSb.append("\nRetrieved StatusCode was not recognised: "
- + reducedChunkDataTO.status());
- } else {
- status = new TReturnStatus(code, reducedChunkDataTO.errString());
- }
- // make ReducedPtGChunkData
- ReducedPtGChunkData aux = null;
- try {
- aux = new ReducedPtGChunkData(fromSURL, status);
- aux.setPrimaryKey(reducedChunkDataTO.primaryKey());
- } catch (InvalidReducedPtGChunkDataAttributesException e) {
- log.warn("PtG CHUNK CATALOG! Retrieved malformed Reduced PtG chunk "
- + "data from persistence: dropping reduced chunk...");
- log.warn(e.getMessage(), e);
- log.warn(errorSb.toString());
- }
- // end...
- return aux;
- }
-
- /**
- * Method used to add into Persistence a new entry. The supplied PtGChunkData
- * gets the primary key changed to the value assigned in Persistence.
- *
- * This method is intended to be used by a recursive PtG request: the parent
- * request supplies a directory which must be expanded, so all new children
- * requests resulting from the files in the directory are added into
- * persistence.
- *
- * So this method does _not_ add a new SRM prepare_to_get request into the DB!
- *
- * The only children data written into the DB are: sourceSURL, TDirOption,
- * statusCode and explanation.
- *
- * In case of any error the operation does not proceed, but no Exception is
- * thrown! Proper messages get logged by underlaying DAO.
- */
- synchronized public void addChild(PtGPersistentChunkData chunkData) {
-
- PtGChunkDataTO to = new PtGChunkDataTO();
- /* needed for now to find ID of request! Must be changed soon! */
- to.setRequestToken(chunkData.getRequestToken().toString());
- to.setFromSURL(chunkData.getSURL().toString());
- to.setNormalizedStFN(chunkData.getSURL().normalizedStFN());
- to.setSurlUniqueID(new Integer(chunkData.getSURL().uniqueId()));
-
- to.setAllLevelRecursive(chunkData.getDirOption().isAllLevelRecursive());
- to.setDirOption(chunkData.getDirOption().isDirectory());
- to.setNumLevel(chunkData.getDirOption().getNumLevel());
- to.setStatus(StatusCodeConverter.getInstance().toDB(
- chunkData.getStatus().getStatusCode()));
- to.setErrString(chunkData.getStatus().getExplanation());
- to.setClientDN(chunkData.getUser().getDn());
- if (chunkData.getUser() instanceof AbstractGridUser) {
- if (((AbstractGridUser) chunkData.getUser()).hasVoms()) {
- to.setVomsAttributes(((AbstractGridUser) chunkData.getUser())
- .getFQANsAsString());
- }
-
- }
- /* add the entry and update the Primary Key field! */
- dao.addChild(to);
- /* set the assigned PrimaryKey! */
- chunkData.setPrimaryKey(to.primaryKey());
- }
-
- /**
- * Method used to add into Persistence a new entry. The supplied PtGChunkData
- * gets the primary key changed to the value assigned in the Persistence. The
- * method requires the GridUser to whom associate the added request.
- *
- * This method is intended to be used by an srmCopy request in push mode which
- * implies a local srmPtG. The only fields from PtGChunkData that are
- * considered are: the requestToken, the sourceSURL, the pinLifetime, the
- * dirOption, the protocolList, the status and error string.
- *
- * So this method _adds_ a new SRM prepare_to_get request into the DB!
- *
- * In case of any error the operation does not proceed, but no Exception is
- * thrown! The underlaying DAO logs proper error messagges.
- */
- synchronized public void add(PtGPersistentChunkData chunkData,
- GridUserInterface gu) {
-
- PtGChunkDataTO to = new PtGChunkDataTO();
- to.setRequestToken(chunkData.getRequestToken().toString());
- to.setFromSURL(chunkData.getSURL().toString());
- to.setNormalizedStFN(chunkData.getSURL().normalizedStFN());
- to.setSurlUniqueID(new Integer(chunkData.getSURL().uniqueId()));
-
- to.setLifeTime(new Long(chunkData.getPinLifeTime().value()).intValue());
- to.setAllLevelRecursive(chunkData.getDirOption().isAllLevelRecursive());
- to.setDirOption(chunkData.getDirOption().isDirectory());
- to.setNumLevel(chunkData.getDirOption().getNumLevel());
- to.setProtocolList(TransferProtocolListConverter.toDB(chunkData
- .getTransferProtocols()));
- to.setStatus(StatusCodeConverter.getInstance().toDB(
- chunkData.getStatus().getStatusCode()));
- to.setErrString(chunkData.getStatus().getExplanation());
-
- to.setClientDN(chunkData.getUser().getDn());
- if (chunkData.getUser() instanceof AbstractGridUser) {
- if (((AbstractGridUser) chunkData.getUser()).hasVoms()) {
- to.setVomsAttributes(((AbstractGridUser) chunkData.getUser())
- .getFQANsAsString());
- }
-
- }
-
- dao.addNew(to, gu.getDn()); // add the entry and update the Primary Key
- // field!
- chunkData.setPrimaryKey(to.primaryKey()); // set the assigned PrimaryKey!
- }
-
- /**
- * Method used to establish if in Persistence there is a PtGChunkData working
- * on the supplied SURL, and whose state is SRM_FILE_PINNED, in which case
- * true is returned. In case none are found or there is any problem, false is
- * returned. This method is intended to be used by srmMv.
- */
- synchronized public boolean isSRM_FILE_PINNED(TSURL surl) {
-
- return (dao.numberInSRM_FILE_PINNED(surl.uniqueId()) > 0);
-
- }
-
- /**
- * Method used to transit the specified Collection of ReducedPtGChunkData from
- * SRM_FILE_PINNED to SRM_RELEASED. Chunks in any other starting state are not
- * transited. In case of any error nothing is done, but proper error messages
- * get logged by the underlaying DAO.
- */
- synchronized public void transitSRM_FILE_PINNEDtoSRM_RELEASED(
- Collection chunks, TRequestToken token) {
-
- if (chunks == null || chunks.isEmpty()) {
- return;
- }
- long[] primaryKeys = new long[chunks.size()];
- int index = 0;
- for (ReducedPtGChunkData chunkData : chunks) {
- if (chunkData != null) {
- primaryKeys[index] = chunkData.primaryKey();
- index++;
- }
-
- }
- dao.transitSRM_FILE_PINNEDtoSRM_RELEASED(primaryKeys, token);
- for (ReducedPtGChunkData chunkData : chunks) {
- if (chunkData != null) {
- primaryKeys[index] = chunkData.primaryKey();
- index++;
- }
- }
- }
-
- /**
- * Method used to force transition to SRM_RELEASED from SRM_FILE_PINNED, of
- * all PtG Requests whose pinLifetime has expired and the state still has not
- * been changed (a user forgot to run srmReleaseFiles)!
- */
- synchronized public void transitExpiredSRM_FILE_PINNED() {
-
- List expiredSurls = dao.transitExpiredSRM_FILE_PINNED();
- }
-
- public void updateStatus(TRequestToken requestToken, TSURL surl,
- TStatusCode statusCode, String explanation) {
-
- dao.updateStatus(requestToken, new int[] { surl.uniqueId() },
- new String[] { surl.rawSurl() }, statusCode, explanation);
- }
-
- public void updateFromPreviousStatus(TSURL surl,
- TStatusCode expectedStatusCode, TStatusCode newStatusCode,
- String explanation) {
-
- dao.updateStatusOnMatchingStatus(new int[] { surl.uniqueId() },
- new String[] { surl.rawSurl() }, expectedStatusCode, newStatusCode,
- explanation);
-
- }
-
- public void updateFromPreviousStatus(TRequestToken requestToken,
- TStatusCode expectedStatusCode, TStatusCode newStatusCode,
- String explanation) {
-
- dao.updateStatusOnMatchingStatus(requestToken, expectedStatusCode,
- newStatusCode, explanation);
- }
-
- public void updateFromPreviousStatus(TRequestToken requestToken,
- List surlList, TStatusCode expectedStatusCode,
- TStatusCode newStatusCode) {
-
- int[] surlsUniqueIDs = new int[surlList.size()];
- String[] surls = new String[surlList.size()];
- int index = 0;
- for (TSURL tsurl : surlList) {
- surlsUniqueIDs[index] = tsurl.uniqueId();
- surls[index] = tsurl.rawSurl();
- index++;
- }
- dao.updateStatusOnMatchingStatus(requestToken, surlsUniqueIDs, surls,
- expectedStatusCode, newStatusCode);
- }
+ private static final Logger log = LoggerFactory.getLogger(PtGChunkCatalog.class);
+
+ private static PtGChunkCatalog instance;
+
+ public static synchronized PtGChunkCatalog getInstance() {
+ if (instance == null) {
+ instance = new PtGChunkCatalog();
+ }
+ return instance;
+ }
+
+ private final PtGChunkDAO dao;
+
+  /**
+   * Private constructor: obtains the MySQL DAO singleton used for all persistence operations.
+   * Note: unlike the previous implementation, this constructor no longer starts an internal
+   * timer; expired-request transitions are handled elsewhere.
+   */
+ private PtGChunkCatalog() {
+
+ dao = PtGChunkDAOMySql.getInstance();
+ }
+
+ /**
+ * Method used to update into Persistence a retrieved PtGChunkData. In case any error occurs, the
+ * operation does not proceed but no Exception is thrown. Error messages get logged.
+ *
+ * Only fileSize, StatusCode, errString and transferURL are updated. Likewise for the request
+ * pinLifetime.
+ */
+ synchronized public void update(PtGPersistentChunkData chunkData) {
+
+ PtGChunkDataTO to = new PtGChunkDataTO();
+ /* Primary key needed by DAO Object */
+ to.setPrimaryKey(chunkData.getPrimaryKey());
+ to.setFileSize(chunkData.getFileSize().value());
+ to.setStatus(StatusCodeConverter.getInstance().toDB(chunkData.getStatus().getStatusCode()));
+ to.setErrString(chunkData.getStatus().getExplanation());
+ to.setTurl(TURLConverter.getInstance().toDB(chunkData.getTransferURL().toString()));
+ to.setLifeTime(PinLifetimeConverter.getInstance().toDB(chunkData.getPinLifeTime().value()));
+ to.setNormalizedStFN(chunkData.getSURL().normalizedStFN());
+ to.setSurlUniqueID(new Integer(chunkData.getSURL().uniqueId()));
+ to.setClientDN(chunkData.getUser().getDn());
+ if (chunkData.getUser() instanceof AbstractGridUser) {
+ if (((AbstractGridUser) chunkData.getUser()).hasVoms()) {
+ to.setVomsAttributes(((AbstractGridUser) chunkData.getUser()).getFQANsAsString());
+ }
+
+ }
+ dao.update(to);
+ }
+
+ /**
+ * Method that returns a Collection of PtGChunkData Objects matching the supplied TRequestToken.
+ *
+ * If any of the data associated to the TRequestToken is not well formed and so does not allow a
+ * PtGChunkData Object to be created, then that part of the request is dropped and gets logged,
+ * and the processing continues with the next part. All valid chunks get returned: the others get
+ * dropped.
+ *
+   * If there are no chunks to process then an empty Collection is returned, and a message gets
+   * logged.
+ */
+ synchronized public Collection lookup(TRequestToken rt) {
+
+ Collection chunkTOs = dao.find(rt);
+ log.debug("PtG CHUNK CATALOG: retrieved data " + chunkTOs);
+ ArrayList list = new ArrayList();
+ if (chunkTOs.isEmpty()) {
+ log.warn("PtG CHUNK CATALOG! No chunks found in persistence for " + "specified request: {}",
+ rt);
+ return list;
+ }
+ PtGPersistentChunkData chunk;
+ for (PtGChunkDataTO chunkTO : chunkTOs) {
+ chunk = makeOne(chunkTO, rt);
+ if (chunk == null) {
+ continue;
+ }
+ list.add(chunk);
+ if (isComplete(chunkTO)) {
+ continue;
+ }
+ try {
+ dao.updateIncomplete(this.completeTO(chunkTO, chunk));
+ } catch (InvalidReducedPtGChunkDataAttributesException e) {
+ log.warn(
+ "PtG CHUNK CATALOG! unable to add missing informations on DB " + "to the request: {}",
+ e.getMessage());
+ }
+ }
+ log.debug("PtG CHUNK CATALOG: returning " + list);
+ return list;
+ }
+
+ /**
+ * Generates a PtGChunkData from the received PtGChunkDataTO
+ *
+ * @param chunkDataTO
+ * @param rt
+ * @return
+ */
+ private PtGPersistentChunkData makeOne(PtGChunkDataTO chunkDataTO, TRequestToken rt) {
+
+ StringBuilder errorSb = new StringBuilder();
+ TSURL fromSURL = null;
+ try {
+ fromSURL = TSURL.makeFromStringValidate(chunkDataTO.fromSURL());
+ } catch (InvalidTSURLAttributesException e) {
+ errorSb.append(e);
+ }
+ if (chunkDataTO.normalizedStFN() != null) {
+ fromSURL.setNormalizedStFN(chunkDataTO.normalizedStFN());
+ }
+ if (chunkDataTO.surlUniqueID() != null) {
+ fromSURL.setUniqueID(chunkDataTO.surlUniqueID().intValue());
+ }
+ // lifeTime
+ TLifeTimeInSeconds lifeTime = null;
+ try {
+ long pinLifeTime = PinLifetimeConverter.getInstance().toStoRM(chunkDataTO.lifeTime());
+ // Check for max value allowed
+ long max = Configuration.getInstance().getPinLifetimeMaximum();
+ if (pinLifeTime > max) {
+ log.warn("PinLifeTime is greater than the max value allowed."
+ + " Drop the value to the max = {} seconds", max);
+ pinLifeTime = max;
+ }
+ lifeTime = TLifeTimeInSeconds.make((pinLifeTime), TimeUnit.SECONDS);
+ } catch (IllegalArgumentException e) {
+ errorSb.append("\n");
+ errorSb.append(e);
+ }
+ // dirOption
+ TDirOption dirOption = null;
+ try {
+ dirOption = new TDirOption(chunkDataTO.dirOption(), chunkDataTO.allLevelRecursive(),
+ chunkDataTO.numLevel());
+ } catch (InvalidTDirOptionAttributesException e) {
+ errorSb.append("\n");
+ errorSb.append(e);
+ }
+ // transferProtocols
+ TURLPrefix transferProtocols =
+ TransferProtocolListConverter.toSTORM(chunkDataTO.protocolList());
+ if (transferProtocols.size() == 0) {
+ errorSb
+ .append("\nEmpty list of TransferProtocols or could " + "not translate TransferProtocols!");
+ /* fail construction of PtGChunkData! */
+ transferProtocols = null;
+ }
+ // fileSize
+ TSizeInBytes fileSize = null;
+ try {
+ fileSize = TSizeInBytes.make(chunkDataTO.fileSize(), SizeUnit.BYTES);
+ } catch (InvalidTSizeAttributesException e) {
+ errorSb.append("\n");
+ errorSb.append(e);
+ }
+ // status
+ TReturnStatus status = null;
+ TStatusCode code = StatusCodeConverter.getInstance().toSTORM(chunkDataTO.status());
+ if (code == TStatusCode.EMPTY) {
+ errorSb.append("\nRetrieved StatusCode was not recognised: " + chunkDataTO.status());
+ } else {
+ status = new TReturnStatus(code, chunkDataTO.errString());
+ }
+ GridUserInterface gridUser = null;
+ try {
+ if (chunkDataTO.vomsAttributes() != null && !chunkDataTO.vomsAttributes().trim().equals("")) {
+ gridUser = GridUserManager.makeVOMSGridUser(chunkDataTO.clientDN(),
+ chunkDataTO.vomsAttributesArray());
+ } else {
+ gridUser = GridUserManager.makeGridUser(chunkDataTO.clientDN());
+ }
+
+ } catch (IllegalArgumentException e) {
+ log.error("Unexpected error on voms grid user creation." + " IllegalArgumentException: {}",
+ e.getMessage(), e);
+ }
+ // transferURL
+ /*
+ * whatever is read is just meaningless because PtG will fill it in!!! So create an Empty TTURL
+ * by default! Vital to avoid problems with unknown DPM NULL/EMPTY logic policy!
+ */
+ TTURL transferURL = TTURL.makeEmpty();
+ // make PtGChunkData
+ PtGPersistentChunkData aux = null;
+ try {
+ aux = new PtGPersistentChunkData(gridUser, rt, fromSURL, lifeTime, dirOption,
+ transferProtocols, fileSize, status, transferURL);
+ aux.setPrimaryKey(chunkDataTO.primaryKey());
+ } catch (InvalidSurlRequestDataAttributesException e) {
+ dao.fail(chunkDataTO);
+ log.warn("PtG CHUNK CATALOG! Retrieved malformed PtG chunk data from "
+ + "persistence. Dropping chunk from request {}", rt);
+ log.warn(e.getMessage(), e);
+ log.warn(errorSb.toString());
+ }
+ // end...
+ return aux;
+ }
+
+ /**
+ *
+ * Adds to the received PtGChunkDataTO the normalized StFN and the SURL unique ID taken from the
+ * PtGChunkData
+ *
+ * @param chunkTO
+ * @param chunk
+ */
+ private void completeTO(ReducedPtGChunkDataTO chunkTO, final ReducedPtGChunkData chunk) {
+
+ chunkTO.setNormalizedStFN(chunk.fromSURL().normalizedStFN());
+ chunkTO.setSurlUniqueID(new Integer(chunk.fromSURL().uniqueId()));
+ }
+
+ /**
+ *
+ * Creates a ReducedPtGChunkDataTO from the received PtGChunkDataTO and completes it with the
+ * normalized StFN and the SURL unique ID taken from the PtGChunkData
+ *
+ * @param chunkTO
+ * @param chunk
+ * @return
+ * @throws InvalidReducedPtGChunkDataAttributesException
+ */
+ private ReducedPtGChunkDataTO completeTO(PtGChunkDataTO chunkTO,
+ final PtGPersistentChunkData chunk) throws InvalidReducedPtGChunkDataAttributesException {
+
+ ReducedPtGChunkDataTO reducedChunkTO = this.reduce(chunkTO);
+ this.completeTO(reducedChunkTO, this.reduce(chunk));
+ return reducedChunkTO;
+ }
+
+ /**
+ * Creates a ReducedPtGChunkData from the data contained in the received PtGChunkData
+ *
+ * @param chunk
+ * @return
+ * @throws InvalidReducedPtGChunkDataAttributesException
+ */
+ private ReducedPtGChunkData reduce(PtGPersistentChunkData chunk)
+ throws InvalidReducedPtGChunkDataAttributesException {
+
+ ReducedPtGChunkData reducedChunk = new ReducedPtGChunkData(chunk.getSURL(), chunk.getStatus());
+ reducedChunk.setPrimaryKey(chunk.getPrimaryKey());
+ return reducedChunk;
+ }
+
+ /**
+ * Creates a ReducedPtGChunkDataTO from the data contained in the received PtGChunkDataTO
+ *
+ * @param chunkTO
+ * @return
+ */
+ private ReducedPtGChunkDataTO reduce(PtGChunkDataTO chunkTO) {
+
+ ReducedPtGChunkDataTO reducedChunkTO = new ReducedPtGChunkDataTO();
+ reducedChunkTO.setPrimaryKey(chunkTO.primaryKey());
+ reducedChunkTO.setFromSURL(chunkTO.fromSURL());
+ reducedChunkTO.setNormalizedStFN(chunkTO.normalizedStFN());
+ reducedChunkTO.setSurlUniqueID(chunkTO.surlUniqueID());
+ reducedChunkTO.setStatus(chunkTO.status());
+ reducedChunkTO.setErrString(chunkTO.errString());
+ return reducedChunkTO;
+ }
+
+ /**
+ * Checks if the received PtGChunkDataTO contains the fields not set by the front end but required
+ *
+ * @param chunkTO
+ * @return
+ */
+ private boolean isComplete(PtGChunkDataTO chunkTO) {
+
+ return (chunkTO.normalizedStFN() != null) && (chunkTO.surlUniqueID() != null);
+ }
+
+ /**
+ * Method used to add into Persistence a new entry. The supplied PtGChunkData gets the primary key
+ * changed to the value assigned in Persistence.
+ *
+ * This method is intended to be used by a recursive PtG request: the parent request supplies a
+ * directory which must be expanded, so all new children requests resulting from the files in the
+ * directory are added into persistence.
+ *
+ * So this method does _not_ add a new SRM prepare_to_get request into the DB!
+ *
+ * The only children data written into the DB are: sourceSURL, TDirOption, statusCode and
+ * explanation.
+ *
+ * In case of any error the operation does not proceed, but no Exception is thrown! Proper
+   * messages get logged by underlying DAO.
+ */
+ synchronized public void addChild(PtGPersistentChunkData chunkData) {
+
+ PtGChunkDataTO to = new PtGChunkDataTO();
+ /* needed for now to find ID of request! Must be changed soon! */
+ to.setRequestToken(chunkData.getRequestToken().toString());
+ to.setFromSURL(chunkData.getSURL().toString());
+ to.setNormalizedStFN(chunkData.getSURL().normalizedStFN());
+ to.setSurlUniqueID(new Integer(chunkData.getSURL().uniqueId()));
+
+ to.setAllLevelRecursive(chunkData.getDirOption().isAllLevelRecursive());
+ to.setDirOption(chunkData.getDirOption().isDirectory());
+ to.setNumLevel(chunkData.getDirOption().getNumLevel());
+ to.setStatus(StatusCodeConverter.getInstance().toDB(chunkData.getStatus().getStatusCode()));
+ to.setErrString(chunkData.getStatus().getExplanation());
+ to.setClientDN(chunkData.getUser().getDn());
+ if (chunkData.getUser() instanceof AbstractGridUser) {
+ if (((AbstractGridUser) chunkData.getUser()).hasVoms()) {
+ to.setVomsAttributes(((AbstractGridUser) chunkData.getUser()).getFQANsAsString());
+ }
+
+ }
+ /* add the entry and update the Primary Key field! */
+ dao.addChild(to);
+ /* set the assigned PrimaryKey! */
+ chunkData.setPrimaryKey(to.primaryKey());
+ }
+
+ public void updateStatus(TRequestToken requestToken, TSURL surl, TStatusCode statusCode,
+ String explanation) {
+
+ dao.updateStatus(requestToken, new int[] {surl.uniqueId()}, new String[] {surl.rawSurl()},
+ statusCode, explanation);
+ }
+
+ public void updateFromPreviousStatus(TRequestToken requestToken, TStatusCode expectedStatusCode,
+ TStatusCode newStatusCode, String explanation) {
+
+ dao.updateStatusOnMatchingStatus(requestToken, expectedStatusCode, newStatusCode, explanation);
+ }
}
diff --git a/src/main/java/it/grid/storm/catalogs/PtGChunkDAO.java b/src/main/java/it/grid/storm/catalogs/PtGChunkDAO.java
deleted file mode 100644
index 393c1f62e..000000000
--- a/src/main/java/it/grid/storm/catalogs/PtGChunkDAO.java
+++ /dev/null
@@ -1,1778 +0,0 @@
-/*
- *
- * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010.
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not
- * use this file except in compliance with the License. You may obtain a copy of
- * the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package it.grid.storm.catalogs;
-
-import static it.grid.storm.catalogs.ChunkDAOUtils.printWarnings;
-
-import it.grid.storm.config.Configuration;
-import it.grid.storm.ea.StormEA;
-import it.grid.storm.namespace.NamespaceDirector;
-import it.grid.storm.namespace.NamespaceException;
-import it.grid.storm.namespace.StoRI;
-import it.grid.storm.namespace.naming.SURL;
-import it.grid.storm.srm.types.InvalidTSURLAttributesException;
-import it.grid.storm.srm.types.TRequestToken;
-import it.grid.storm.srm.types.TRequestType;
-import it.grid.storm.srm.types.TSURL;
-import it.grid.storm.srm.types.TStatusCode;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.PreparedStatement;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.sql.Timestamp;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Date;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map.Entry;
-import java.util.Timer;
-import java.util.TimerTask;
-
-/**
- * DAO class for PtGChunkCatalog. This DAO is specifically designed to connect
- * to a MySQL DB. The raw data found in those tables is pre-treated in order to
- * turn it into the Object Model of StoRM. See Method comments for further info.
- *
- * BEWARE! DAO Adjusts for extra fields in the DB that are not present in the
- * object model.
- *
- * @author EGRID ICTP
- * @version 3.0
- * @date June 2005
- */
-public class PtGChunkDAO {
-
- private static final Logger log = LoggerFactory.getLogger(PtGChunkDAO.class);
-
- /** String with the name of the class for the DB driver */
- private final String driver = Configuration.getInstance().getDBDriver();
- /** String referring to the URL of the DB */
- private final String url = Configuration.getInstance().getDBURL();
- /** String with the password for the DB */
- private final String password = Configuration.getInstance().getDBPassword();
- /** String with the name for the DB */
- private final String name = Configuration.getInstance().getDBUserName();
-
- /** Connection to DB - WARNING!!! It is kept open all the time! */
- private Connection con = null;
- /** boolean that tells whether reconnection is needed because of MySQL bug! */
- private boolean reconnect = false;
-
- /** Singleton instance */
- private final static PtGChunkDAO dao = new PtGChunkDAO();
-
- /** timer thread that will run a task to alert when reconnecting is necessary! */
- private Timer clock = null;
- /**
- * timer task that will update the boolean signaling that a reconnection is
- * needed!
- */
- private TimerTask clockTask = null;
- /** milliseconds that must pass before reconnecting to DB */
- private final long period = Configuration.getInstance()
- .getDBReconnectPeriod() * 1000;
- /** initial delay in milliseconds before starting timer */
- private final long delay = Configuration.getInstance().getDBReconnectDelay() * 1000;
-
- private PtGChunkDAO() {
-
- setUpConnection();
-
- clock = new Timer();
- clockTask = new TimerTask() {
-
- @Override
- public void run() {
-
- reconnect = true;
- }
- }; // clock task
- clock.scheduleAtFixedRate(clockTask, delay, period);
- }
-
- /**
- * Method that returns the only instance of the PtGChunkDAO.
- */
- public static PtGChunkDAO getInstance() {
-
- return dao;
- }
-
- /**
- * Method used to add a new record to the DB: the supplied PtGChunkDataTO gets
- * its primaryKey changed to the one assigned by the DB.
- *
- * The supplied PtGChunkData is used to fill in only the DB table where file
- * specific info gets recorded: it does _not_ add a new request! So if
- * spurious data is supplied, it will just stay there because of a lack of a
- * parent request!
- */
- public synchronized void addChild(PtGChunkDataTO to) {
-
- if (!checkConnection()) {
- log.error("PTG CHUNK DAO: addChild - unable to get a valid connection!");
- return;
- }
- String str = null;
- PreparedStatement id = null; // statement to find out the ID associated to
- // the request token
- ResultSet rsid = null; // result set containing the ID of the request.
- try {
-
- // WARNING!!!! We are forced to run a query to get the ID of the request,
- // which should NOT be so
- // because the corresponding request object should have been changed with
- // the extra field! However, it is not possible
- // at the moment to perform such chage because of strict deadline and the
- // change could wreak havoc
- // the code. So we are forced to make this query!!!
-
- // begin transaction
- con.setAutoCommit(false);
- printWarnings(con.getWarnings());
-
- // find ID of request corresponding to given RequestToken
- str = "SELECT rq.ID FROM request_queue rq WHERE rq.r_token=?";
-
- id = con.prepareStatement(str);
- printWarnings(con.getWarnings());
-
- id.setString(1, to.requestToken());
- printWarnings(id.getWarnings());
-
- log.debug("PTG CHUNK DAO: addChild; {}", id.toString());
- rsid = id.executeQuery();
- printWarnings(id.getWarnings());
-
- /* ID of request in request_process! */
- int request_id = extractID(rsid);
- int id_s = fillPtGTables(to, request_id);
-
- /* end transaction! */
- con.commit();
- printWarnings(con.getWarnings());
- con.setAutoCommit(true);
- printWarnings(con.getWarnings());
-
- // update primary key reading the generated key
- to.setPrimaryKey(id_s);
- } catch (SQLException e) {
- log.error("PTG CHUNK DAO: unable to complete addChild! "
- + "PtGChunkDataTO: {}; error: {}", to, e.getMessage(), e);
- rollback(con);
- } catch (Exception e) {
- log.error("PTG CHUNK DAO: unable to complete addChild! "
- + "PtGChunkDataTO: {}; error: {}", to, e.getMessage(), e);
- rollback(con);
- } finally {
- close(rsid);
- close(id);
- }
- }
-
- /**
- * Method used to add a new record to the DB: the supplied PtGChunkDataTO gets
- * its primaryKey changed to the one assigned by the DB. The client_dn must
- * also be supplied as a String.
- *
- * The supplied PtGChunkData is used to fill in all the DB tables where file
- * specific info gets recorded: it _adds_ a new request!
- */
- public synchronized void addNew(PtGChunkDataTO to, String client_dn) {
-
- if (!checkConnection()) {
- log.error("PTG CHUNK DAO: addNew - unable to get a valid connection!");
- return;
- }
- String str = null;
- /* Result set containing the ID of the inserted new request */
- ResultSet rs_new = null;
- /* Insert new request into process_request */
- PreparedStatement addNew = null;
- /* Insert protocols for request. */
- PreparedStatement addProtocols = null;
- try {
- // begin transaction
- con.setAutoCommit(false);
- printWarnings(con.getWarnings());
-
- // add to request_queue...
- str = "INSERT INTO request_queue (config_RequestTypeID,client_dn,pinLifetime,status,errstring,r_token,nbreqfiles,timeStamp) VALUES (?,?,?,?,?,?,?,?)";
- addNew = con.prepareStatement(str, Statement.RETURN_GENERATED_KEYS);
- printWarnings(con.getWarnings());
- /* Request type set to prepare to get! */
- addNew.setString(1,
- RequestTypeConverter.getInstance().toDB(TRequestType.PREPARE_TO_GET));
- printWarnings(addNew.getWarnings());
-
- addNew.setString(2, client_dn);
- printWarnings(addNew.getWarnings());
-
- addNew.setInt(3, to.lifeTime());
- printWarnings(addNew.getWarnings());
-
- addNew.setInt(
- 4,
- StatusCodeConverter.getInstance().toDB(
- TStatusCode.SRM_REQUEST_INPROGRESS));
- printWarnings(addNew.getWarnings());
-
- addNew.setString(5, "New PtG Request resulting from srmCopy invocation.");
- printWarnings(addNew.getWarnings());
-
- addNew.setString(6, to.requestToken());
- printWarnings(addNew.getWarnings());
-
- addNew.setInt(7, 1); // number of requested files set to 1!
- printWarnings(addNew.getWarnings());
-
- addNew.setTimestamp(8, new Timestamp(new Date().getTime()));
- printWarnings(addNew.getWarnings());
-
- log.trace("PTG CHUNK DAO: addNew; {}", addNew.toString());
- addNew.execute();
- printWarnings(addNew.getWarnings());
-
- rs_new = addNew.getGeneratedKeys();
- int id_new = extractID(rs_new);
-
- // add protocols...
- str = "INSERT INTO request_TransferProtocols (request_queueID,config_ProtocolsID) VALUES (?,?)";
- addProtocols = con.prepareStatement(str);
- printWarnings(con.getWarnings());
- for (Iterator i = to.protocolList().iterator(); i.hasNext();) {
- addProtocols.setInt(1, id_new);
- printWarnings(addProtocols.getWarnings());
-
- addProtocols.setString(2, i.next());
- printWarnings(addProtocols.getWarnings());
-
- log.trace("PTG CHUNK DAO: addNew; {}", addProtocols.toString());
- addProtocols.execute();
- printWarnings(addProtocols.getWarnings());
- }
-
- // addChild...
- int id_s = fillPtGTables(to, id_new);
-
- // end transaction!
- con.commit();
- printWarnings(con.getWarnings());
- con.setAutoCommit(true);
- printWarnings(con.getWarnings());
-
- // update primary key reading the generated key
- to.setPrimaryKey(id_s);
- } catch (SQLException e) {
- log.error("PTG CHUNK DAO: Rolling back! Unable to complete addNew! "
- + "PtGChunkDataTO: {}; error: {}", to, e.getMessage(), e);
- rollback(con);
- } catch (Exception e) {
- log.error("PTG CHUNK DAO: unable to complete addNew! "
- + "PtGChunkDataTO: {}; error: {}", to, e.getMessage(), e);
- rollback(con);
- } finally {
- close(rs_new);
- close(addNew);
- close(addProtocols);
- }
- }
-
- /**
- * To be used inside a transaction
- *
- * @param to
- * @param requestQueueID
- * @return
- * @throws SQLException
- * @throws Exception
- */
- private synchronized int fillPtGTables(PtGChunkDataTO to, int requestQueueID)
- throws SQLException, Exception {
-
- String str = null;
- /* Result set containing the ID of the inserted */
- ResultSet rs_do = null;
- /* Result set containing the ID of the inserted */
- ResultSet rs_g = null;
- /* Result set containing the ID of the inserted */
- ResultSet rs_s = null;
- /* insert TDirOption for request */
- PreparedStatement addDirOption = null;
- /* insert request_Get for request */
- PreparedStatement addGet = null;
- PreparedStatement addChild = null;
-
- try {
- // first fill in TDirOption
- str = "INSERT INTO request_DirOption (isSourceADirectory,allLevelRecursive,numOfLevels) VALUES (?,?,?)";
- addDirOption = con.prepareStatement(str, Statement.RETURN_GENERATED_KEYS);
- printWarnings(con.getWarnings());
- addDirOption.setBoolean(1, to.dirOption());
- printWarnings(addDirOption.getWarnings());
-
- addDirOption.setBoolean(2, to.allLevelRecursive());
- printWarnings(addDirOption.getWarnings());
-
- addDirOption.setInt(3, to.numLevel());
- printWarnings(addDirOption.getWarnings());
-
- log.trace("PTG CHUNK DAO: addNew; {}", addDirOption.toString());
- addDirOption.execute();
- printWarnings(addDirOption.getWarnings());
-
- rs_do = addDirOption.getGeneratedKeys();
- int id_do = extractID(rs_do);
-
- // second fill in request_Get... sourceSURL and TDirOption!
- str = "INSERT INTO request_Get (request_DirOptionID,request_queueID,sourceSURL,normalized_sourceSURL_StFN,sourceSURL_uniqueID) VALUES (?,?,?,?,?)";
- addGet = con.prepareStatement(str, Statement.RETURN_GENERATED_KEYS);
- printWarnings(con.getWarnings());
- addGet.setInt(1, id_do);
- printWarnings(addGet.getWarnings());
-
- addGet.setInt(2, requestQueueID);
- printWarnings(addGet.getWarnings());
-
- addGet.setString(3, to.fromSURL());
- printWarnings(addGet.getWarnings());
-
- addGet.setString(4, to.normalizedStFN());
- printWarnings(addGet.getWarnings());
-
- addGet.setInt(5, to.surlUniqueID());
- printWarnings(addGet.getWarnings());
-
- log.trace("PTG CHUNK DAO: addNew; {}", addGet.toString());
- addGet.execute();
- printWarnings(addGet.getWarnings());
-
- rs_g = addGet.getGeneratedKeys();
- int id_g = extractID(rs_g);
-
- // third fill in status_Get...
- str = "INSERT INTO status_Get (request_GetID,statusCode,explanation) VALUES (?,?,?)";
- addChild = con.prepareStatement(str, Statement.RETURN_GENERATED_KEYS);
- printWarnings(con.getWarnings());
- addChild.setInt(1, id_g);
- printWarnings(addChild.getWarnings());
-
- addChild.setInt(2, to.status());
- printWarnings(addChild.getWarnings());
-
- addChild.setString(3, to.errString());
- printWarnings(addChild.getWarnings());
-
- log.trace("PTG CHUNK DAO: addNew; {}", addChild.toString());
- addChild.execute();
- printWarnings(addChild.getWarnings());
-
- return id_g;
- } finally {
- close(rs_do);
- close(rs_g);
- close(rs_s);
- close(addDirOption);
- close(addGet);
- close(addChild);
- }
- }
-
- /**
- * Method used to save the changes made to a retrieved PtGChunkDataTO, back
- * into the MySQL DB.
- *
- * Only the fileSize, transferURL, statusCode and explanation, of status_Get
- * table are written to the DB. Likewise for the request pinLifetime.
- *
- * In case of any error, an error message gets logged but no exception is
- * thrown.
- */
- public synchronized void update(PtGChunkDataTO to) {
-
- if (!checkConnection()) {
- log.error("PTG CHUNK DAO: update - unable to get a valid connection!");
- return;
- }
- PreparedStatement updateFileReq = null;
- try {
- // ready updateFileReq...
- updateFileReq = con
- .prepareStatement("UPDATE request_queue rq JOIN (status_Get sg, request_Get rg) ON (rq.ID=rg.request_queueID AND sg.request_GetID=rg.ID) "
- + "SET sg.fileSize=?, sg.transferURL=?, sg.statusCode=?, sg.explanation=?, rq.pinLifetime=?, rg.normalized_sourceSURL_StFN=?, rg.sourceSURL_uniqueID=? "
- + "WHERE rg.ID=?");
- printWarnings(con.getWarnings());
-
- updateFileReq.setLong(1, to.fileSize());
- printWarnings(updateFileReq.getWarnings());
-
- updateFileReq.setString(2, to.turl());
- printWarnings(updateFileReq.getWarnings());
-
- updateFileReq.setInt(3, to.status());
- printWarnings(updateFileReq.getWarnings());
-
- updateFileReq.setString(4, to.errString());
- printWarnings(updateFileReq.getWarnings());
-
- updateFileReq.setInt(5, to.lifeTime());
- printWarnings(updateFileReq.getWarnings());
-
- updateFileReq.setString(6, to.normalizedStFN());
- printWarnings(updateFileReq.getWarnings());
-
- updateFileReq.setInt(7, to.surlUniqueID());
- printWarnings(updateFileReq.getWarnings());
-
- updateFileReq.setLong(8, to.primaryKey());
- printWarnings(updateFileReq.getWarnings());
- // execute update
- log.trace("PTG CHUNK DAO: update method; {}", updateFileReq.toString());
- updateFileReq.executeUpdate();
- printWarnings(updateFileReq.getWarnings());
- } catch (SQLException e) {
- log.error("PtG CHUNK DAO: Unable to complete update! {}",
- e.getMessage(), e);
- } finally {
- close(updateFileReq);
- }
- }
-
- /**
- * Updates the request_Get represented by the received ReducedPtGChunkDataTO
- * by setting its normalized_sourceSURL_StFN and sourceSURL_uniqueID
- *
- * @param chunkTO
- */
- public synchronized void updateIncomplete(ReducedPtGChunkDataTO chunkTO) {
-
- if (!checkConnection()) {
- log
- .error("PTG CHUNK DAO: updateIncomplete - unable to get a valid connection!");
- return;
- }
- String str = "UPDATE request_Get rg SET rg.normalized_sourceSURL_StFN=?, rg.sourceSURL_uniqueID=? "
- + "WHERE rg.ID=?";
- PreparedStatement stmt = null;
- try {
- stmt = con.prepareStatement(str);
- printWarnings(con.getWarnings());
-
- stmt.setString(1, chunkTO.normalizedStFN());
- printWarnings(stmt.getWarnings());
-
- stmt.setInt(2, chunkTO.surlUniqueID());
- printWarnings(stmt.getWarnings());
-
- stmt.setLong(3, chunkTO.primaryKey());
- printWarnings(stmt.getWarnings());
-
- log.trace("PtG CHUNK DAO - update incomplete: {}", stmt.toString());
- stmt.executeUpdate();
- printWarnings(stmt.getWarnings());
- } catch (SQLException e) {
- log.error("PtG CHUNK DAO: Unable to complete update incomplete! {}",
- e.getMessage(), e);
- } finally {
- close(stmt);
- }
- }
-
- /**
- * TODO WARNING! THIS IS A WORK IN PROGRESS!!!
- *
- * Method used to refresh the PtGChunkDataTO information from the MySQL DB.
- *
- * In this first version, only the statusCode and the TURL are reloaded from
- * the DB. TODO The next version must contains all the information related to
- * the Chunk!
- *
- * In case of any error, an error messagge gets logged but no exception is
- * thrown.
- */
-
- public synchronized PtGChunkDataTO refresh(long primary_key) {
-
- if (!checkConnection()) {
- log.error("PTG CHUNK DAO: refresh - unable to get a valid connection!");
- return null;
- }
- String queryString = null;
- PreparedStatement find = null;
- ResultSet rs = null;
-
- try {
- // get chunks of the request
- queryString = "SELECT sg.statusCode, sg.transferURL "
- + "FROM status_Get sg " + "WHERE sg.request_GetID=?";
- find = con.prepareStatement(queryString);
- printWarnings(con.getWarnings());
- find.setLong(1, primary_key);
- printWarnings(find.getWarnings());
- log.trace("PTG CHUNK DAO: refresh status method; {}", find.toString());
-
- rs = find.executeQuery();
-
- printWarnings(find.getWarnings());
- PtGChunkDataTO chunkDataTO = null;
- // The result shoul be un
- while (rs.next()) {
- chunkDataTO = new PtGChunkDataTO();
- chunkDataTO.setStatus(rs.getInt("sg.statusCode"));
- chunkDataTO.setTurl(rs.getString("sg.transferURL"));
- }
- return chunkDataTO;
- } catch (SQLException e) {
- log.error("PTG CHUNK DAO: {}", e.getMessage(), e);
- /* Return null TransferObject! */
- return null;
- } finally {
- close(rs);
- close(find);
- }
- }
-
- /**
- * Method that queries the MySQL DB to find all entries matching the supplied
- * TRequestToken. The Collection contains the corresponding PtGChunkDataTO
- * objects.
- *
- * An initial simple query establishes the list of protocols associated with
- * the request. A second complex query establishes all chunks associated with
- * the request, by properly joining request_queue, request_Get, status_Get and
- * request_DirOption. The considered fields are:
- *
- * (1) From status_Get: the ID field which becomes the TOs primary key, and
- * statusCode.
- *
- * (2) From request_Get: sourceSURL
- *
- * (3) From request_queue: pinLifetime
- *
- * (4) From request_DirOption: isSourceADirectory, alLevelRecursive,
- * numOfLevels
- *
- * In case of any error, a log gets written and an empty collection is
- * returned. No exception is thrown.
- *
- * NOTE! Chunks in SRM_ABORTED status are NOT returned!
- */
- public synchronized Collection find(TRequestToken requestToken) {
-
- if (!checkConnection()) {
- log.error("PTG CHUNK DAO: find - unable to get a valid connection!");
- return new ArrayList();
- }
- String strToken = requestToken.toString();
- String str = null;
- PreparedStatement find = null;
- ResultSet rs = null;
- try {
- str = "SELECT tp.config_ProtocolsID "
- + "FROM request_TransferProtocols tp JOIN request_queue rq ON tp.request_queueID=rq.ID "
- + "WHERE rq.r_token=?";
-
- find = con.prepareStatement(str);
- printWarnings(con.getWarnings());
-
- List protocols = new ArrayList();
- find.setString(1, strToken);
- printWarnings(find.getWarnings());
-
- log.trace("PTG CHUNK DAO: find method; {}", find.toString());
- rs = find.executeQuery();
- printWarnings(find.getWarnings());
- while (rs.next()) {
- protocols.add(rs.getString("tp.config_ProtocolsID"));
- }
- close(rs);
- close(find);
-
- // get chunks of the request
- str = "SELECT sg.statusCode, rq.pinLifetime, rg.ID, rq.timeStamp, rq.client_dn, rq.proxy, rg.sourceSURL, "
- + "rg.normalized_sourceSURL_StFN, rg.sourceSURL_uniqueID, d.isSourceADirectory, "
- + "d.allLevelRecursive, d.numOfLevels "
- + "FROM request_queue rq JOIN (request_Get rg, status_Get sg) "
- + "ON (rg.request_queueID=rq.ID AND sg.request_GetID=rg.ID) "
- + "LEFT JOIN request_DirOption d ON rg.request_DirOptionID=d.ID "
- + "WHERE rq.r_token=? AND sg.statusCode<>?";
- find = con.prepareStatement(str);
- printWarnings(con.getWarnings());
- ArrayList list = new ArrayList();
- find.setString(1, strToken);
- printWarnings(find.getWarnings());
-
- find.setInt(2,
- StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_ABORTED));
- printWarnings(find.getWarnings());
-
- log.trace("PTG CHUNK DAO: find method; " + find.toString());
- rs = find.executeQuery();
- printWarnings(find.getWarnings());
-
- PtGChunkDataTO chunkDataTO;
- while (rs.next()) {
- chunkDataTO = new PtGChunkDataTO();
- chunkDataTO.setStatus(rs.getInt("sg.statusCode"));
- chunkDataTO.setRequestToken(strToken);
- chunkDataTO.setPrimaryKey(rs.getLong("rg.ID"));
- chunkDataTO.setFromSURL(rs.getString("rg.sourceSURL"));
- chunkDataTO.setNormalizedStFN(rs
- .getString("rg.normalized_sourceSURL_StFN"));
- int uniqueID = rs.getInt("rg.sourceSURL_uniqueID");
- if (!rs.wasNull()) {
- chunkDataTO.setSurlUniqueID(new Integer(uniqueID));
- }
-
- chunkDataTO.setClientDN(rs.getString("rq.client_dn"));
-
- /**
- * This code is only for the 1.3.18. This is a workaround to get FQANs
- * using the proxy field on request_queue. The FE use the proxy field of
- * request_queue to insert a single FQAN string containing all FQAN
- * separeted by the "#" char. The proxy is a BLOB, hence it has to be
- * properly conveted in string.
- */
- java.sql.Blob blob = rs.getBlob("rq.proxy");
- if (!rs.wasNull() && blob != null) {
- byte[] bdata = blob.getBytes(1, (int) blob.length());
- chunkDataTO.setVomsAttributes(new String(bdata));
- }
- chunkDataTO.setTimeStamp(rs.getTimestamp("rq.timeStamp"));
- chunkDataTO.setLifeTime(rs.getInt("rq.pinLifetime"));
- chunkDataTO.setDirOption(rs.getBoolean("d.isSourceADirectory"));
- chunkDataTO.setAllLevelRecursive(rs.getBoolean("d.allLevelRecursive"));
- chunkDataTO.setNumLevel(rs.getInt("d.numOfLevels"));
- chunkDataTO.setProtocolList(protocols);
- list.add(chunkDataTO);
- }
- return list;
- } catch (SQLException e) {
- log.error("PTG CHUNK DAO: ", e.getMessage(), e);
- /* Return empty Collection! */
- return new ArrayList();
- } finally {
- close(rs);
- close(find);
- }
- }
-
- /**
- * Method that returns a Collection of ReducedPtGChunkDataTO associated to the
- * given TRequestToken expressed as String.
- */
- public synchronized Collection findReduced(
- String reqtoken) {
-
- if (!checkConnection()) {
- log
- .error("PTG CHUNK DAO: findReduced - unable to get a valid connection!");
- return new ArrayList();
- }
- PreparedStatement find = null;
- ResultSet rs = null;
- try {
- // get reduced chunks
- String str = "SELECT sg.statusCode, rg.ID, rg.sourceSURL, rg.normalized_sourceSURL_StFN, rg.sourceSURL_uniqueID "
- + "FROM request_queue rq JOIN (request_Get rg, status_Get sg) "
- + "ON (rg.request_queueID=rq.ID AND sg.request_GetID=rg.ID) "
- + "WHERE rq.r_token=?";
- find = con.prepareStatement(str);
- printWarnings(con.getWarnings());
-
- ArrayList list = new ArrayList();
- find.setString(1, reqtoken);
- printWarnings(find.getWarnings());
-
- log.trace("PtG CHUNK DAO! findReduced with request token; {}", find.toString());
- rs = find.executeQuery();
- printWarnings(find.getWarnings());
-
- ReducedPtGChunkDataTO reducedChunkDataTO = null;
- while (rs.next()) {
- reducedChunkDataTO = new ReducedPtGChunkDataTO();
- reducedChunkDataTO.setStatus(rs.getInt("sg.statusCode"));
- reducedChunkDataTO.setPrimaryKey(rs.getLong("rg.ID"));
- reducedChunkDataTO.setFromSURL(rs.getString("rg.sourceSURL"));
- reducedChunkDataTO.setNormalizedStFN(rs
- .getString("rg.normalized_sourceSURL_StFN"));
- int uniqueID = rs.getInt("rg.sourceSURL_uniqueID");
- if (!rs.wasNull()) {
- reducedChunkDataTO.setSurlUniqueID(uniqueID);
- }
-
- list.add(reducedChunkDataTO);
- }
- return list;
- } catch (SQLException e) {
- log.error("PTG CHUNK DAO: {}", e.getMessage(), e);
- /* Return empty Collection! */
- return new ArrayList();
- } finally {
- close(rs);
- close(find);
- }
- }
-
- public synchronized Collection findReduced(
- TRequestToken requestToken, int[] surlsUniqueIDs, String[] surlsArray) {
-
- if (!checkConnection()) {
- log
- .error("PTG CHUNK DAO: findReduced - unable to get a valid connection!");
- return new ArrayList();
- }
- PreparedStatement find = null;
- ResultSet rs = null;
-
- try {
-
- String str = "SELECT sg.statusCode, rg.ID, rg.sourceSURL, rg.normalized_sourceSURL_StFN, rg.sourceSURL_uniqueID "
- + "FROM request_queue rq JOIN (request_Get rg, status_Get sg) "
- + "ON (rg.request_queueID=rq.ID AND sg.request_GetID=rg.ID) "
- + "WHERE rq.r_token=? AND ( rg.sourceSURL_uniqueID IN "
- + makeSURLUniqueIDWhere(surlsUniqueIDs)
- + " AND rg.sourceSURL IN "
- + makeSurlString(surlsArray) + " ) ";
-
- find = con.prepareStatement(str);
-
- printWarnings(con.getWarnings());
-
- ArrayList list = new ArrayList();
- find.setString(1, requestToken.getValue());
- printWarnings(find.getWarnings());
-
- log.trace("PtG CHUNK DAO! findReduced with griduser+surlarray; {}", find.toString());
- rs = find.executeQuery();
- printWarnings(find.getWarnings());
-
- ReducedPtGChunkDataTO chunkDataTO = null;
- while (rs.next()) {
- chunkDataTO = new ReducedPtGChunkDataTO();
- chunkDataTO.setStatus(rs.getInt("sg.statusCode"));
- chunkDataTO.setPrimaryKey(rs.getLong("rg.ID"));
- chunkDataTO.setFromSURL(rs.getString("rg.sourceSURL"));
- chunkDataTO.setNormalizedStFN(rs
- .getString("rg.normalized_sourceSURL_StFN"));
- int uniqueID = rs.getInt("rg.sourceSURL_uniqueID");
- if (!rs.wasNull()) {
- chunkDataTO.setSurlUniqueID(uniqueID);
- }
-
- list.add(chunkDataTO);
- }
- return list;
- } catch (SQLException e) {
- log.error("PTG CHUNK DAO: {}", e.getMessage(), e);
- /* Return empty Collection! */
- return new ArrayList();
- } finally {
- close(rs);
- close(find);
- }
- }
-
- /**
- * Method that returns a Collection of ReducedPtGChunkDataTO associated to the
- * given griduser, and whose SURLs are contained in the supplied array of
- * Strings.
- */
- public synchronized Collection findReduced(
- String griduser, int[] surlUniqueIDs, String[] surls) {
-
- if (!checkConnection()) {
- log
- .error("PTG CHUNK DAO: findReduced - unable to get a valid connection!");
- return new ArrayList();
- }
- PreparedStatement find = null;
- ResultSet rs = null;
- try {
- /*
- * NOTE: we search also on the fromSurl because otherwise we lost all
- * request_get that have not the uniqueID set because are not yet been
- * used by anybody
- */
- // get reduced chunks
- String str = "SELECT sg.statusCode, rg.ID, rg.sourceSURL, rg.normalized_sourceSURL_StFN, rg.sourceSURL_uniqueID "
- + "FROM request_queue rq JOIN (request_Get rg, status_Get sg) "
- + "ON (rg.request_queueID=rq.ID AND sg.request_GetID=rg.ID) "
- + "WHERE rq.client_dn=? AND ( rg.sourceSURL_uniqueID IN "
- + makeSURLUniqueIDWhere(surlUniqueIDs)
- + " AND rg.sourceSURL IN "
- + makeSurlString(surls) + " ) ";
- find = con.prepareStatement(str);
- printWarnings(con.getWarnings());
-
- ArrayList list = new ArrayList();
- find.setString(1, griduser);
- printWarnings(find.getWarnings());
-
- log.trace("PtG CHUNK DAO! findReduced with griduser+surlarray; {}", find.toString());
- rs = find.executeQuery();
- printWarnings(find.getWarnings());
-
- ReducedPtGChunkDataTO chunkDataTO = null;
- while (rs.next()) {
- chunkDataTO = new ReducedPtGChunkDataTO();
- chunkDataTO.setStatus(rs.getInt("sg.statusCode"));
- chunkDataTO.setPrimaryKey(rs.getLong("rg.ID"));
- chunkDataTO.setFromSURL(rs.getString("rg.sourceSURL"));
- chunkDataTO.setNormalizedStFN(rs
- .getString("rg.normalized_sourceSURL_StFN"));
- int uniqueID = rs.getInt("rg.sourceSURL_uniqueID");
- if (!rs.wasNull()) {
- chunkDataTO.setSurlUniqueID(uniqueID);
- }
-
- list.add(chunkDataTO);
- }
- return list;
- } catch (SQLException e) {
- log.error("PTG CHUNK DAO: {}", e.getMessage(), e);
- /* Return empty Collection! */
- return new ArrayList();
- } finally {
- close(rs);
- close(find);
- }
- }
-
- /**
- * Method used in extraordinary situations to signal that data retrieved from
- * the DB was malformed and could not be translated into the StoRM object
- * model.
- *
- * This method attempts to change the status of the request to SRM_FAILURE and
- * record it in the DB.
- *
- * This operation could potentially fail because the source of the malformed
- * problems could be a problematic DB; indeed, initially only log messagges
- * where recorded.
- *
- * Yet it soon became clear that the source of malformed data were the clients
- * and/or FE recording info in the DB. In these circumstances the client would
- * see its request as being in the SRM_IN_PROGRESS state for ever. Hence the
- * pressing need to inform it of the encountered problems.
- */
- public synchronized void signalMalformedPtGChunk(PtGChunkDataTO auxTO) {
-
- if (!checkConnection()) {
- log
- .error("PTG CHUNK DAO: signalMalformedPtGChunk - unable to get a valid connection!");
- return;
- }
- String signalSQL = "UPDATE status_Get SET statusCode="
- + StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_FAILURE)
- + ", explanation=? WHERE request_GetID=" + auxTO.primaryKey();
- PreparedStatement signal = null;
- try {
- signal = con.prepareStatement(signalSQL);
- printWarnings(con.getWarnings());
- /* Prepared statement spares DB-specific String notation! */
- signal.setString(1, "Request is malformed!");
- printWarnings(signal.getWarnings());
-
- log.trace("PTG CHUNK DAO: signalMalformed; {}", signal.toString());
- signal.executeUpdate();
- printWarnings(signal.getWarnings());
- } catch (SQLException e) {
- log.error("PtGChunkDAO! Unable to signal in DB that the request was "
- + "malformed! Request: {}; Exception: {}", auxTO.toString(), e.toString());
- } finally {
- close(signal);
- }
- }
-
- /**
- * Method that returns the number of Get requests on the given SURL, that are
- * in SRM_FILE_PINNED state.
- *
- * This method is intended to be used by PtGChunkCatalog in the
- * isSRM_FILE_PINNED method invocation.
- *
- * In case of any error, 0 is returned.
- */
- // request_Get table
- public synchronized int numberInSRM_FILE_PINNED(int surlUniqueID) {
-
- if (!checkConnection()) {
- log
- .error("PTG CHUNK DAO: numberInSRM_FILE_PINNED - unable to get a valid connection!");
- return 0;
- }
- String str = "SELECT COUNT(rg.ID) "
- + "FROM status_Get sg JOIN request_Get rg "
- + "ON (sg.request_GetID=rg.ID) "
- + "WHERE rg.sourceSURL_uniqueID=? AND sg.statusCode=?";
- PreparedStatement find = null;
- ResultSet rs = null;
- try {
- find = con.prepareStatement(str);
- printWarnings(con.getWarnings());
- /* Prepared statement spares DB-specific String notation! */
- find.setInt(1, surlUniqueID);
- printWarnings(find.getWarnings());
-
- find.setInt(2,
- StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_FILE_PINNED));
- printWarnings(find.getWarnings());
-
- log.trace("PtG CHUNK DAO - numberInSRM_FILE_PINNED method: {}", find.toString());
- rs = find.executeQuery();
- printWarnings(find.getWarnings());
-
- int numberFilePinned = 0;
- if (rs.next()) {
- numberFilePinned = rs.getInt(1);
- }
- return numberFilePinned;
- } catch (SQLException e) {
- log.error("PtG CHUNK DAO! Unable to determine numberInSRM_FILE_PINNED! "
- + "Returning 0! {}", e.getMessage(), e);
- return 0;
- } finally {
- close(rs);
- close(find);
- }
- }
-
- /**
- * Method that updates all expired requests in SRM_FILE_PINNED state, into
- * SRM_RELEASED.
- *
- * This is needed when the client forgets to invoke srmReleaseFiles().
- *
- * @return
- */
- public synchronized List transitExpiredSRM_FILE_PINNED() {
-
- // tring to the surl unique ID
- if (!checkConnection()) {
- log
- .error("PTG CHUNK DAO: transitExpiredSRM_FILE_PINNED - unable to get a valid connection!");
- return new ArrayList();
- }
- HashMap expiredSurlMap = new HashMap();
- String str = null;
- // Statement statement = null;
- PreparedStatement preparedStatement = null;
-
- /* Find all expired surls */
- try {
- // start transaction
- con.setAutoCommit(false);
-
- str = "SELECT rg.sourceSURL , rg.sourceSURL_uniqueID "
- + "FROM request_Get rg JOIN (status_Get sg, request_queue rq) ON sg.request_GetID=rg.ID AND rg.request_queueID=rq.ID "
- + "WHERE sg.statusCode=?"
- + " AND UNIX_TIMESTAMP(NOW())-UNIX_TIMESTAMP(rq.timeStamp) >= rq.pinLifetime ";
-
- preparedStatement = con.prepareStatement(str);
- preparedStatement.setInt(1,
- StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_FILE_PINNED));
-
- ResultSet res = preparedStatement.executeQuery();
- printWarnings(preparedStatement.getWarnings());
-
- while (res.next()) {
- String sourceSURL = res.getString("rg.sourceSURL");
- Integer uniqueID = new Integer(res.getInt("rg.sourceSURL_uniqueID"));
- /* If the uniqueID is not setted compute it */
- if (res.wasNull()) {
- try {
- TSURL tsurl = TSURL.makeFromStringWellFormed(sourceSURL);
- uniqueID = tsurl.uniqueId();
- } catch (InvalidTSURLAttributesException e) {
- log.warn("PtGChunkDAO! unable to build the TSURL from {}: "
- + "InvalidTSURLAttributesException {}", sourceSURL, e.getMessage(), e);
- }
- }
- expiredSurlMap.put(sourceSURL, uniqueID);
- }
-
- if (expiredSurlMap.isEmpty()) {
- commit(con);
- log
- .trace("PtGChunkDAO! No chunk of PtG request was transited from SRM_FILE_PINNED to SRM_RELEASED.");
- return new ArrayList();
- }
- } catch (SQLException e) {
- log.error("PtGChunkDAO! SQLException. {}", e.getMessage(), e);
- rollback(con);
- return new ArrayList();
- } finally {
- close(preparedStatement);
- }
-
- /* Update status of all expired surls to SRM_RELEASED */
-
- preparedStatement = null;
- try {
-
- str = "UPDATE "
- + "status_Get sg JOIN (request_Get rg, request_queue rq) ON sg.request_GetID=rg.ID AND rg.request_queueID=rq.ID "
- + "SET sg.statusCode=? "
- + "WHERE sg.statusCode=? AND UNIX_TIMESTAMP(NOW())-UNIX_TIMESTAMP(rq.timeStamp) >= rq.pinLifetime ";
-
- preparedStatement = con.prepareStatement(str);
- printWarnings(con.getWarnings());
-
- preparedStatement.setInt(1,
- StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_RELEASED));
- printWarnings(preparedStatement.getWarnings());
-
- preparedStatement.setInt(2,
- StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_FILE_PINNED));
- printWarnings(preparedStatement.getWarnings());
-
- log.trace("PtG CHUNK DAO - transitExpiredSRM_FILE_PINNED method: {}",
- preparedStatement.toString());
-
- int count = preparedStatement.executeUpdate();
- printWarnings(preparedStatement.getWarnings());
-
- if (count == 0) {
- log.trace("PtGChunkDAO! No chunk of PtG request was "
- + "transited from SRM_FILE_PINNED to SRM_RELEASED.");
- } else {
- log.info("PtGChunkDAO! {} chunks of PtG requests were transited from"
- + " SRM_FILE_PINNED to SRM_RELEASED.", count);
- }
- } catch (SQLException e) {
- log.error("PtGChunkDAO! Unable to transit expired SRM_FILE_PINNED chunks "
- + "of PtG requests, to SRM_RELEASED! {}", e.getMessage(), e);
- rollback(con);
- return new ArrayList();
- } finally {
- close(preparedStatement);
- }
-
- /*
- * in order to enhance performance here we can check if there is any file
- * system with tape (T1D0, T1D1), if there is not any we can skip the
- * following
- */
-
- /* Find all not expired surls from PtG and BoL */
-
- HashSet pinnedSurlSet = new HashSet();
- try {
-
- // SURLs pinned by PtGs
- str = "SELECT rg.sourceSURL , rg.sourceSURL_uniqueID FROM "
- + "request_Get rg JOIN (status_Get sg, request_queue rq) ON sg.request_GetID=rg.ID AND rg.request_queueID=rq.ID "
- + "WHERE sg.statusCode=?"
- + " AND UNIX_TIMESTAMP(NOW())-UNIX_TIMESTAMP(rq.timeStamp) < rq.pinLifetime ";
-
- preparedStatement = con.prepareStatement(str);
- preparedStatement.setInt(1,
- StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_FILE_PINNED));
-
- ResultSet res = preparedStatement.executeQuery();
- printWarnings(preparedStatement.getWarnings());
-
- while (res.next()) {
- String sourceSURL = res.getString("rg.sourceSURL");
- Integer uniqueID = new Integer(res.getInt("rg.sourceSURL_uniqueID"));
- /* If the uniqueID is not setted compute it */
- if (res.wasNull()) {
- try {
- TSURL tsurl = TSURL.makeFromStringWellFormed(sourceSURL);
- uniqueID = tsurl.uniqueId();
- } catch (InvalidTSURLAttributesException e) {
- log.warn("PtGChunkDAO! unable to build the TSURL from {}. "
- + "InvalidTSURLAttributesException: {}", sourceSURL, e.getMessage());
- }
- }
- pinnedSurlSet.add(uniqueID);
- }
-
- close(preparedStatement);
-
- // SURLs pinned by BoLs
- str = "SELECT rb.sourceSURL , rb.sourceSURL_uniqueID FROM "
- + "request_BoL rb JOIN (status_BoL sb, request_queue rq) ON sb.request_BoLID=rb.ID AND rb.request_queueID=rq.ID "
- + "WHERE sb.statusCode=?"
- + " AND UNIX_TIMESTAMP(NOW())-UNIX_TIMESTAMP(rq.timeStamp) < rq.pinLifetime ";
-
- preparedStatement = con.prepareStatement(str);
- preparedStatement.setInt(1,
- StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_SUCCESS));
-
- res = preparedStatement.executeQuery();
- printWarnings(preparedStatement.getWarnings());
-
- while (res.next()) {
- String sourceSURL = res.getString("rb.sourceSURL");
- Integer uniqueID = new Integer(res.getInt("rb.sourceSURL_uniqueID"));
- /* If the uniqueID is not setted compute it */
- if (res.wasNull()) {
- try {
- TSURL tsurl = TSURL.makeFromStringWellFormed(sourceSURL);
- uniqueID = tsurl.uniqueId();
- } catch (InvalidTSURLAttributesException e) {
- log.warn("PtGChunkDAO! unable to build the TSURL from {}. "
- + "InvalidTSURLAttributesException: {}", sourceSURL, e.getMessage(), e);
- }
- }
- pinnedSurlSet.add(uniqueID);
- }
- commit(con);
- } catch (SQLException e) {
- log.error("PtGChunkDAO! SQLException. {}", e.getMessage(), e);
- rollback(con);
- } finally {
- close(preparedStatement);
- }
-
- ArrayList expiredSurlList = new ArrayList();
- /* Remove the Extended Attribute pinned if there is not a valid surl on it */
- TSURL surl;
- for (Entry surlEntry : expiredSurlMap.entrySet()) {
- if (!pinnedSurlSet.contains(surlEntry.getValue())) {
- try {
- surl = TSURL.makeFromStringValidate(surlEntry.getKey());
- } catch (InvalidTSURLAttributesException e) {
- log.error("Invalid SURL, cannot release the pin "
- + "(Extended Attribute): {}", surlEntry.getKey());
- continue;
- }
- expiredSurlList.add(surl);
- StoRI stori;
- try {
- stori = NamespaceDirector.getNamespace().resolveStoRIbySURL(surl);
- } catch (Throwable e) {
- log.error("Invalid SURL {} cannot release the pin. {}: {}",
- surlEntry.getKey(), e.getClass().getCanonicalName(), e.getMessage(), e);
- continue;
- }
-
- if (stori.getVirtualFileSystem().getStorageClassType().isTapeEnabled()) {
- StormEA.removePinned(stori.getAbsolutePath());
- }
- }
- }
- return expiredSurlList;
- }
-
- /**
- * Method that updates all chunks in SRM_FILE_PINNED state, into SRM_RELEASED.
- * An array of long representing the primary key of each chunk is required:
- * only they get the status changed provided their current status is
- * SRM_FILE_PINNED.
- *
- * This method is used during srmReleaseFiles
- *
- * In case of any error nothing happens and no exception is thrown, but proper
- * messagges get logged.
- */
- public synchronized void transitSRM_FILE_PINNEDtoSRM_RELEASED(long[] ids) {
-
- if (!checkConnection()) {
- log
- .error("PTG CHUNK DAO: transitSRM_FILE_PINNEDtoSRM_RELEASED - unable to get a valid connection!");
- return;
- }
- String str = "UPDATE status_Get sg SET sg.statusCode=? "
- + "WHERE sg.statusCode=? AND sg.request_GetID IN " + makeWhereString(ids);
- PreparedStatement stmt = null;
- try {
- stmt = con.prepareStatement(str);
- printWarnings(con.getWarnings());
- stmt.setInt(1,
- StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_RELEASED));
- printWarnings(stmt.getWarnings());
-
- stmt.setInt(2,
- StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_FILE_PINNED));
- printWarnings(stmt.getWarnings());
-
- log.trace("PtG CHUNK DAO - transitSRM_FILE_PINNEDtoSRM_RELEASED: {}",
- stmt.toString());
- int count = stmt.executeUpdate();
- printWarnings(stmt.getWarnings());
- if (count == 0) {
- log.trace("PtG CHUNK DAO! No chunk of PtG request was "
- + "transited from SRM_FILE_PINNED to SRM_RELEASED.");
- } else {
- log.info("PtG CHUNK DAO! {} chunks of PtG requests were transited "
- + "from SRM_FILE_PINNED to SRM_RELEASED.", count);
- }
- } catch (SQLException e) {
- log.error("PtG CHUNK DAO! Unable to transit chunks"
- + " from SRM_FILE_PINNED to SRM_RELEASED! {}", e.getMessage(), e);
- } finally {
- close(stmt);
- }
- }
-
- /**
- * @param ids
- * @param token
- */
- public synchronized void transitSRM_FILE_PINNEDtoSRM_RELEASED(long[] ids,
- TRequestToken token) {
-
- if (token == null) {
- transitSRM_FILE_PINNEDtoSRM_RELEASED(ids);
- return;
- }
-
- /*
- * If a request token has been specified, only the related Get requests
- * have to be released. This is done adding the r.r_token="..." clause in
- * the where subquery.
- */
- if (!checkConnection()) {
- log.error("PTG CHUNK DAO: transitSRM_FILE_PINNEDtoSRM_RELEASED - "
- + "unable to get a valid connection!");
- return;
- }
-
- String str = "UPDATE "
- + "status_Get sg JOIN (request_Get rg, request_queue rq) ON sg.request_GetID=rg.ID AND rg.request_queueID=rq.ID "
- + "SET sg.statusCode=? " + "WHERE sg.statusCode=? AND rq.r_token='"
- + token.toString() + "' AND rg.ID IN " + makeWhereString(ids);
- PreparedStatement stmt = null;
- try {
- stmt = con.prepareStatement(str);
- printWarnings(con.getWarnings());
- stmt.setInt(1,StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_RELEASED));
- printWarnings(stmt.getWarnings());
-
- stmt.setInt(2,StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_FILE_PINNED));
- printWarnings(stmt.getWarnings());
-
- log.trace("PtG CHUNK DAO - transitSRM_FILE_PINNEDtoSRM_RELEASED: {}", stmt.toString());
- int count = stmt.executeUpdate();
- printWarnings(stmt.getWarnings());
- if (count == 0) {
- log.trace("PtG CHUNK DAO! No chunk of PtG request was"
- + " transited from SRM_FILE_PINNED to SRM_RELEASED.");
- } else {
- log.info("PtG CHUNK DAO! {} chunks of PtG requests were transited from "
- + "SRM_FILE_PINNED to SRM_RELEASED.", count);
- }
- } catch (SQLException e) {
- log.error("PtG CHUNK DAO! Unable to transit chunks from "
- + "SRM_FILE_PINNED to SRM_RELEASED! {}", e.getMessage(), e);
- } finally {
- close(stmt);
- }
- }
-
- public synchronized void updateStatus(TRequestToken requestToken,
- int[] surlUniqueIDs, String[] surls, TStatusCode statusCode,
- String explanation) {
-
- if (!checkConnection()) {
- log
- .error("PTG CHUNK DAO: updateStatus - unable to get a valid connection!");
- return;
- }
- String str = "UPDATE "
- + "status_Get sg JOIN (request_Get rg, request_queue rq) ON sg.request_GetID=rg.ID AND rg.request_queueID=rq.ID "
- + "SET sg.statusCode=? , sg.explanation=? " + "WHERE rq.r_token='"
- + requestToken.toString() + "' AND ( rg.sourceSURL_uniqueID IN "
- + makeSURLUniqueIDWhere(surlUniqueIDs) + " AND rg.sourceSURL IN "
- + makeSurlString(surls) + " ) ";
- PreparedStatement stmt = null;
- try {
- stmt = con.prepareStatement(str);
- printWarnings(con.getWarnings());
- stmt.setInt(1, StatusCodeConverter.getInstance().toDB(statusCode));
- printWarnings(stmt.getWarnings());
-
- stmt.setString(2, (explanation != null ? explanation : ""));
- printWarnings(stmt.getWarnings());
-
- log.trace("PtG CHUNK DAO - updateStatus: {}", stmt.toString());
- int count = stmt.executeUpdate();
- printWarnings(stmt.getWarnings());
- if (count == 0) {
- log.trace("PtG CHUNK DAO! No chunk of PtG request was updated to {}.",
- statusCode);
- } else {
- log.info("PtG CHUNK DAO! {} chunks of PtG requests were updated to {}.",
- count, statusCode);
- }
- } catch (SQLException e) {
- log.error("PtG CHUNK DAO! Unable to updated to {}! {}", statusCode,
- e.getMessage(), e);
- } finally {
- close(stmt);
- }
- }
-
- public synchronized void updateStatusOnMatchingStatus(int[] surlsUniqueIDs,
- String[] surls, TStatusCode expectedStatusCode, TStatusCode newStatusCode,
- String explanation) throws IllegalArgumentException {
-
- if (surlsUniqueIDs == null || surls == null || explanation == null
- || surlsUniqueIDs.length == 0 || surls.length == 0
- || surlsUniqueIDs.length != surls.length) {
-
- throw new IllegalArgumentException(
- "Unable to perform the updateStatusOnMatchingStatus, "
- + "invalid arguments: surlsUniqueIDs=" + surlsUniqueIDs + " surls="
- + surls + " explanation=" + explanation);
- }
-
- doUpdateStatusOnMatchingStatus(null, surlsUniqueIDs, surls,
- expectedStatusCode, newStatusCode, explanation, false, true, true);
- }
-
- public synchronized void updateStatusOnMatchingStatus(
- TRequestToken requestToken, TStatusCode expectedStatusCode,
- TStatusCode newStatusCode, String explanation) {
-
- if (requestToken == null || requestToken.getValue().trim().isEmpty()
- || explanation == null) {
- throw new IllegalArgumentException(
- "Unable to perform the updateStatusOnMatchingStatus, "
- + "invalid arguments: requestToken=" + requestToken + " explanation="
- + explanation);
- }
- doUpdateStatusOnMatchingStatus(requestToken, null, null,
- expectedStatusCode, newStatusCode, explanation, true, false, true);
- }
-
- public synchronized void updateStatusOnMatchingStatus(
- TRequestToken requestToken, int[] surlsUniqueIDs, String[] surls,
- TStatusCode expectedStatusCode, TStatusCode newStatusCode)
- throws IllegalArgumentException {
-
- if (requestToken == null || requestToken.getValue().trim().isEmpty()
- || surlsUniqueIDs == null || surls == null || surlsUniqueIDs.length == 0
- || surls.length == 0 || surlsUniqueIDs.length != surls.length) {
- throw new IllegalArgumentException(
- "Unable to perform the updateStatusOnMatchingStatus, "
- + "invalid arguments: requestToken=" + requestToken
- + "surlsUniqueIDs=" + surlsUniqueIDs + " surls=" + surls);
- }
- doUpdateStatusOnMatchingStatus(requestToken, surlsUniqueIDs, surls,
- expectedStatusCode, newStatusCode, null, true, true, false);
- }
-
- public synchronized void doUpdateStatusOnMatchingStatus(
- TRequestToken requestToken, int[] surlUniqueIDs, String[] surls,
- TStatusCode expectedStatusCode, TStatusCode newStatusCode,
- String explanation, boolean withRequestToken, boolean withSurls,
- boolean withExplanation) throws IllegalArgumentException {
-
- if ((withRequestToken && requestToken == null)
- || (withExplanation && explanation == null)
- || (withSurls && (surlUniqueIDs == null || surls == null))) {
-
- throw new IllegalArgumentException(
- "Unable to perform the doUpdateStatusOnMatchingStatus, "
- + "invalid arguments: withRequestToken=" + withRequestToken
- + " requestToken=" + requestToken + " withSurls=" + withSurls
- + " surlUniqueIDs=" + surlUniqueIDs + " surls=" + surls
- + " withExplaination=" + withExplanation + " explanation="
- + explanation);
- }
- if (!checkConnection()) {
- log
- .error("PTG CHUNK DAO: updateStatusOnMatchingStatus - unable to get a valid connection!");
- return;
- }
- String str = "UPDATE status_Get sg JOIN (request_Get rg, request_queue rq) "
- + "ON sg.request_GetID=rg.ID AND rg.request_queueID=rq.ID "
- + "SET sg.statusCode=? ";
- if (withExplanation) {
- str += " , " + buildExpainationSet(explanation);
- }
- str += " WHERE sg.statusCode=? ";
- if (withRequestToken) {
- str += " AND " + buildTokenWhereClause(requestToken);
- }
- if (withSurls) {
- str += " AND " + buildSurlsWhereClause(surlUniqueIDs, surls);
- }
- PreparedStatement stmt = null;
- try {
- stmt = con.prepareStatement(str);
- printWarnings(con.getWarnings());
- stmt.setInt(1, StatusCodeConverter.getInstance().toDB(newStatusCode));
- printWarnings(stmt.getWarnings());
-
- stmt
- .setInt(2, StatusCodeConverter.getInstance().toDB(expectedStatusCode));
- printWarnings(stmt.getWarnings());
-
- log.trace("PtG CHUNK DAO - updateStatusOnMatchingStatus: {}", stmt.toString());
- int count = stmt.executeUpdate();
- printWarnings(stmt.getWarnings());
- if (count == 0) {
- log.trace("PtG CHUNK DAO! No chunk of PtG request was updated "
- + "from {} to {}.", expectedStatusCode, newStatusCode);
- } else {
- log.debug("PtG CHUNK DAO! {} chunks of PtG requests were updated "
- + "from {} to {}.", count, expectedStatusCode, newStatusCode);
- }
- } catch (SQLException e) {
- log.error("PtG CHUNK DAO! Unable to updated from {} to {}! {}",
- expectedStatusCode, newStatusCode, e.getMessage(), e);
- } finally {
- close(stmt);
- }
- }
-
- /**
- * Auxiliary method used to close a ResultSet
- */
- private void close(ResultSet rset) {
-
- if (rset != null) {
- try {
- rset.close();
- } catch (Exception e) {
- log.error("PTG CHUNK DAO! Unable to close ResultSet! Error: {}",
- e.getMessage(), e);
- }
- }
- }
-
- /**
- * Auxiliary method used to close a Statement
- */
- private void close(Statement stmt) {
-
- if (stmt != null) {
- try {
- stmt.close();
- } catch (Exception e) {
- log.error("PTG CHUNK DAO! Unable to close Statement {} - Error: {}",
- stmt.toString(), e.getMessage(), e);
- }
- }
- }
-
- private void commit(Connection con) {
-
- if (con != null) {
- try {
- con.commit();
- con.setAutoCommit(true);
- } catch (SQLException e) {
- log.error("PtG, SQL Exception: {}", e.getMessage(), e);
- }
- }
- }
-
- /**
- * Auxiliary method used to roll back a failed transaction
- */
- private void rollback(Connection con) {
-
- if (con != null) {
- try {
- con.rollback();
- con.setAutoCommit(true);
- log.error("PTG CHUNK DAO: roll back successful!");
- } catch (SQLException e2) {
- log.error("PTG CHUNK DAO: roll back failed! {}", e2.getMessage(), e2);
- }
- }
- }
-
- /**
- * Private method that returns the generated ID: it throws an exception in
- * case of any problem!
- */
- private int extractID(ResultSet rs) throws Exception {
-
- if (rs == null) {
- throw new Exception("PTG CHUNK DAO! Null ResultSet!");
- }
- if (rs.next()) {
- return rs.getInt(1);
- } else {
- log.error("PTG CHUNK DAO! It was not possible to establish "
- + "the assigned autoincrement primary key!");
- throw new Exception("PTG CHUNK DAO! It was not possible to"
- + " establish the assigned autoincrement primary key!");
- }
- }
-
- /**
- * Method that returns a String containing all IDs.
- */
- private String makeWhereString(long[] rowids) {
-
- StringBuilder sb = new StringBuilder("(");
- int n = rowids.length;
- for (int i = 0; i < n; i++) {
- sb.append(rowids[i]);
- if (i < (n - 1)) {
- sb.append(",");
- }
- }
- sb.append(")");
- return sb.toString();
- }
-
- /**
- * Method that returns a String containing all Surl's IDs.
- */
- private String makeSURLUniqueIDWhere(int[] surlUniqueIDs) {
-
- StringBuilder sb = new StringBuilder("(");
- for (int i = 0; i < surlUniqueIDs.length; i++) {
- if (i > 0) {
- sb.append(",");
- }
- sb.append(surlUniqueIDs[i]);
- }
- sb.append(")");
- return sb.toString();
- }
-
- /**
- * Method that returns a String containing all Surls.
- */
- private String makeSurlString(String[] surls) {
-
- StringBuilder sb = new StringBuilder("(");
- int n = surls.length;
-
- for (int i = 0; i < n; i++) {
-
- SURL requestedSURL;
-
- try {
- requestedSURL = SURL.makeSURLfromString(surls[i]);
- } catch (NamespaceException e) {
- log.error(e.getMessage());
- log.debug("Skip '{}' during query creation", surls[i]);
- continue;
- }
-
- sb.append("'");
- sb.append(requestedSURL.getNormalFormAsString());
- sb.append("','");
- sb.append(requestedSURL.getQueryFormAsString());
- sb.append("'");
-
- if (i < (n - 1)) {
- sb.append(",");
- }
- }
-
- sb.append(")");
- return sb.toString();
- }
-
- /**
- * Auxiliary method that sets up the connection to the DB, as well as the
- * prepared statement.
- */
- private boolean setUpConnection() {
-
- boolean response = false;
- try {
- Class.forName(driver);
- con = DriverManager.getConnection(url, name, password);
- printWarnings(con.getWarnings());
- response = con.isValid(0);
- } catch (ClassNotFoundException | SQLException e) {
- log.error("PTG CHUNK DAO! Exception in setUpConnection! {}", e.getMessage(), e);
- }
- return response;
- }
-
- /**
- * Auxiliary method that checks if time for resetting the connection has come,
- * and eventually takes it down and up back again.
- */
- private boolean checkConnection() {
-
- boolean response = true;
- if (reconnect) {
- log.debug("PTG CHUNK DAO! Reconnecting to DB! ");
- takeDownConnection();
- response = setUpConnection();
- if (response) {
- reconnect = false;
- }
- }
- return response;
- }
-
- /**
- * Auxiliary method that tales down a connection to the DB.
- */
- private void takeDownConnection() {
-
- if (con != null) {
- try {
- con.close();
- } catch (SQLException e) {
- log.error("PTG CHUNK DAO! Exception in takeDownConnection method: {}",
- e.getMessage(), e);
- }
- }
- }
-
- public Collection find(int[] surlsUniqueIDs,
- String[] surlsArray, String dn) throws IllegalArgumentException {
-
- if (surlsUniqueIDs == null || surlsUniqueIDs.length == 0
- || surlsArray == null || surlsArray.length == 0 || dn == null) {
- throw new IllegalArgumentException("Unable to perform the find, "
- + "invalid arguments: surlsUniqueIDs=" + surlsUniqueIDs
- + " surlsArray=" + surlsArray + " dn=" + dn);
- }
- return find(surlsUniqueIDs, surlsArray, dn, true);
- }
-
- public Collection find(int[] surlsUniqueIDs,
- String[] surlsArray) throws IllegalArgumentException {
-
- if (surlsUniqueIDs == null || surlsUniqueIDs.length == 0
- || surlsArray == null || surlsArray.length == 0) {
- throw new IllegalArgumentException("Unable to perform the find, "
- + "invalid arguments: surlsUniqueIDs=" + surlsUniqueIDs
- + " surlsArray=" + surlsArray);
- }
- return find(surlsUniqueIDs, surlsArray, null, false);
- }
-
- private synchronized Collection find(int[] surlsUniqueIDs,
- String[] surlsArray, String dn, boolean withDn)
- throws IllegalArgumentException {
-
- if ((withDn && dn == null) || surlsUniqueIDs == null
- || surlsUniqueIDs.length == 0 || surlsArray == null
- || surlsArray.length == 0) {
- throw new IllegalArgumentException("Unable to perform the find, "
- + "invalid arguments: surlsUniqueIDs=" + surlsUniqueIDs
- + " surlsArray=" + surlsArray + " withDn=" + withDn + " dn=" + dn);
- }
- if (!checkConnection()) {
- log.error("PTG CHUNK DAO: find - unable to get a valid connection!");
- return new ArrayList();
- }
- PreparedStatement find = null;
- ResultSet rs = null;
-
- try {
-
- String str = "SELECT rq.ID, rq.r_token, sg.statusCode, rq.pinLifetime, rg.ID, rq.timeStamp, "
- + "rq.client_dn, rq.proxy, rg.sourceSURL, rg.normalized_sourceSURL_StFN, rg.sourceSURL_uniqueID, "
- + "d.isSourceADirectory, d.allLevelRecursive, d.numOfLevels "
- + "FROM request_queue rq JOIN (request_Get rg, status_Get sg) "
- + "ON (rg.request_queueID=rq.ID AND sg.request_GetID=rg.ID) "
- + "LEFT JOIN request_DirOption d ON rg.request_DirOptionID=d.ID "
- + "WHERE ( rg.sourceSURL_uniqueID IN "
- + makeSURLUniqueIDWhere(surlsUniqueIDs)
- + " AND rg.sourceSURL IN "
- + makeSurlString(surlsArray) + " )";
-
- if (withDn) {
-
- str += " AND rq.client_dn=\'" + dn + "\'";
- }
-
- find = con.prepareStatement(str);
- printWarnings(con.getWarnings());
-
- List list = new ArrayList();
-
- log.trace("PTG CHUNK DAO - find method: {}", find.toString());
- rs = find.executeQuery();
- printWarnings(find.getWarnings());
- PtGChunkDataTO chunkDataTO = null;
- while (rs.next()) {
-
- chunkDataTO = new PtGChunkDataTO();
- chunkDataTO.setStatus(rs.getInt("sg.statusCode"));
- chunkDataTO.setRequestToken(rs.getString("rq.r_token"));
- chunkDataTO.setPrimaryKey(rs.getLong("rg.ID"));
- chunkDataTO.setFromSURL(rs.getString("rg.sourceSURL"));
-
- chunkDataTO.setNormalizedStFN(rs
- .getString("rg.normalized_sourceSURL_StFN"));
- int uniqueID = rs.getInt("rg.sourceSURL_uniqueID");
- if (!rs.wasNull()) {
- chunkDataTO.setSurlUniqueID(new Integer(uniqueID));
- }
-
- chunkDataTO.setTimeStamp(rs.getTimestamp("rq.timeStamp"));
- chunkDataTO.setLifeTime(rs.getInt("rq.pinLifetime"));
- chunkDataTO.setClientDN(rs.getString("rq.client_dn"));
-
- /**
- * This code is only for the 1.3.18. This is a workaround to get FQANs
- * using the proxy field on request_queue. The FE use the proxy field of
- * request_queue to insert a single FQAN string containing all FQAN
- * separeted by the "#" char. The proxy is a BLOB, hence it has to be
- * properly conveted in string.
- */
- java.sql.Blob blob = rs.getBlob("rq.proxy");
- if (!rs.wasNull() && blob != null) {
- byte[] bdata = blob.getBytes(1, (int) blob.length());
- chunkDataTO.setVomsAttributes(new String(bdata));
- }
- chunkDataTO.setDirOption(rs.getBoolean("d.isSourceADirectory"));
- chunkDataTO.setAllLevelRecursive(rs.getBoolean("d.allLevelRecursive"));
- chunkDataTO.setNumLevel(rs.getInt("d.numOfLevels"));
-
- list.add(chunkDataTO);
- }
- return list;
- } catch (SQLException e) {
- log.error("PTG CHUNK DAO: {}", e.getMessage(), e);
- /* return empty Collection! */
- return new ArrayList();
- } finally {
- close(rs);
- close(find);
- }
- }
-
- private String buildExpainationSet(String explanation) {
-
- return " sg.explanation='" + explanation + "' ";
- }
-
- private String buildTokenWhereClause(TRequestToken requestToken) {
-
- return " rq.r_token='" + requestToken.toString() + "' ";
- }
-
- private String buildSurlsWhereClause(int[] surlsUniqueIDs, String[] surls) {
-
- return " ( rg.sourceSURL_uniqueID IN "
- + makeSURLUniqueIDWhere(surlsUniqueIDs) + " AND rg.sourceSURL IN "
- + makeSurlString(surls) + " ) ";
- }
-
-}
diff --git a/src/main/java/it/grid/storm/catalogs/PtGChunkDataTO.java b/src/main/java/it/grid/storm/catalogs/PtGChunkDataTO.java
deleted file mode 100644
index 7baf7ee18..000000000
--- a/src/main/java/it/grid/storm/catalogs/PtGChunkDataTO.java
+++ /dev/null
@@ -1,311 +0,0 @@
-/*
- *
- * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010.
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not
- * use this file except in compliance with the License. You may obtain a copy of
- * the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package it.grid.storm.catalogs;
-
-import it.grid.storm.srm.types.TStatusCode;
-import it.grid.storm.common.types.TURLPrefix;
-import java.sql.Timestamp;
-import java.util.List;
-import it.grid.storm.namespace.model.Protocol;
-
-/**
- * Class that represents a row in the Persistence Layer: this is all raw data
- * referring to the PtGChunkData proper, that is, String and primitive types.
- *
- * Each field is initialized with default values as per SRM 2.2 specification:
- * protocolList GSIFTP dirOption false status SRM_REQUEST_QUEUED
- *
- * All other fields are 0 if int, or a white space if String.
- *
- * @author EGRID ICTP
- * @version 3.0
- * @date June 2005
- */
-public class PtGChunkDataTO {
-
- private static final String FQAN_SEPARATOR = "#";
- /* Database table request_Get fields BEGIN */
- private long primaryKey = -1; // ID primary key of record in DB
- private boolean dirOption; // initialised in constructor
- private String fromSURL = " ";
- private String normalizedStFN = null;
- private Integer surlUniqueID = null;
- /* Database table request_Get fields END */
-
- private String requestToken = " ";
- private int lifetime = 0;
- private boolean allLevelRecursive; // initialised in constructor
- private int numLevel; // initialised in constructor
- private List protocolList = null; // initialised in constructor
- private long filesize = 0;
- private int status; // initialised in constructor
- private String errString = " ";
- private String turl = " ";
- private Timestamp timeStamp;
- private String clientDN = null;
- private String vomsAttributes = null;
-
- public PtGChunkDataTO() {
-
- TURLPrefix protocolPreferences = new TURLPrefix();
- protocolPreferences.addProtocol(Protocol.GSIFTP);
- this.protocolList = TransferProtocolListConverter.toDB(protocolPreferences);
- this.status = StatusCodeConverter.getInstance().toDB(
- TStatusCode.SRM_REQUEST_QUEUED);
- this.dirOption = false;
- //
- this.allLevelRecursive = false;
- this.numLevel = 0;
- }
-
- public long primaryKey() {
-
- return primaryKey;
- }
-
- public void setPrimaryKey(long n) {
-
- primaryKey = n;
- }
-
- public String requestToken() {
-
- return requestToken;
- }
-
- public void setRequestToken(String s) {
-
- requestToken = s;
- }
-
- public Timestamp timeStamp() {
-
- return timeStamp;
- }
-
- public void setTimeStamp(Timestamp timeStamp) {
-
- this.timeStamp = timeStamp;
- }
-
- public String fromSURL() {
-
- return fromSURL;
- }
-
- public void setFromSURL(String s) {
-
- fromSURL = s;
- }
-
- /**
- * @param normalizedStFN
- * the normalizedStFN to set
- */
- public void setNormalizedStFN(String normalizedStFN) {
-
- this.normalizedStFN = normalizedStFN;
- }
-
- /**
- * @return the normalizedStFN
- */
- public String normalizedStFN() {
-
- return normalizedStFN;
- }
-
- /**
- * @param sURLUniqueID
- * the sURLUniqueID to set
- */
- public void setSurlUniqueID(Integer sURLUniqueID) {
-
- this.surlUniqueID = sURLUniqueID;
- }
-
- /**
- * @return the sURLUniqueID
- */
- public Integer surlUniqueID() {
-
- return surlUniqueID;
- }
-
- public int lifeTime() {
-
- return lifetime;
- }
-
- public void setLifeTime(int n) {
-
- lifetime = n;
- }
-
- public boolean dirOption() {
-
- return dirOption;
- }
-
- public void setDirOption(boolean b) {
-
- dirOption = b;
- }
-
- public boolean allLevelRecursive() {
-
- return allLevelRecursive;
- }
-
- public void setAllLevelRecursive(boolean b) {
-
- allLevelRecursive = b;
- }
-
- public int numLevel() {
-
- return numLevel;
- }
-
- public void setNumLevel(int n) {
-
- numLevel = n;
- }
-
- public List protocolList() {
-
- return protocolList;
- }
-
- public void setProtocolList(List l) {
-
- if ((l != null) && (!l.isEmpty()))
- protocolList = l;
- }
-
- public long fileSize() {
-
- return filesize;
- }
-
- public void setFileSize(long n) {
-
- filesize = n;
- }
-
- public int status() {
-
- return status;
- }
-
- public void setStatus(int n) {
-
- status = n;
- }
-
- public String errString() {
-
- return errString;
- }
-
- public void setErrString(String s) {
-
- errString = s;
- }
-
- public String turl() {
-
- return turl;
- }
-
- public void setTurl(String s) {
-
- turl = s;
- }
-
- public String clientDN() {
-
- return clientDN;
- }
-
- public void setClientDN(String s) {
-
- clientDN = s;
- }
-
- public String vomsAttributes() {
-
- return vomsAttributes;
- }
-
- public void setVomsAttributes(String s) {
-
- vomsAttributes = s;
- }
-
- public void setVomsAttributes(String[] fqaNsAsString) {
-
- vomsAttributes = "";
- for (int i = 0; i < fqaNsAsString.length; i++) {
- vomsAttributes += fqaNsAsString[i];
- if (i < fqaNsAsString.length - 1) {
- vomsAttributes += FQAN_SEPARATOR;
- }
- }
-
- }
-
- public String[] vomsAttributesArray() {
-
- return vomsAttributes.split(FQAN_SEPARATOR);
- }
-
- public String toString() {
-
- StringBuilder sb = new StringBuilder();
- sb.append(primaryKey);
- sb.append(" ");
- sb.append(requestToken);
- sb.append(" ");
- sb.append(fromSURL);
- sb.append(" ");
- sb.append(normalizedStFN);
- sb.append(" ");
- sb.append(surlUniqueID);
- sb.append(" ");
- sb.append(lifetime);
- sb.append(" ");
- sb.append(dirOption);
- sb.append(" ");
- sb.append(allLevelRecursive);
- sb.append(" ");
- sb.append(numLevel);
- sb.append(" ");
- sb.append(protocolList);
- sb.append(" ");
- sb.append(filesize);
- sb.append(" ");
- sb.append(status);
- sb.append(" ");
- sb.append(errString);
- sb.append(" ");
- sb.append(turl);
- return sb.toString();
- }
-
-}
diff --git a/src/main/java/it/grid/storm/catalogs/PtGData.java b/src/main/java/it/grid/storm/catalogs/PtGData.java
deleted file mode 100644
index 0ef728428..000000000
--- a/src/main/java/it/grid/storm/catalogs/PtGData.java
+++ /dev/null
@@ -1,38 +0,0 @@
-package it.grid.storm.catalogs;
-
-import it.grid.storm.srm.types.TDirOption;
-import it.grid.storm.srm.types.TLifeTimeInSeconds;
-import it.grid.storm.srm.types.TSizeInBytes;
-
-public interface PtGData extends FileTransferData {
-
- /**
- * Method that returns the requested pin life time for this chunk of the srm
- * request.
- */
- public TLifeTimeInSeconds getPinLifeTime();
-
- /**
- * Method that returns the dirOption specified in the srm request.
- */
- public TDirOption getDirOption();
-
- /**
- * Method that returns the file size for this chunk of the srm request.
- */
- public TSizeInBytes getFileSize();
-
- /**
- * Method used to set the size of the file corresponding to the requested
- * SURL. If the supplied TSizeInByte is null, then nothing gets set!
- */
- public void setFileSize(TSizeInBytes size);
-
- /**
- * Method that sets the status of this request to SRM_FILE_PINNED; it needs
- * the explanation String which describes the situation in greater detail; if
- * a null is passed, then an empty String is used as explanation.
- */
- public void changeStatusSRM_FILE_PINNED(String explanation);
-
-}
diff --git a/src/main/java/it/grid/storm/catalogs/PtGPersistentChunkData.java b/src/main/java/it/grid/storm/catalogs/PtGPersistentChunkData.java
deleted file mode 100644
index a363e36c2..000000000
--- a/src/main/java/it/grid/storm/catalogs/PtGPersistentChunkData.java
+++ /dev/null
@@ -1,217 +0,0 @@
-/*
- *
- * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010.
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not
- * use this file except in compliance with the License. You may obtain a copy of
- * the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package it.grid.storm.catalogs;
-
-import it.grid.storm.common.types.TURLPrefix;
-import it.grid.storm.griduser.GridUserInterface;
-import it.grid.storm.srm.types.TDirOption;
-import it.grid.storm.srm.types.TLifeTimeInSeconds;
-import it.grid.storm.srm.types.TRequestToken;
-import it.grid.storm.srm.types.TReturnStatus;
-import it.grid.storm.srm.types.TSURL;
-import it.grid.storm.srm.types.TSizeInBytes;
-import it.grid.storm.srm.types.TStatusCode;
-import it.grid.storm.srm.types.TTURL;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * This class represents a PrepareToGetChunkData, that is part of a multifile
- * PrepareToGet srm request. It contains data about: the requestToken, the
- * fromSURL, the requested lifeTime of pinning, the TDirOption which tells
- * whether the requested SURL is a directory and if it must be recursed at all
- * levels, as well as the desired number of levels to recurse, the desired
- * transferProtocols in order of preference, the fileSize, and the transferURL
- * for the supplied SURL.
- *
- * @author EGRID - ICTP Trieste
- * @date March 21st, 2005
- * @version 3.0
- */
-public class PtGPersistentChunkData extends IdentityPtGData implements
- PersistentChunkData {
-
- private static final Logger log = LoggerFactory
- .getLogger(PtGPersistentChunkData.class);
-
- /**
- * long representing the primary key for the persistence layer, in the
- * status_Get table
- */
- private long primaryKey = -1;
-
- /**
- * This is the requestToken of the multifile srm request to which this chunk
- * belongs
- */
- private TRequestToken requestToken;
-
- /**
- * @param requestToken
- * @param fromSURL
- * @param lifeTime
- * @param dirOption
- * @param desiredProtocols
- * @param fileSize
- * @param status
- * @param transferURL
- * @throws InvalidPtGDataAttributesException
- */
- public PtGPersistentChunkData(GridUserInterface auth,
- TRequestToken requestToken, TSURL fromSURL, TLifeTimeInSeconds lifeTime,
- TDirOption dirOption, TURLPrefix desiredProtocols, TSizeInBytes fileSize,
- TReturnStatus status, TTURL transferURL)
- throws InvalidPtGDataAttributesException,
- InvalidPtGDataAttributesException,
- InvalidFileTransferDataAttributesException,
- InvalidSurlRequestDataAttributesException {
-
- super(auth, fromSURL, lifeTime, dirOption, desiredProtocols, fileSize,
- status, transferURL);
- if (requestToken == null) {
- log.debug("PtGPersistentChunkData: requestToken is null!");
- throw new InvalidPtGPersistentChunkDataAttributesException(requestToken,
- fromSURL, lifeTime, dirOption, desiredProtocols, fileSize, status,
- transferURL);
- }
-
- this.requestToken = requestToken;
- }
-
- /**
- * Method used to get the primary key used in the persistence layer!
- */
- @Override
- public long getPrimaryKey() {
-
- return primaryKey;
- }
-
- /**
- * Method used to set the primary key to be used in the persistence layer!
- */
- public void setPrimaryKey(long l) {
-
- primaryKey = l;
- }
-
- /**
- * Method that returns the requestToken of the srm request to which this chunk
- * belongs.
- */
- @Override
- public TRequestToken getRequestToken() {
-
- return requestToken;
- }
-
- /**
- * Method that sets the status of this request to SRM_FILE_PINNED; it needs
- * the explanation String which describes the situation in greater detail; if
- * a null is passed, then an empty String is used as explanation.
- */
- public void changeStatusSRM_FILE_PINNED(String explanation) {
-
- setStatus(TStatusCode.SRM_FILE_PINNED, explanation);
- }
-
- /*
- * (non-Javadoc)
- *
- * @see java.lang.Object#hashCode()
- */
- @Override
- public int hashCode() {
-
- final int prime = 31;
- int result = super.hashCode();
- result = prime * result + (int) (primaryKey ^ (primaryKey >>> 32));
- result = prime * result
- + ((requestToken == null) ? 0 : requestToken.hashCode());
- return result;
- }
-
- /*
- * (non-Javadoc)
- *
- * @see java.lang.Object#equals(java.lang.Object)
- */
- @Override
- public boolean equals(Object obj) {
-
- if (this == obj) {
- return true;
- }
- if (!super.equals(obj)) {
- return false;
- }
- if (getClass() != obj.getClass()) {
- return false;
- }
- PtGPersistentChunkData other = (PtGPersistentChunkData) obj;
- if (primaryKey != other.primaryKey) {
- return false;
- }
- if (requestToken == null) {
- if (other.requestToken != null) {
- return false;
- }
- } else if (!requestToken.equals(other.requestToken)) {
- return false;
- }
- return true;
- }
-
- /*
- * (non-Javadoc)
- *
- * @see java.lang.Object#toString()
- */
- @Override
- public String toString() {
-
- StringBuilder builder = new StringBuilder();
- builder.append("PtGPersistentChunkData [primaryKey=");
- builder.append(primaryKey);
- builder.append(", requestToken=");
- builder.append(requestToken);
- builder.append(", pinLifeTime=");
- builder.append(pinLifeTime);
- builder.append(", dirOption=");
- builder.append(dirOption);
- builder.append(", fileSize=");
- builder.append(fileSize);
- builder.append(", transferProtocols=");
- builder.append(transferProtocols);
- builder.append(", SURL=");
- builder.append(SURL);
- builder.append(", status=");
- builder.append(status);
- builder.append(", transferURL=");
- builder.append(transferURL);
- builder.append("]");
- return builder.toString();
- }
-
- @Override
- public long getIdentifier() {
-
- return getPrimaryKey();
- }
-}
diff --git a/src/main/java/it/grid/storm/catalogs/PtPChunkCatalog.java b/src/main/java/it/grid/storm/catalogs/PtPChunkCatalog.java
index 482f3e15f..e00ec0367 100644
--- a/src/main/java/it/grid/storm/catalogs/PtPChunkCatalog.java
+++ b/src/main/java/it/grid/storm/catalogs/PtPChunkCatalog.java
@@ -17,6 +17,15 @@
package it.grid.storm.catalogs;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.List;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.collect.Lists;
+
import it.grid.storm.common.types.SizeUnit;
import it.grid.storm.common.types.TURLPrefix;
import it.grid.storm.common.types.TimeUnit;
@@ -24,6 +33,26 @@
import it.grid.storm.griduser.AbstractGridUser;
import it.grid.storm.griduser.GridUserInterface;
import it.grid.storm.griduser.GridUserManager;
+import it.grid.storm.persistence.converter.FileLifetimeConverter;
+import it.grid.storm.persistence.converter.FileStorageTypeConverter;
+import it.grid.storm.persistence.converter.OverwriteModeConverter;
+import it.grid.storm.persistence.converter.PinLifetimeConverter;
+import it.grid.storm.persistence.converter.SizeInBytesIntConverter;
+import it.grid.storm.persistence.converter.SpaceTokenStringConverter;
+import it.grid.storm.persistence.converter.StatusCodeConverter;
+import it.grid.storm.persistence.converter.TURLConverter;
+import it.grid.storm.persistence.converter.TransferProtocolListConverter;
+import it.grid.storm.persistence.dao.PtPChunkDAO;
+import it.grid.storm.persistence.exceptions.InvalidFileTransferDataAttributesException;
+import it.grid.storm.persistence.exceptions.InvalidPtPDataAttributesException;
+import it.grid.storm.persistence.exceptions.InvalidPtPPersistentChunkDataAttributesException;
+import it.grid.storm.persistence.exceptions.InvalidReducedPtPChunkDataAttributesException;
+import it.grid.storm.persistence.exceptions.InvalidSurlRequestDataAttributesException;
+import it.grid.storm.persistence.impl.mysql.PtPChunkDAOMySql;
+import it.grid.storm.persistence.model.PtPChunkDataTO;
+import it.grid.storm.persistence.model.PtPPersistentChunkData;
+import it.grid.storm.persistence.model.ReducedPtPChunkData;
+import it.grid.storm.persistence.model.ReducedPtPChunkDataTO;
import it.grid.storm.srm.types.InvalidTRequestTokenAttributesException;
import it.grid.storm.srm.types.InvalidTSURLAttributesException;
import it.grid.storm.srm.types.InvalidTSizeAttributesException;
@@ -39,20 +68,11 @@
import it.grid.storm.srm.types.TStatusCode;
import it.grid.storm.srm.types.TTURL;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.List;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
/**
- * Class that represents StoRMs PtPChunkCatalog: it collects PtPChunkData and
- * provides methods for looking up a PtPChunkData based on TRequestToken, as
- * well as for updating data into persistence. Methods are also supplied to
- * evaluate if a SURL is in SRM_SPACE_AVAILABLE state, and to transit expired
- * SURLs in SRM_SPACE_AVAILABLE state to SRM_FILE_LIFETIME_EXPIRED.
+ * Class that represents StoRMs PtPChunkCatalog: it collects PtPChunkData and provides methods for
+ * looking up a PtPChunkData based on TRequestToken, as well as for updating data into persistence.
+ * Methods are also supplied to evaluate if a SURL is in SRM_SPACE_AVAILABLE state, and to transit
+ * expired SURLs in SRM_SPACE_AVAILABLE state to SRM_FILE_LIFETIME_EXPIRED.
*
* @author EGRID - ICTP Trieste
* @date June, 2005
@@ -60,554 +80,412 @@
*/
public class PtPChunkCatalog {
- private static final Logger log = LoggerFactory
- .getLogger(PtPChunkCatalog.class);
-
- /* only instance of PtPChunkCatalog present in StoRM! */
- private static final PtPChunkCatalog cat = new PtPChunkCatalog();
- private final PtPChunkDAO dao = PtPChunkDAO.getInstance();
-
- private PtPChunkCatalog() {}
-
- /**
- * Method that returns the only instance of PtPChunkCatalog available.
- */
- public static PtPChunkCatalog getInstance() {
-
- return cat;
- }
-
- /**
- * Method used to update into Persistence a retrieved PtPChunkData.
- */
- synchronized public void update(PtPPersistentChunkData chunkData) {
-
- PtPChunkDataTO to = new PtPChunkDataTO();
- /* rimary key needed by DAO Object */
- to.setPrimaryKey(chunkData.getPrimaryKey());
- to.setStatus(StatusCodeConverter.getInstance().toDB(
- chunkData.getStatus().getStatusCode()));
- to.setErrString(chunkData.getStatus().getExplanation());
- to.setTransferURL(TURLConverter.getInstance().toDB(
- chunkData.getTransferURL().toString()));
- to.setPinLifetime(PinLifetimeConverter.getInstance().toDB(
- chunkData.pinLifetime().value()));
- to.setFileLifetime(FileLifetimeConverter.getInstance().toDB(
- chunkData.fileLifetime().value()));
- to.setFileStorageType(FileStorageTypeConverter.getInstance().toDB(
- chunkData.fileStorageType()));
- to.setOverwriteOption(OverwriteModeConverter.getInstance().toDB(
- chunkData.overwriteOption()));
- to.setNormalizedStFN(chunkData.getSURL().normalizedStFN());
- to.setSurlUniqueID(new Integer(chunkData.getSURL().uniqueId()));
- to.setClientDN(chunkData.getUser().getDn());
- if (chunkData.getUser() instanceof AbstractGridUser) {
- if (((AbstractGridUser) chunkData.getUser()).hasVoms()) {
- to.setVomsAttributes(((AbstractGridUser) chunkData.getUser())
- .getFQANsAsString());
- }
-
- }
- dao.update(to);
- }
-
- /**
- * Method that returns a Collection of PtPChunkData Objects matching the
- * supplied TRequestToken. If any of the data associated to the TRequestToken
- * is not well formed and so does not allow a PtPChunkData Object to be
- * created, then that part of the request is dropped, gets logged and an
- * attempt is made to write in the DB that the chunk was malformed; the
- * processing continues with the next part. Only the valid chunks get
- * returned. If there are no chunks to process then an empty Collection is
- * returned, and a messagge gets logged. NOTE! Chunks in SRM_ABORTED status
- * are NOT returned! This is imporant because this method is intended to be
- * used by the Feeders to fetch all chunks in the request, and aborted chunks
- * should not be picked up for processing!
- */
- synchronized public Collection lookup(
- final TRequestToken rt) {
-
- Collection chunkTOs = dao.find(rt);
- log.debug("PtPChunkCatalog: retrieved data {}", chunkTOs);
- return buildChunkDataList(chunkTOs);
- }
-
- /**
- * Private method used to create a PtPChunkData object, from a PtPChunkDataTO
- * and TRequestToken. If a chunk cannot be created, an error messagge gets
- * logged and an attempt is made to signal in the DB that the chunk is
- * malformed.
- */
- private PtPPersistentChunkData makeOne(PtPChunkDataTO auxTO, TRequestToken rt) {
-
- StringBuilder errorSb = new StringBuilder();
- // toSURL
- TSURL toSURL = null;
- try {
- toSURL = TSURL.makeFromStringValidate(auxTO.toSURL());
- } catch (InvalidTSURLAttributesException e) {
- errorSb.append(e);
- }
- if (auxTO.normalizedStFN() != null) {
- toSURL.setNormalizedStFN(auxTO.normalizedStFN());
- }
- if (auxTO.surlUniqueID() != null) {
- toSURL.setUniqueID(auxTO.surlUniqueID().intValue());
- }
- // pinLifetime
- TLifeTimeInSeconds pinLifetime = null;
- try {
- long pinLifeTime = PinLifetimeConverter.getInstance().toStoRM(
- auxTO.pinLifetime());
- // Check for max value allowed
- long max = Configuration.getInstance().getPinLifetimeMaximum();
- if (pinLifeTime > max) {
- log.warn("PinLifeTime is greater than the max value allowed. Drop the "
- + "value to the max = {} seconds", max);
- pinLifeTime = max;
- }
- pinLifetime = TLifeTimeInSeconds.make(pinLifeTime, TimeUnit.SECONDS);
- } catch (IllegalArgumentException e) {
- errorSb.append("\n");
- errorSb.append(e);
- }
- // fileLifetime
- TLifeTimeInSeconds fileLifetime = null;
- try {
- fileLifetime = TLifeTimeInSeconds.make(FileLifetimeConverter
- .getInstance().toStoRM(auxTO.fileLifetime()), TimeUnit.SECONDS);
- } catch (IllegalArgumentException e) {
- errorSb.append("\n");
- errorSb.append(e);
- }
- // fileStorageType
- TFileStorageType fileStorageType = FileStorageTypeConverter.getInstance()
- .toSTORM(auxTO.fileStorageType());
- if (fileStorageType == TFileStorageType.EMPTY) {
- errorSb.append("\nTFileStorageType could not be translated from "
- + "its String representation! String: " + auxTO.fileStorageType());
- // Use the default value defined in Configuration.
- fileStorageType = TFileStorageType.getTFileStorageType(Configuration
- .getInstance().getDefaultFileStorageType());
- errorSb.append("\nUsed the default TFileStorageType as defined "
- + "in StoRM config.: " + fileStorageType);
- }
- // expectedFileSize
- //
- // WARNING! A converter is used because the DB uses 0 for empty, whereas
- // StoRM object model does allow a 0 size! Since this is an optional
- // field
- // in the SRM specs, null must be converted explicitly to Empty
- // TSizeInBytes
- // because it is indeed well formed!
- TSizeInBytes expectedFileSize = null;
- TSizeInBytes emptySize = TSizeInBytes.makeEmpty();
- long sizeTranslation = SizeInBytesIntConverter.getInstance().toStoRM(
- auxTO.expectedFileSize());
- if (emptySize.value() == sizeTranslation) {
- expectedFileSize = emptySize;
- } else {
- try {
- expectedFileSize = TSizeInBytes.make(auxTO.expectedFileSize(),
- SizeUnit.BYTES);
- } catch (InvalidTSizeAttributesException e) {
- errorSb.append("\n");
- errorSb.append(e);
- }
- }
- // spaceToken!
- //
- // WARNING! A converter is still needed because of DB logic for missing
- // SpaceToken makes use of NULL, whereas StoRM object model does not
- // allow
- // for null! It makes use of a specific Empty type.
- //
- // Indeed, the SpaceToken field is optional, so a request with a null
- // value
- // for the SpaceToken field in the DB, _is_ well formed!
- TSpaceToken spaceToken = null;
- TSpaceToken emptyToken = TSpaceToken.makeEmpty();
- /**
- * convert empty string representation of DPM into StoRM representation;
- */
- String spaceTokenTranslation = SpaceTokenStringConverter.getInstance()
- .toStoRM(auxTO.spaceToken());
- if (emptyToken.toString().equals(spaceTokenTranslation)) {
- spaceToken = emptyToken;
- } else {
- try {
- spaceToken = TSpaceToken.make(spaceTokenTranslation);
- } catch (InvalidTSpaceTokenAttributesException e) {
- errorSb.append("\n");
- errorSb.append(e);
- }
- }
- // overwriteOption!
- TOverwriteMode overwriteOption = OverwriteModeConverter.getInstance()
- .toSTORM(auxTO.overwriteOption());
- if (overwriteOption == TOverwriteMode.EMPTY) {
- errorSb.append("\nTOverwriteMode could not be translated "
- + "from its String representation! String: " + auxTO.overwriteOption());
- overwriteOption = null;
- }
- // transferProtocols
- TURLPrefix transferProtocols = TransferProtocolListConverter.toSTORM(auxTO
- .protocolList());
- if (transferProtocols.size() == 0) {
- errorSb.append("\nEmpty list of TransferProtocols "
- + "or could not translate TransferProtocols!");
- transferProtocols = null; // fail construction of PtPChunkData!
- }
- // status
- TReturnStatus status = null;
- TStatusCode code = StatusCodeConverter.getInstance()
- .toSTORM(auxTO.status());
- if (code == TStatusCode.EMPTY) {
- errorSb.append("\nRetrieved StatusCode was not recognised: "
- + auxTO.status());
- } else {
- status = new TReturnStatus(code, auxTO.errString());
- }
- GridUserInterface gridUser = null;
- try {
- if (auxTO.vomsAttributes() != null
- && !auxTO.vomsAttributes().trim().equals("")) {
- gridUser = GridUserManager.makeVOMSGridUser(auxTO.clientDN(),
- auxTO.vomsAttributesArray());
- } else {
- gridUser = GridUserManager.makeGridUser(auxTO.clientDN());
- }
-
- } catch (IllegalArgumentException e) {
- log.error("Unexpected error on voms grid user creation. "
- + "IllegalArgumentException: {}", e.getMessage(), e);
- }
-
- // transferURL
- /**
- * whatever is read is just meaningless because PtP will fill it in!!! So
- * create an Empty TTURL by default! Vital to avoid problems with unknown
- * DPM NULL/EMPTY logic policy!
- */
- TTURL transferURL = TTURL.makeEmpty();
- // make PtPChunkData
- PtPPersistentChunkData aux = null;
- try {
- aux = new PtPPersistentChunkData(gridUser, rt, toSURL, pinLifetime,
- fileLifetime, fileStorageType, spaceToken, expectedFileSize,
- transferProtocols, overwriteOption, status, transferURL);
- aux.setPrimaryKey(auxTO.primaryKey());
- } catch (InvalidPtPPersistentChunkDataAttributesException e) {
- dao.signalMalformedPtPChunk(auxTO);
- log.warn("PtP CHUNK CATALOG! Retrieved malformed PtP chunk data"
- + " from persistence. Dropping chunk from request: {}", rt);
- log.warn(e.getMessage(), e);
- log.warn(errorSb.toString());
- } catch (InvalidPtPDataAttributesException e) {
- dao.signalMalformedPtPChunk(auxTO);
- log.warn("PtP CHUNK CATALOG! Retrieved malformed PtP chunk data"
- + " from persistence. Dropping chunk from request: {}", rt);
- log.warn(e.getMessage(), e);
- log.warn(errorSb.toString());
- } catch (InvalidFileTransferDataAttributesException e) {
- dao.signalMalformedPtPChunk(auxTO);
- log.warn("PtP CHUNK CATALOG! Retrieved malformed PtP chunk data"
- + " from persistence. Dropping chunk from request: {}", rt);
- log.warn(e.getMessage(), e);
- log.warn(errorSb.toString());
- } catch (InvalidSurlRequestDataAttributesException e) {
- dao.signalMalformedPtPChunk(auxTO);
- log.warn("PtP CHUNK CATALOG! Retrieved malformed PtP chunk data"
- + " from persistence. Dropping chunk from request: {}", rt);
- log.warn(e.getMessage(), e);
- log.warn(errorSb.toString());
- }
- // end...
- return aux;
- }
-
- /**
- *
- * Adds to the received PtPChunkDataTO the normalized StFN and the SURL unique
- * ID taken from the PtPChunkData
- *
- * @param chunkTO
- * @param chunk
- */
- private void completeTO(ReducedPtPChunkDataTO chunkTO,
- final ReducedPtPChunkData chunk) {
-
- chunkTO.setNormalizedStFN(chunk.toSURL().normalizedStFN());
- chunkTO.setSurlUniqueID(new Integer(chunk.toSURL().uniqueId()));
- }
-
- /**
- *
- * Creates a ReducedPtGChunkDataTO from the received PtGChunkDataTO and
- * completes it with the normalized StFN and the SURL unique ID taken from the
- * PtGChunkData
- *
- * @param chunkTO
- * @param chunk
- * @return
- * @throws InvalidReducedPtPChunkDataAttributesException
- */
- private ReducedPtPChunkDataTO completeTO(PtPChunkDataTO chunkTO,
- final PtPPersistentChunkData chunk)
- throws InvalidReducedPtPChunkDataAttributesException {
-
- ReducedPtPChunkDataTO reducedChunkTO = this.reduce(chunkTO);
- this.completeTO(reducedChunkTO, this.reduce(chunk));
- return reducedChunkTO;
- }
-
- /**
- * Creates a ReducedPtPChunkData from the data contained in the received
- * PtPChunkData
- *
- * @param chunk
- * @return
- * @throws InvalidReducedPtPChunkDataAttributesException
- */
- private ReducedPtPChunkData reduce(PtPPersistentChunkData chunk)
- throws InvalidReducedPtPChunkDataAttributesException {
-
- ReducedPtPChunkData reducedChunk = new ReducedPtPChunkData(chunk.getSURL(),
- chunk.getStatus(), chunk.fileStorageType(), chunk.fileLifetime());
- reducedChunk.setPrimaryKey(chunk.getPrimaryKey());
- return reducedChunk;
- }
-
- /**
- * Creates a ReducedPtPChunkDataTO from the data contained in the received
- * PtPChunkDataTO
- *
- * @param chunkTO
- * @return
- */
- private ReducedPtPChunkDataTO reduce(PtPChunkDataTO chunkTO) {
-
- ReducedPtPChunkDataTO reducedChunkTO = new ReducedPtPChunkDataTO();
- reducedChunkTO.setPrimaryKey(chunkTO.primaryKey());
- reducedChunkTO.setToSURL(chunkTO.toSURL());
- reducedChunkTO.setNormalizedStFN(chunkTO.normalizedStFN());
- reducedChunkTO.setSurlUniqueID(chunkTO.surlUniqueID());
- reducedChunkTO.setStatus(chunkTO.status());
- reducedChunkTO.setErrString(chunkTO.errString());
- return reducedChunkTO;
- }
-
- /**
- * Checks if the received PtPChunkDataTO contains the fields not set by the
- * front end but required
- *
- * @param chunkTO
- * @return
- */
- private boolean isComplete(PtPChunkDataTO chunkTO) {
-
- return (chunkTO.normalizedStFN() != null)
- && (chunkTO.surlUniqueID() != null);
- }
-
- /**
- * Checks if the received ReducedPtGChunkDataTO contains the fields not set by
- * the front end but required
- *
- * @param reducedChunkTO
- * @return
- */
- private boolean isComplete(ReducedPtPChunkDataTO reducedChunkTO) {
-
- return (reducedChunkTO.normalizedStFN() != null)
- && (reducedChunkTO.surlUniqueID() != null);
- }
-
- public Collection lookupReducedPtPChunkData(
- TRequestToken requestToken, Collection surls) {
-
- Collection reducedChunkDataTOs = dao.findReduced(
- requestToken.getValue(), surls);
- log.debug("PtP CHUNK CATALOG: retrieved data {}", reducedChunkDataTOs);
- return buildReducedChunkDataList(reducedChunkDataTOs);
- }
-
- public Collection lookupPtPChunkData(TSURL surl,
- GridUserInterface user) {
-
- return lookupPtPChunkData(
- (List) Arrays.asList(new TSURL[] { surl }), user);
- }
-
- private Collection lookupPtPChunkData(
- List surls, GridUserInterface user) {
-
- int[] surlsUniqueIDs = new int[surls.size()];
- String[] surlsArray = new String[surls.size()];
- int index = 0;
- for (TSURL tsurl : surls) {
- surlsUniqueIDs[index] = tsurl.uniqueId();
- surlsArray[index] = tsurl.rawSurl();
- index++;
- }
- Collection chunkDataTOs = dao.find(surlsUniqueIDs,
- surlsArray, user.getDn());
- log.debug("PtP CHUNK CATALOG: retrieved data {}", chunkDataTOs);
- return buildChunkDataList(chunkDataTOs);
- }
-
- private Collection buildChunkDataList(
- Collection chunkDataTOs) {
-
- ArrayList list = new ArrayList();
- PtPPersistentChunkData chunk;
- for (PtPChunkDataTO chunkTO : chunkDataTOs) {
- chunk = makeOne(chunkTO);
- if (chunk == null) {
- continue;
- }
- list.add(chunk);
- if (isComplete(chunkTO)) {
- continue;
- }
- try {
- dao.updateIncomplete(completeTO(chunkTO, chunk));
- } catch (InvalidReducedPtPChunkDataAttributesException e) {
- log.warn("PtG CHUNK CATALOG! unable to add missing informations on "
- + "DB to the request: {}", e.getMessage());
- }
- }
- log.debug("PtPChunkCatalog: returning {}\n\n", list);
- return list;
- }
-
- private PtPPersistentChunkData makeOne(PtPChunkDataTO chunkTO) {
-
- try {
- return makeOne(chunkTO,
- new TRequestToken(chunkTO.requestToken(), chunkTO.timeStamp()));
- } catch (InvalidTRequestTokenAttributesException e) {
- throw new IllegalStateException(
- "Unexpected InvalidTRequestTokenAttributesException in TRequestToken: "
- + e);
- }
- }
-
- private Collection buildReducedChunkDataList(
- Collection chunkDataTOCollection) {
-
- ArrayList list = new ArrayList();
- ReducedPtPChunkData reducedChunkData;
- for (ReducedPtPChunkDataTO reducedChunkDataTO : chunkDataTOCollection) {
- reducedChunkData = makeOneReduced(reducedChunkDataTO);
- if (reducedChunkData != null) {
- list.add(reducedChunkData);
- if (!this.isComplete(reducedChunkDataTO)) {
- this.completeTO(reducedChunkDataTO, reducedChunkData);
- dao.updateIncomplete(reducedChunkDataTO);
- }
- }
- }
- log.debug("PtP CHUNK CATALOG: returning {}", list);
- return list;
- }
-
- private ReducedPtPChunkData makeOneReduced(
- ReducedPtPChunkDataTO reducedChunkDataTO) {
-
- StringBuilder errorSb = new StringBuilder();
- // fromSURL
- TSURL toSURL = null;
- try {
- toSURL = TSURL.makeFromStringValidate(reducedChunkDataTO.toSURL());
- } catch (InvalidTSURLAttributesException e) {
- errorSb.append(e);
- }
- if (reducedChunkDataTO.normalizedStFN() != null) {
- toSURL.setNormalizedStFN(reducedChunkDataTO.normalizedStFN());
- }
- if (reducedChunkDataTO.surlUniqueID() != null) {
- toSURL.setUniqueID(reducedChunkDataTO.surlUniqueID().intValue());
- }
- // status
- TReturnStatus status = null;
- TStatusCode code = StatusCodeConverter.getInstance().toSTORM(
- reducedChunkDataTO.status());
- if (code == TStatusCode.EMPTY) {
- errorSb.append("\nRetrieved StatusCode was not recognised: "
- + reducedChunkDataTO.status());
- } else {
- status = new TReturnStatus(code, reducedChunkDataTO.errString());
- }
- // fileStorageType
- TFileStorageType fileStorageType = FileStorageTypeConverter.getInstance()
- .toSTORM(reducedChunkDataTO.fileStorageType());
- if (fileStorageType == TFileStorageType.EMPTY) {
- errorSb.append("\nTFileStorageType could not be "
- + "translated from its String representation! String: "
- + reducedChunkDataTO.fileStorageType());
- // Use the default value defined in Configuration.
- fileStorageType = TFileStorageType.getTFileStorageType(Configuration
- .getInstance().getDefaultFileStorageType());
- errorSb
- .append("\nUsed the default TFileStorageType as defined in StoRM config.: "
- + fileStorageType);
- }
- // fileLifetime
- TLifeTimeInSeconds fileLifetime = null;
- try {
- fileLifetime = TLifeTimeInSeconds.make(FileLifetimeConverter
- .getInstance().toStoRM(reducedChunkDataTO.fileLifetime()),
- TimeUnit.SECONDS);
- } catch (IllegalArgumentException e) {
- errorSb.append("\n");
- errorSb.append(e);
- }
- // make ReducedPtPChunkData
- ReducedPtPChunkData aux = null;
- try {
- aux = new ReducedPtPChunkData(toSURL, status, fileStorageType,
- fileLifetime);
- aux.setPrimaryKey(reducedChunkDataTO.primaryKey());
- } catch (InvalidReducedPtPChunkDataAttributesException e) {
- log.warn("PtP CHUNK CATALOG! Retrieved malformed Reduced PtP"
- + " chunk data from persistence: dropping reduced chunk...");
- log.warn(e.getMessage(), e);
- log.warn(errorSb.toString());
- }
- // end...
- return aux;
- }
-
- public int updateStatus(TRequestToken requestToken, TSURL surl,
- TStatusCode statusCode, String explanation) {
-
- return dao.updateStatus(requestToken, new int[] { surl.uniqueId() },
- new String[] { surl.rawSurl() }, statusCode, explanation);
- }
-
- public int updateFromPreviousStatus(TRequestToken requestToken,
- TStatusCode expectedStatusCode, TStatusCode newStatusCode,
- String explanation) {
-
- return dao.updateStatusOnMatchingStatus(requestToken, expectedStatusCode,
- newStatusCode, explanation);
- }
-
- public int updateFromPreviousStatus(TRequestToken requestToken,
- List surlList, TStatusCode expectedStatusCode,
- TStatusCode newStatusCode) {
-
- int[] surlsUniqueIDs = new int[surlList.size()];
- String[] surls = new String[surlList.size()];
- int index = 0;
- for (TSURL tsurl : surlList) {
- surlsUniqueIDs[index] = tsurl.uniqueId();
- surls[index] = tsurl.rawSurl();
- index++;
- }
- return dao.updateStatusOnMatchingStatus(requestToken, surlsUniqueIDs, surls,
- expectedStatusCode, newStatusCode);
- }
+ private static final Logger log = LoggerFactory.getLogger(PtPChunkCatalog.class);
+
+ private static PtPChunkCatalog instance;
+
+ public static synchronized PtPChunkCatalog getInstance() {
+ if (instance == null) {
+ instance = new PtPChunkCatalog();
+ }
+ return instance;
+ }
+
+ private final PtPChunkDAO dao;
+
+ private PtPChunkCatalog() {
+ dao = PtPChunkDAOMySql.getInstance();
+ }
+
+ /**
+ * Method used to update into Persistence a retrieved PtPChunkData.
+ */
+ public synchronized void update(PtPPersistentChunkData chunkData) {
+
+ PtPChunkDataTO to = new PtPChunkDataTO();
+ /* Primary key needed by DAO Object */
+ to.setPrimaryKey(chunkData.getPrimaryKey());
+ to.setStatus(StatusCodeConverter.getInstance().toDB(chunkData.getStatus().getStatusCode()));
+ to.setErrString(chunkData.getStatus().getExplanation());
+ to.setTransferURL(TURLConverter.getInstance().toDB(chunkData.getTransferURL().toString()));
+ to.setPinLifetime(PinLifetimeConverter.getInstance().toDB(chunkData.pinLifetime().value()));
+ to.setFileLifetime(FileLifetimeConverter.getInstance().toDB(chunkData.fileLifetime().value()));
+ to.setFileStorageType(FileStorageTypeConverter.getInstance().toDB(chunkData.fileStorageType()));
+ to.setOverwriteOption(OverwriteModeConverter.getInstance().toDB(chunkData.overwriteOption()));
+ to.setNormalizedStFN(chunkData.getSURL().normalizedStFN());
+ to.setSurlUniqueID(new Integer(chunkData.getSURL().uniqueId()));
+ to.setClientDN(chunkData.getUser().getDn());
+ if (chunkData.getUser() instanceof AbstractGridUser) {
+ if (((AbstractGridUser) chunkData.getUser()).hasVoms()) {
+ to.setVomsAttributes(((AbstractGridUser) chunkData.getUser()).getFQANsAsString());
+ }
+
+ }
+ dao.update(to);
+ }
+
+ /**
+ * Method that returns a Collection of PtPChunkData Objects matching the supplied TRequestToken.
+ * If any of the data associated to the TRequestToken is not well formed and so does not allow a
+ * PtPChunkData Object to be created, then that part of the request is dropped, gets logged and an
+ * attempt is made to write in the DB that the chunk was malformed; the processing continues with
+ * the next part. Only the valid chunks get returned. If there are no chunks to process then an
+ * empty Collection is returned, and a message gets logged. NOTE! Chunks in SRM_ABORTED status are
+ * NOT returned! This is important because this method is intended to be used by the Feeders to
+ * fetch all chunks in the request, and aborted chunks should not be picked up for processing!
+ */
+ public synchronized Collection lookup(final TRequestToken rt) {
+
+ Collection chunkTOs = dao.find(rt);
+ log.debug("PtPChunkCatalog: retrieved data {}", chunkTOs);
+ return buildChunkDataList(chunkTOs);
+ }
+
+ /**
+ * Private method used to create a PtPChunkData object, from a PtPChunkDataTO and TRequestToken.
+ * If a chunk cannot be created, an error messagge gets logged and an attempt is made to signal in
+ * the DB that the chunk is malformed.
+ */
+ private PtPPersistentChunkData makeOne(PtPChunkDataTO auxTO, TRequestToken rt) {
+
+ StringBuilder errorSb = new StringBuilder();
+ // toSURL
+ TSURL toSURL = null;
+ try {
+ toSURL = TSURL.makeFromStringValidate(auxTO.toSURL());
+ } catch (InvalidTSURLAttributesException e) {
+ errorSb.append(e);
+ }
+ if (auxTO.normalizedStFN() != null) {
+ toSURL.setNormalizedStFN(auxTO.normalizedStFN());
+ }
+ if (auxTO.surlUniqueID() != null) {
+ toSURL.setUniqueID(auxTO.surlUniqueID().intValue());
+ }
+ // pinLifetime
+ TLifeTimeInSeconds pinLifetime = null;
+ try {
+ long pinLifeTime = PinLifetimeConverter.getInstance().toStoRM(auxTO.pinLifetime());
+ // Check for max value allowed
+ long max = Configuration.getInstance().getPinLifetimeMaximum();
+ if (pinLifeTime > max) {
+ log.warn("PinLifeTime is greater than the max value allowed. Drop the "
+ + "value to the max = {} seconds", max);
+ pinLifeTime = max;
+ }
+ pinLifetime = TLifeTimeInSeconds.make(pinLifeTime, TimeUnit.SECONDS);
+ } catch (IllegalArgumentException e) {
+ errorSb.append("\n");
+ errorSb.append(e);
+ }
+ // fileLifetime
+ TLifeTimeInSeconds fileLifetime = null;
+ try {
+ fileLifetime = TLifeTimeInSeconds
+ .make(FileLifetimeConverter.getInstance().toStoRM(auxTO.fileLifetime()), TimeUnit.SECONDS);
+ } catch (IllegalArgumentException e) {
+ errorSb.append("\n");
+ errorSb.append(e);
+ }
+ // fileStorageType
+ TFileStorageType fileStorageType =
+ FileStorageTypeConverter.getInstance().toSTORM(auxTO.fileStorageType());
+ if (fileStorageType == TFileStorageType.EMPTY) {
+ errorSb.append("\nTFileStorageType could not be translated from "
+ + "its String representation! String: " + auxTO.fileStorageType());
+ // Use the default value defined in Configuration.
+ fileStorageType = TFileStorageType
+ .getTFileStorageType(Configuration.getInstance().getDefaultFileStorageType());
+ errorSb.append("\nUsed the default TFileStorageType as defined " + "in StoRM config.: "
+ + fileStorageType);
+ }
+ // expectedFileSize
+ //
+ // WARNING! A converter is used because the DB uses 0 for empty, whereas
+ // StoRM object model does allow a 0 size! Since this is an optional
+ // field
+ // in the SRM specs, null must be converted explicitly to Empty
+ // TSizeInBytes
+ // because it is indeed well formed!
+ TSizeInBytes expectedFileSize = null;
+ TSizeInBytes emptySize = TSizeInBytes.makeEmpty();
+ long sizeTranslation = SizeInBytesIntConverter.getInstance().toStoRM(auxTO.expectedFileSize());
+ if (emptySize.value() == sizeTranslation) {
+ expectedFileSize = emptySize;
+ } else {
+ try {
+ expectedFileSize = TSizeInBytes.make(auxTO.expectedFileSize(), SizeUnit.BYTES);
+ } catch (InvalidTSizeAttributesException e) {
+ errorSb.append("\n");
+ errorSb.append(e);
+ }
+ }
+ // spaceToken!
+ //
+ // WARNING! A converter is still needed because of DB logic for missing
+ // SpaceToken makes use of NULL, whereas StoRM object model does not
+ // allow
+ // for null! It makes use of a specific Empty type.
+ //
+ // Indeed, the SpaceToken field is optional, so a request with a null
+ // value
+ // for the SpaceToken field in the DB, _is_ well formed!
+ TSpaceToken spaceToken = null;
+ TSpaceToken emptyToken = TSpaceToken.makeEmpty();
+ /**
+ * convert empty string representation of DPM into StoRM representation;
+ */
+ String spaceTokenTranslation =
+ SpaceTokenStringConverter.getInstance().toStoRM(auxTO.spaceToken());
+ if (emptyToken.toString().equals(spaceTokenTranslation)) {
+ spaceToken = emptyToken;
+ } else {
+ try {
+ spaceToken = TSpaceToken.make(spaceTokenTranslation);
+ } catch (InvalidTSpaceTokenAttributesException e) {
+ errorSb.append("\n");
+ errorSb.append(e);
+ }
+ }
+ // overwriteOption!
+ TOverwriteMode overwriteOption =
+ OverwriteModeConverter.getInstance().toSTORM(auxTO.overwriteOption());
+ if (overwriteOption == TOverwriteMode.EMPTY) {
+ errorSb.append("\nTOverwriteMode could not be translated "
+ + "from its String representation! String: " + auxTO.overwriteOption());
+ overwriteOption = null;
+ }
+ // transferProtocols
+ TURLPrefix transferProtocols = TransferProtocolListConverter.toSTORM(auxTO.protocolList());
+ if (transferProtocols.size() == 0) {
+ errorSb
+ .append("\nEmpty list of TransferProtocols " + "or could not translate TransferProtocols!");
+ transferProtocols = null; // fail construction of PtPChunkData!
+ }
+ // status
+ TReturnStatus status = null;
+ TStatusCode code = StatusCodeConverter.getInstance().toSTORM(auxTO.status());
+ if (code == TStatusCode.EMPTY) {
+ errorSb.append("\nRetrieved StatusCode was not recognised: " + auxTO.status());
+ } else {
+ status = new TReturnStatus(code, auxTO.errString());
+ }
+ GridUserInterface gridUser = null;
+ try {
+ if (auxTO.vomsAttributes() != null && !auxTO.vomsAttributes().trim().equals("")) {
+ gridUser = GridUserManager.makeVOMSGridUser(auxTO.clientDN(), auxTO.vomsAttributesArray());
+ } else {
+ gridUser = GridUserManager.makeGridUser(auxTO.clientDN());
+ }
+
+ } catch (IllegalArgumentException e) {
+ log.error("Unexpected error on voms grid user creation. " + "IllegalArgumentException: {}",
+ e.getMessage(), e);
+ }
+
+ // transferURL
+ /**
+ * whatever is read is just meaningless because PtP will fill it in!!! So create an Empty TTURL
+ * by default! Vital to avoid problems with unknown DPM NULL/EMPTY logic policy!
+ */
+ TTURL transferURL = TTURL.makeEmpty();
+ // make PtPChunkData
+ PtPPersistentChunkData aux = null;
+ try {
+ aux = new PtPPersistentChunkData(gridUser, rt, toSURL, pinLifetime, fileLifetime,
+ fileStorageType, spaceToken, expectedFileSize, transferProtocols, overwriteOption, status,
+ transferURL);
+ aux.setPrimaryKey(auxTO.primaryKey());
+ } catch (InvalidPtPPersistentChunkDataAttributesException e) {
+ dao.fail(auxTO);
+ log.warn("PtP CHUNK CATALOG! Retrieved malformed PtP chunk data"
+ + " from persistence. Dropping chunk from request: {}", rt);
+ log.warn(e.getMessage(), e);
+ log.warn(errorSb.toString());
+ } catch (InvalidPtPDataAttributesException e) {
+ dao.fail(auxTO);
+ log.warn("PtP CHUNK CATALOG! Retrieved malformed PtP chunk data"
+ + " from persistence. Dropping chunk from request: {}", rt);
+ log.warn(e.getMessage(), e);
+ log.warn(errorSb.toString());
+ } catch (InvalidFileTransferDataAttributesException e) {
+ dao.fail(auxTO);
+ log.warn("PtP CHUNK CATALOG! Retrieved malformed PtP chunk data"
+ + " from persistence. Dropping chunk from request: {}", rt);
+ log.warn(e.getMessage(), e);
+ log.warn(errorSb.toString());
+ } catch (InvalidSurlRequestDataAttributesException e) {
+ dao.fail(auxTO);
+ log.warn("PtP CHUNK CATALOG! Retrieved malformed PtP chunk data"
+ + " from persistence. Dropping chunk from request: {}", rt);
+ log.warn(e.getMessage(), e);
+ log.warn(errorSb.toString());
+ }
+ // end...
+ return aux;
+ }
+
+ /**
+ *
+ * Adds to the received PtPChunkDataTO the normalized StFN and the SURL unique ID taken from the
+ * PtPChunkData
+ *
+ * @param chunkTO
+ * @param chunk
+ */
+ private void completeTO(ReducedPtPChunkDataTO chunkTO, final ReducedPtPChunkData chunk) {
+
+ chunkTO.setNormalizedStFN(chunk.toSURL().normalizedStFN());
+ chunkTO.setSurlUniqueID(new Integer(chunk.toSURL().uniqueId()));
+ }
+
+ /**
+ *
+   * Creates a ReducedPtPChunkDataTO from the received PtPChunkDataTO and completes it with the
+   * normalized StFN and the SURL unique ID taken from the PtPPersistentChunkData
+ *
+ * @param chunkTO
+ * @param chunk
+ * @return
+ * @throws InvalidReducedPtPChunkDataAttributesException
+ */
+ private ReducedPtPChunkDataTO completeTO(PtPChunkDataTO chunkTO,
+ final PtPPersistentChunkData chunk) throws InvalidReducedPtPChunkDataAttributesException {
+
+ ReducedPtPChunkDataTO reducedChunkTO = this.reduce(chunkTO);
+ this.completeTO(reducedChunkTO, this.reduce(chunk));
+ return reducedChunkTO;
+ }
+
+ /**
+ * Creates a ReducedPtPChunkData from the data contained in the received PtPChunkData
+ *
+ * @param chunk
+ * @return
+ * @throws InvalidReducedPtPChunkDataAttributesException
+ */
+ private ReducedPtPChunkData reduce(PtPPersistentChunkData chunk)
+ throws InvalidReducedPtPChunkDataAttributesException {
+
+ ReducedPtPChunkData reducedChunk = new ReducedPtPChunkData(chunk.getSURL(), chunk.getStatus(),
+ chunk.fileStorageType(), chunk.fileLifetime());
+ reducedChunk.setPrimaryKey(chunk.getPrimaryKey());
+ return reducedChunk;
+ }
+
+ /**
+ * Creates a ReducedPtPChunkDataTO from the data contained in the received PtPChunkDataTO
+ *
+ * @param chunkTO
+ * @return
+ */
+ private ReducedPtPChunkDataTO reduce(PtPChunkDataTO chunkTO) {
+
+ ReducedPtPChunkDataTO reducedChunkTO = new ReducedPtPChunkDataTO();
+ reducedChunkTO.setPrimaryKey(chunkTO.primaryKey());
+ reducedChunkTO.setToSURL(chunkTO.toSURL());
+ reducedChunkTO.setNormalizedStFN(chunkTO.normalizedStFN());
+ reducedChunkTO.setSurlUniqueID(chunkTO.surlUniqueID());
+ reducedChunkTO.setStatus(chunkTO.status());
+ reducedChunkTO.setErrString(chunkTO.errString());
+ return reducedChunkTO;
+ }
+
+ /**
+ * Checks if the received PtPChunkDataTO contains the fields not set by the front end but required
+ *
+ * @param chunkTO
+ * @return
+ */
+ private boolean isComplete(PtPChunkDataTO chunkTO) {
+
+ return (chunkTO.normalizedStFN() != null) && (chunkTO.surlUniqueID() != null);
+ }
+
+ public Collection lookupPtPChunkData(TSURL surl, GridUserInterface user) {
+
+ return lookupPtPChunkData((List) Arrays.asList(new TSURL[] {surl}), user);
+ }
+
+ private Collection lookupPtPChunkData(List surls,
+ GridUserInterface user) {
+
+ int[] surlsUniqueIDs = new int[surls.size()];
+ String[] surlsArray = new String[surls.size()];
+ int index = 0;
+ for (TSURL tsurl : surls) {
+ surlsUniqueIDs[index] = tsurl.uniqueId();
+ surlsArray[index] = tsurl.rawSurl();
+ index++;
+ }
+ Collection chunkDataTOs = dao.find(surlsUniqueIDs, surlsArray, user.getDn());
+ log.debug("PtP CHUNK CATALOG: retrieved data {}", chunkDataTOs);
+ return buildChunkDataList(chunkDataTOs);
+ }
+
+ private Collection buildChunkDataList(
+ Collection chunkDataTOs) {
+
+ Collection list = Lists.newArrayList();
+ PtPPersistentChunkData chunk;
+ for (PtPChunkDataTO chunkTO : chunkDataTOs) {
+ chunk = makeOne(chunkTO);
+ if (chunk == null) {
+ continue;
+ }
+ list.add(chunk);
+ if (isComplete(chunkTO)) {
+ continue;
+ }
+ try {
+ dao.updateIncomplete(completeTO(chunkTO, chunk));
+ } catch (InvalidReducedPtPChunkDataAttributesException e) {
+ log.warn(
+ "PtG CHUNK CATALOG! unable to add missing informations on " + "DB to the request: {}",
+ e.getMessage());
+ }
+ }
+ log.debug("PtPChunkCatalog: returning {}\n\n", list);
+ return list;
+ }
+
+ private PtPPersistentChunkData makeOne(PtPChunkDataTO chunkTO) {
+
+ try {
+ return makeOne(chunkTO, new TRequestToken(chunkTO.requestToken(), chunkTO.timeStamp()));
+ } catch (InvalidTRequestTokenAttributesException e) {
+ throw new IllegalStateException(
+ "Unexpected InvalidTRequestTokenAttributesException in TRequestToken: " + e);
+ }
+ }
+
+ public int updateStatus(TRequestToken requestToken, TSURL surl, TStatusCode statusCode,
+ String explanation) {
+
+ return dao.updateStatus(requestToken, new int[] {surl.uniqueId()},
+ new String[] {surl.rawSurl()}, statusCode, explanation);
+ }
+
+ public int updateFromPreviousStatus(TRequestToken requestToken, TStatusCode expectedStatusCode,
+ TStatusCode newStatusCode, String explanation) {
+
+ return dao.updateStatusOnMatchingStatus(requestToken, expectedStatusCode, newStatusCode,
+ explanation);
+ }
+
+ public int updateFromPreviousStatus(TRequestToken requestToken, List surlList,
+ TStatusCode expectedStatusCode, TStatusCode newStatusCode) {
+
+ int[] surlsUniqueIDs = new int[surlList.size()];
+ String[] surls = new String[surlList.size()];
+ int index = 0;
+ for (TSURL tsurl : surlList) {
+ surlsUniqueIDs[index] = tsurl.uniqueId();
+ surls[index] = tsurl.rawSurl();
+ index++;
+ }
+ return dao.updateStatusOnMatchingStatus(requestToken, surlsUniqueIDs, surls, expectedStatusCode,
+ newStatusCode);
+ }
}
diff --git a/src/main/java/it/grid/storm/catalogs/PtPChunkDAO.java b/src/main/java/it/grid/storm/catalogs/PtPChunkDAO.java
deleted file mode 100644
index b6d89c3c1..000000000
--- a/src/main/java/it/grid/storm/catalogs/PtPChunkDAO.java
+++ /dev/null
@@ -1,1683 +0,0 @@
-/*
- *
- * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010.
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not
- * use this file except in compliance with the License. You may obtain a copy of
- * the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package it.grid.storm.catalogs;
-
-import static it.grid.storm.catalogs.ChunkDAOUtils.buildInClauseForArray;
-import static it.grid.storm.catalogs.ChunkDAOUtils.printWarnings;
-import static it.grid.storm.srm.types.TStatusCode.SRM_ABORTED;
-import static it.grid.storm.srm.types.TStatusCode.SRM_FAILURE;
-import static it.grid.storm.srm.types.TStatusCode.SRM_FILE_LIFETIME_EXPIRED;
-import static it.grid.storm.srm.types.TStatusCode.SRM_REQUEST_INPROGRESS;
-import static it.grid.storm.srm.types.TStatusCode.SRM_SPACE_AVAILABLE;
-import static it.grid.storm.srm.types.TStatusCode.SRM_SUCCESS;
-
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
-
-import it.grid.storm.config.Configuration;
-import it.grid.storm.namespace.NamespaceException;
-import it.grid.storm.namespace.naming.SURL;
-import it.grid.storm.srm.types.TRequestToken;
-import it.grid.storm.srm.types.TSURL;
-import it.grid.storm.srm.types.TStatusCode;
-
-import org.apache.commons.lang.StringUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.PreparedStatement;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Timer;
-import java.util.TimerTask;
-
-/**
- * DAO class for PtPChunkCatalog. This DAO is specifically designed to connect
- * to a MySQL DB. The raw data found in those tables is pre-treated in order to
- * turn it into the Object Model of StoRM. See Method comments for further info.
- * BEWARE! DAO Adjusts for extra fields in the DB that are not present in the
- * object model.
- *
- * @author EGRID ICTP
- * @version 2.0
- * @date June 2005
- */
-public class PtPChunkDAO {
-
- private static final Logger log = LoggerFactory.getLogger(PtPChunkDAO.class);
-
- /* String with the name of the class for the DB driver */
- private final String driver = Configuration.getInstance().getDBDriver();
- /* String referring to the URL of the DB */
- private final String url = Configuration.getInstance().getDBURL();
- /* String with the password for the DB */
- private final String password = Configuration.getInstance().getDBPassword();
- /* String with the name for the DB */
- private final String name = Configuration.getInstance().getDBUserName();
- /* Connection to DB - WARNING!!! It is kept open all the time! */
- private Connection con = null;
-
- private static final PtPChunkDAO dao = new PtPChunkDAO();
-
- /* timer thread that will run a task to alert when reconnecting is necessary! */
- private Timer clock = null;
- /*
- * timer task that will update the boolean signaling that a reconnection is
- * needed
- */
- private TimerTask clockTask = null;
- /* milliseconds that must pass before reconnecting to DB */
- private final long period = Configuration.getInstance().getDBReconnectPeriod() * 1000;
- /* initial delay in milliseconds before starting timer */
- private final long delay = Configuration.getInstance().getDBReconnectDelay() * 1000;
-
- /* boolean that tells whether reconnection is needed because of MySQL bug! */
- private boolean reconnect = false;
-
- private StatusCodeConverter statusCodeConverter = StatusCodeConverter.getInstance();
-
- private PtPChunkDAO() {
-
- setUpConnection();
- clock = new Timer();
- clockTask = new TimerTask() {
-
- @Override
- public void run() {
-
- reconnect = true;
- }
- }; // clock task
- clock.scheduleAtFixedRate(clockTask, delay, period);
- }
-
- /**
- * Method that returns the only instance of the PtPChunkDAO.
- */
- public static PtPChunkDAO getInstance() {
-
- return dao;
- }
-
- /**
- * Method used to save the changes made to a retrieved PtPChunkDataTO, back
- * into the MySQL DB. Only the transferURL, statusCode and explanation, of
- * status_Put table get written to the DB. Likewise for the pinLifetime and
- * fileLifetime of request_queue. In case of any error, an error messagge gets
- * logged but no exception is thrown.
- */
- public synchronized void update(PtPChunkDataTO to) {
-
- if (!checkConnection()) {
- log.error("PtP CHUNK DAO: update - unable to get a valid connection!");
- return;
- }
- PreparedStatement updatePut = null;
- try {
- // prepare statement...
- updatePut = con
- .prepareStatement("UPDATE "
- + "request_queue rq JOIN (status_Put sp, request_Put rp) ON "
- + "(rq.ID=rp.request_queueID AND sp.request_PutID=rp.ID) "
- + "SET sp.transferURL=?, sp.statusCode=?, sp.explanation=?, rq.pinLifetime=?, rq.fileLifetime=?, rq.config_FileStorageTypeID=?, rq.config_OverwriteID=?, "
- + "rp.normalized_targetSURL_StFN=?, rp.targetSURL_uniqueID=? "
- + "WHERE rp.ID=?");
- printWarnings(con.getWarnings());
-
- updatePut.setString(1, to.transferURL());
- printWarnings(updatePut.getWarnings());
-
- updatePut.setInt(2, to.status());
- printWarnings(updatePut.getWarnings());
-
- updatePut.setString(3, to.errString());
- printWarnings(updatePut.getWarnings());
-
- updatePut.setInt(4, to.pinLifetime());
- printWarnings(updatePut.getWarnings());
-
- updatePut.setInt(5, to.fileLifetime());
- printWarnings(updatePut.getWarnings());
-
- updatePut.setString(6, to.fileStorageType());
- printWarnings(updatePut.getWarnings());
-
- updatePut.setString(7, to.overwriteOption());
- printWarnings(updatePut.getWarnings());
-
- updatePut.setString(8, to.normalizedStFN());
- printWarnings(updatePut.getWarnings());
-
- updatePut.setInt(9, to.surlUniqueID());
- printWarnings(updatePut.getWarnings());
-
- updatePut.setLong(10, to.primaryKey());
- printWarnings(updatePut.getWarnings());
- // run updateStatusPut...
- log.trace("PtP CHUNK DAO - update method: {}", updatePut);
- updatePut.executeUpdate();
- printWarnings(updatePut.getWarnings());
- } catch (SQLException e) {
- log.error("PtP CHUNK DAO: Unable to complete update! {}", e.getMessage(), e);
- } finally {
- close(updatePut);
- }
- }
-
- /**
- * Updates the request_Put represented by the received ReducedPtPChunkDataTO
- * by setting its normalized_targetSURL_StFN and targetSURL_uniqueID
- *
- * @param chunkTO
- */
- public synchronized void updateIncomplete(ReducedPtPChunkDataTO chunkTO) {
-
- if (!checkConnection()) {
- log
- .error("PtP CHUNK DAO: updateIncomplete - unable to get a valid connection!");
- return;
- }
- String str = "UPDATE request_Put SET normalized_targetSURL_StFN=?, targetSURL_uniqueID=? "
- + "WHERE ID=?";
- PreparedStatement stmt = null;
- try {
- stmt = con.prepareStatement(str);
- printWarnings(con.getWarnings());
-
- stmt.setString(1, chunkTO.normalizedStFN());
- printWarnings(stmt.getWarnings());
-
- stmt.setInt(2, chunkTO.surlUniqueID());
- printWarnings(stmt.getWarnings());
-
- stmt.setLong(3, chunkTO.primaryKey());
- printWarnings(stmt.getWarnings());
-
- log.trace("PtP CHUNK DAO - update incomplete: {}", stmt);
- stmt.executeUpdate();
- printWarnings(stmt.getWarnings());
- } catch (SQLException e) {
- log.error("PtP CHUNK DAO: Unable to complete update incomplete! {}",
- e.getMessage(), e);
- } finally {
- close(stmt);
- }
- }
-
- /**
- * Method used to refresh the PtPChunkDataTO information from the MySQL DB.
- * This method is intended to be used during the srmAbortRequest/File
- * operation. In case of any error, an error message gets logged but no
- * exception is thrown; a null PtPChunkDataTO is returned.
- */
- public synchronized PtPChunkDataTO refresh(long id) {
-
- if (!checkConnection()) {
- log.error("PtP CHUNK DAO: refresh - unable to get a valid connection!");
- return null;
- }
- String prot = "SELECT tp.config_ProtocolsID FROM request_TransferProtocols tp "
- + "WHERE tp.request_queueID IN "
- + "(SELECT rp.request_queueID FROM request_Put rp WHERE rp.ID=?)";
-
- String refresh = "SELECT rq.config_FileStorageTypeID, rq.config_OverwriteID, rq.timeStamp, rq.pinLifetime, rq.fileLifetime, rq.s_token, rq.r_token, rq.client_dn, rq.proxy, rp.ID, rp.targetSURL, rp.expectedFileSize, rp.normalized_targetSURL_StFN, rp.targetSURL_uniqueID, sp.statusCode, sp.transferURL "
- + "FROM request_queue rq JOIN (request_Put rp, status_Put sp) "
- + "ON (rq.ID=rp.request_queueID AND sp.request_PutID=rp.ID) "
- + "WHERE rp.ID=?";
-
- PreparedStatement stmt = null;
- ResultSet rs = null;
- PtPChunkDataTO chunkDataTO = null;
-
- try {
- // get protocols for the request
- stmt = con.prepareStatement(prot);
- printWarnings(con.getWarnings());
-
- List protocols = Lists.newArrayList();
- stmt.setLong(1, id);
- printWarnings(stmt.getWarnings());
-
- log.trace("PtP CHUNK DAO - refresh method: {}", stmt);
- rs = stmt.executeQuery();
- printWarnings(stmt.getWarnings());
- while (rs.next()) {
- protocols.add(rs.getString("tp.config_ProtocolsID"));
- }
- close(rs);
- close(stmt);
-
- // get chunk of the request
- stmt = con.prepareStatement(refresh);
- printWarnings(con.getWarnings());
-
- stmt.setLong(1, id);
- printWarnings(stmt.getWarnings());
-
- log.trace("PtP CHUNK DAO - refresh method: {}", stmt);
- rs = stmt.executeQuery();
- printWarnings(stmt.getWarnings());
-
- if (rs.next()) {
- chunkDataTO = new PtPChunkDataTO();
- chunkDataTO.setFileStorageType(rs
- .getString("rq.config_FileStorageTypeID"));
- chunkDataTO.setOverwriteOption(rs.getString("rq.config_OverwriteID"));
- chunkDataTO.setTimeStamp(rs.getTimestamp("rq.timeStamp"));
- chunkDataTO.setPinLifetime(rs.getInt("rq.pinLifetime"));
- chunkDataTO.setFileLifetime(rs.getInt("rq.fileLifetime"));
- chunkDataTO.setSpaceToken(rs.getString("rq.s_token"));
- chunkDataTO.setRequestToken(rs.getString("rq.r_token"));
- chunkDataTO.setPrimaryKey(rs.getLong("rp.ID"));
- chunkDataTO.setToSURL(rs.getString("rp.targetSURL"));
- chunkDataTO.setNormalizedStFN(rs
- .getString("rp.normalized_targetSURL_StFN"));
- int uniqueID = rs.getInt("rp.targetSURL_uniqueID");
- if (!rs.wasNull()) {
- chunkDataTO.setSurlUniqueID(Integer.valueOf(uniqueID));
- }
-
- chunkDataTO.setExpectedFileSize(rs.getLong("rp.expectedFileSize"));
- chunkDataTO.setProtocolList(protocols);
- chunkDataTO.setStatus(rs.getInt("sp.statusCode"));
- chunkDataTO.setTransferURL(rs.getString("sp.transferURL"));
- chunkDataTO.setClientDN(rs.getString("rq.client_dn"));
-
- /**
- * This code is only for the 1.3.18. This is a workaround to get FQANs
- * using the proxy field on request_queue. The FE use the proxy field of
- * request_queue to insert a single FQAN string containing all FQAN
- * separated by the "#" char. The proxy is a BLOB, hence it has to be
- * properly converted in string.
- */
- java.sql.Blob blob = rs.getBlob("rq.proxy");
- if (!rs.wasNull() && blob != null) {
- byte[] bdata = blob.getBytes(1, (int) blob.length());
- chunkDataTO.setVomsAttributes(new String(bdata));
- }
- if (rs.next()) {
- log.warn("ATTENTION in PtP CHUNK DAO! Possible DB corruption! "
- + "refresh method invoked for specific chunk with id {}, but found "
- + "more than one such chunks!", id);
- }
- } else {
- log.warn("ATTENTION in PtP CHUNK DAO! Possible DB corruption! "
- + "refresh method invoked for specific chunk with id {}, but chunk "
- + "NOT found in persistence!", id);
- }
- } catch (SQLException e) {
- log.error("PtP CHUNK DAO! Unable to refresh chunk! {}", e.getMessage(), e);
- chunkDataTO = null;
- } finally {
- close(rs);
- close(stmt);
- }
- return chunkDataTO;
- }
-
- /**
- * Method that queries the MySQL DB to find all entries matching the supplied
- * TRequestToken. The Collection contains the corresponding PtPChunkDataTO
- * objects. An initial simple query establishes the list of protocols
- * associated with the request. A second complex query establishes all chunks
- * associated with the request, by properly joining request_queue, request_Put
- * and status_Put. The considered fields are: (1) From status_Put: the ID
- * field which becomes the TOs primary key, and statusCode. (2) From
- * request_Put: targetSURL and expectedFileSize. (3) From request_queue:
- * pinLifetime, fileLifetime, config_FileStorageTypeID, s_token,
- * config_OverwriteID. In case of any error, a log gets written and an empty
- * collection is returned. No exception is returned. NOTE! Chunks in
- * SRM_ABORTED status are NOT returned! This is important because this method
- * is intended to be used by the Feeders to fetch all chunks in the request,
- * and aborted chunks should not be picked up for processing!
- */
- public synchronized Collection