diff --git a/pom.xml b/pom.xml index 00fdd7e02..c3cfb9403 100644 --- a/pom.xml +++ b/pom.xml @@ -5,7 +5,7 @@ StoRM Backend server org.italiangrid.storm storm-backend-server - 1.11.19 + 1.12.0 @@ -18,13 +18,13 @@ 3.5.0.Final 3.3.0 1.1 - 1.2.1 + 2.8.0 2.0.1 0.5.2 3.0 1.1 20080701 - 4.8.1 + 4.13.1 1.0 0.2 3.1 @@ -35,7 +35,7 @@ 1.4.6 2.3.3 - 5.1.12 + 8.0.16 18.0 3.1.0 @@ -376,8 +376,8 @@ - commons-dbcp - commons-dbcp + org.apache.commons + commons-dbcp2 ${commonsDbcpVersion} diff --git a/src/main/java/it/grid/storm/Constants.java b/src/main/java/it/grid/storm/Constants.java index ca61cbf93..448378341 100644 --- a/src/main/java/it/grid/storm/Constants.java +++ b/src/main/java/it/grid/storm/Constants.java @@ -35,7 +35,6 @@ public class Constants { private static final Logger log = LoggerFactory.getLogger(Constants.class); public static final Entry BE_VERSION; - public static final Entry NAMESPACE_VERSION; public static final Entry BE_OS_DISTRIBUTION; public static final Entry BE_OS_PLATFORM; public static final Entry BE_OS_KERNEL_RELEASE; @@ -49,7 +48,6 @@ private Constants() {} static { BE_VERSION = new Entry("BE-Version", Constants.class.getPackage().getImplementationVersion()); - NAMESPACE_VERSION = new Entry("Namespace-version", "1.5.0"); BE_OS_DISTRIBUTION = new Entry("BE-OS-Distribution", getDistribution()); Map map = getPlatformKernel(); BE_OS_PLATFORM = new Entry(BE_OS_PLATFORM_KEY, map.get(BE_OS_PLATFORM_KEY)); diff --git a/src/main/java/it/grid/storm/ShutdownHook.java b/src/main/java/it/grid/storm/ShutdownHook.java index 09cb21c22..2dffea96f 100644 --- a/src/main/java/it/grid/storm/ShutdownHook.java +++ b/src/main/java/it/grid/storm/ShutdownHook.java @@ -28,6 +28,7 @@ public void run() { storm.stopSpaceGC(); storm.stopExpiredAgent(); storm.stopDiskUsageService(); + storm.stopRequestGarbageCollector(); GPFSQuotaManager.INSTANCE.shutdown(); log.info("StoRM: Backend successfully stopped."); diff --git a/src/main/java/it/grid/storm/StoRM.java 
b/src/main/java/it/grid/storm/StoRM.java index 2c4852fd3..c18b63a51 100644 --- a/src/main/java/it/grid/storm/StoRM.java +++ b/src/main/java/it/grid/storm/StoRM.java @@ -30,8 +30,8 @@ import it.grid.storm.asynch.AdvancedPicker; import it.grid.storm.catalogs.ReservedSpaceCatalog; -import it.grid.storm.catalogs.StoRMDataSource; -import it.grid.storm.catalogs.timertasks.ExpiredPutRequestsAgent; +import it.grid.storm.catalogs.executors.RequestFinalizerService; +import it.grid.storm.catalogs.timertasks.RequestsGarbageCollector; import it.grid.storm.check.CheckManager; import it.grid.storm.check.CheckResponse; import it.grid.storm.check.CheckStatus; @@ -72,13 +72,16 @@ public class StoRM { private boolean isSpaceGCRunning = false; /* - * Timer object in charge of transit expired put requests from SRM_SPACE_AVAILABLE to - * SRM_FILE_LIFETIME_EXPIRED and from SRM_REQUEST_INPROGRESS to SRM_FAILURE + * Agent in charge of transit expired ptg/ptp/bol requests to final statuses */ - private final Timer transiter = new Timer(); - private TimerTask expiredAgent; + private RequestFinalizerService expiredAgent; private boolean isExpiredAgentRunning = false; + /* Requests Garbage Collector */ + private final Timer rgc = new Timer(); + private TimerTask rgcTask; + private boolean isRequestGCRunning = false; + private boolean isDiskUsageServiceEnabled = false; private DiskUsageService duService; @@ -96,7 +99,7 @@ public StoRM() { config = Configuration.getInstance(); picker = new AdvancedPicker(); - spaceCatalog = new ReservedSpaceCatalog(); + spaceCatalog = ReservedSpaceCatalog.getInstance(); } @@ -108,8 +111,6 @@ public void init() throws BootstrapException { configureMetricsReporting(); - configureStoRMDataSource(); - loadNamespaceConfiguration(); HealthDirector.initializeDirector(false); @@ -216,11 +217,6 @@ private void performSanityChecks() throws BootstrapException { } - private void configureStoRMDataSource() { - - StoRMDataSource.init(); - } - /** * Method used to start the 
picker. */ @@ -247,14 +243,6 @@ public synchronized void stopPicker() { isPickerRunning = false; } - /** - * @return - */ - public synchronized boolean pickerIsRunning() { - - return isPickerRunning; - } - /** * Method used to start xmlrpcServer. * @@ -382,14 +370,6 @@ public synchronized void stopSpaceGC() { isSpaceGCRunning = false; } - /** - * @return - */ - public synchronized boolean spaceGCIsRunning() { - - return isSpaceGCRunning; - } - /** * Starts the internal timer needed to periodically check and transit requests whose pinLifetime * has expired and are in SRM_SPACE_AVAILABLE, to SRM_FILE_LIFETIME_EXPIRED. Moreover, the @@ -405,16 +385,8 @@ public synchronized void startExpiredAgent() { return; } - /* Delay time before starting cleaning thread! Set to 1 minute */ - final long delay = config.getTransitInitialDelay() * 1000L; - /* Period of execution of cleaning! Set to 1 hour */ - final long period = config.getTransitTimeInterval() * 1000L; - /* Expiration time before starting move in-progress requests to failure */ - final long inProgressExpirationTime = config.getInProgressPutRequestExpirationTime(); - log.debug("Starting Expired Agent."); - expiredAgent = new ExpiredPutRequestsAgent(inProgressExpirationTime); - transiter.scheduleAtFixedRate(expiredAgent, delay, period); + expiredAgent = new RequestFinalizerService(config); isExpiredAgentRunning = true; log.debug("Expired Agent started."); } @@ -428,7 +400,7 @@ public synchronized void stopExpiredAgent() { log.debug("Stopping Expired Agent."); if (expiredAgent != null) { - expiredAgent.cancel(); + expiredAgent.stop(); } log.debug("Expired Agent stopped."); isExpiredAgentRunning = false; @@ -498,6 +470,40 @@ public synchronized void stopDiskUsageService() { } } + public synchronized void startRequestGarbageCollector() { + + if (isRequestGCRunning) { + log.debug("Requests Garbage Collector is already running."); + return; + } + + /* Delay time before starting cleaning thread */ + final long delay = 
config.getRequestPurgerDelay() * 1000L; + /* Period of execution of cleaning */ + final long period = config.getRequestPurgerPeriod() * 1000L; + + log.debug("Starting Requests Garbage Collector ."); + rgcTask = new RequestsGarbageCollector(rgc, period); + rgc.schedule(rgcTask, delay); + isRequestGCRunning = true; + log.debug("Requests Garbage Collector started."); + } + + public synchronized void stopRequestGarbageCollector() { + + if (!isRequestGCRunning) { + log.debug("Requests Garbage Collector is not running."); + return; + } + + log.debug("Stopping Requests Garbage Collector."); + if (rgcTask != null) { + rgcTask.cancel(); + } + log.debug("Requests Garbage Collector stopped."); + isRequestGCRunning = false; + } + public void startServices() throws Exception { startPicker(); @@ -505,6 +511,7 @@ public void startServices() throws Exception { startRestServer(); startSpaceGC(); startExpiredAgent(); + startRequestGarbageCollector(); startDiskUsageService(); } @@ -515,6 +522,7 @@ public void stopServices() { stopRestServer(); stopSpaceGC(); stopExpiredAgent(); + stopRequestGarbageCollector(); stopDiskUsageService(); GPFSQuotaManager.INSTANCE.shutdown(); diff --git a/src/main/java/it/grid/storm/acl/AclManager.java b/src/main/java/it/grid/storm/acl/AclManager.java index 5a9f03de9..5eba59c74 100644 --- a/src/main/java/it/grid/storm/acl/AclManager.java +++ b/src/main/java/it/grid/storm/acl/AclManager.java @@ -114,60 +114,4 @@ FilesystemPermission setGroupPermission(LocalFile localFile, LocalUser localUser FilesystemPermission setUserPermission(LocalFile localFile, LocalUser localUser, FilesystemPermission permission) throws IllegalArgumentException; - /** - * @param localFile an existent file - * @throws IllegalArgumentException if received null parameters or the LocalFile object refers to - * a not existent file - */ - void removeHttpsPermissions(LocalFile localFile) throws IllegalArgumentException; - - /** - * @param localFile an existent file - * @param localUser - * 
@param permission - * @throws IllegalArgumentException if received null parameters or the LocalFile object refers to - * a not existent file - */ - void grantHttpsUserPermission(LocalFile localFile, LocalUser localUser, - FilesystemPermission permission) throws IllegalArgumentException; - - /** - * @param localFile an existent file - * @param localUser - * @param permission - * @throws IllegalArgumentException if received null parameters or the LocalFile object refers to - * a not existent file - */ - void grantHttpsServiceGroupPermission(LocalFile localFile, FilesystemPermission permission) - throws IllegalArgumentException; - - /** - * @param localFile an existent file - * @param localUser - * @param permission - * @throws IllegalArgumentException if received null parameters or the LocalFile object refers to - * a not existent file - */ - void grantHttpsGroupPermission(LocalFile localFile, LocalUser localUser, - FilesystemPermission permission) throws IllegalArgumentException; - - /** - * @param localFile an existent file - * @param localUser - * @param permission - * @throws IllegalArgumentException if received null parameters or the LocalFile object refers to - * a not existent file - */ - void grantHttpsServiceUserPermission(LocalFile localFile, FilesystemPermission permission) - throws IllegalArgumentException; - - /** - * @param oldLocalFile an existent source file - * @param newLocalFile an existent destination file - * @throws IllegalArgumentException if received null parameters or the LocalFile objects refers to - * not existent files - */ - void moveHttpsPermissions(LocalFile oldLocalFile, LocalFile newLocalFile) - throws IllegalArgumentException; - } diff --git a/src/main/java/it/grid/storm/acl/AclManagerFS.java b/src/main/java/it/grid/storm/acl/AclManagerFS.java index 52f813bea..028c441d0 100644 --- a/src/main/java/it/grid/storm/acl/AclManagerFS.java +++ b/src/main/java/it/grid/storm/acl/AclManagerFS.java @@ -27,12 +27,6 @@ public static AclManager 
getInstance() { return instance; } - /* - * (non-Javadoc) - * - * @see it.grid.storm.acl.AclManager#grantGroupPermission(it.grid.storm.griduser .LocalUser, - * it.grid.storm.filesystem.FilesystemPermission) - */ @Override public FilesystemPermission grantGroupPermission(LocalFile localFile, LocalUser localUser, FilesystemPermission permission) throws IllegalArgumentException { @@ -50,12 +44,6 @@ public FilesystemPermission grantGroupPermission(LocalFile localFile, LocalUser return newPermission; } - /* - * (non-Javadoc) - * - * @see it.grid.storm.acl.AclManager#grantUserPermission(it.grid.storm.filesystem .LocalFile, - * it.grid.storm.griduser.LocalUser, it.grid.storm.filesystem.FilesystemPermission) - */ @Override public FilesystemPermission grantUserPermission(LocalFile localFile, LocalUser localUser, FilesystemPermission permission) throws IllegalArgumentException { @@ -175,68 +163,4 @@ public FilesystemPermission setUserPermission(LocalFile localFile, LocalUser loc return newPermission; } - @Override - public void removeHttpsPermissions(LocalFile localFile) throws IllegalArgumentException { - - if (localFile == null) { - throw new IllegalArgumentException( - "Unable to perform the operation. The received file parameter is null"); - } - } - - @Override - public void grantHttpsUserPermission(LocalFile localFile, LocalUser localUser, - FilesystemPermission permission) throws IllegalArgumentException { - - if (localFile == null || localUser == null || permission == null) { - throw new IllegalArgumentException( - "Unable to perform the operation. The received null parameters: localFile=" + localFile - + " localUser=" + localUser + " permission=" + permission); - } - } - - @Override - public void grantHttpsServiceUserPermission(LocalFile localFile, FilesystemPermission permission) - throws IllegalArgumentException { - - if (localFile == null || permission == null) { - throw new IllegalArgumentException( - "Unable to perform the operation. 
The received null parameters: localFile=" + localFile - + " permission=" + permission); - } - } - - @Override - public void grantHttpsGroupPermission(LocalFile localFile, LocalUser localUser, - FilesystemPermission permission) throws IllegalArgumentException { - - if (localFile == null || localUser == null || permission == null) { - throw new IllegalArgumentException( - "Unable to perform the operation. The received null parameters: localFile=" + localFile - + " localUser=" + localUser + " permission=" + permission); - } - } - - @Override - public void grantHttpsServiceGroupPermission(LocalFile localFile, FilesystemPermission permission) - throws IllegalArgumentException { - - if (localFile == null || permission == null) { - throw new IllegalArgumentException( - "Unable to perform the operation. The received null parameters: localFile=" + localFile - + " permission=" + permission); - } - } - - @Override - public void moveHttpsPermissions(LocalFile fromLocalFile, LocalFile toLocalFile) - throws IllegalArgumentException { - - if (fromLocalFile == null || toLocalFile == null) { - throw new IllegalArgumentException( - "Unable to perform the operation. 
The received null parameters: fromLocalFile=" - + fromLocalFile + " toLocalFile=" + toLocalFile); - } - } - } diff --git a/src/main/java/it/grid/storm/asynch/AdvancedPicker.java b/src/main/java/it/grid/storm/asynch/AdvancedPicker.java index c990a3c74..8d6f7d0b8 100644 --- a/src/main/java/it/grid/storm/asynch/AdvancedPicker.java +++ b/src/main/java/it/grid/storm/asynch/AdvancedPicker.java @@ -18,8 +18,8 @@ package it.grid.storm.asynch; import it.grid.storm.catalogs.RequestSummaryCatalog; -import it.grid.storm.catalogs.RequestSummaryData; import it.grid.storm.config.Configuration; +import it.grid.storm.persistence.model.RequestSummaryData; import it.grid.storm.scheduler.CrusherScheduler; import it.grid.storm.scheduler.SchedulerException; import it.grid.storm.scheduler.SchedulerStatus; @@ -281,31 +281,4 @@ synchronized public boolean abortRequest(TRequestToken rt) { return true; } - /** - * Method used to remove chunks of the request identified by the supplied TRequestToken, with - * surls given by the collection c. Chunks in the DB get their status changed and so will not be - * considered for processing. - * - * If a null TRequestToken or Collection is supplied, or some other abort request has been issued, - * then FALSE is returned; otherwise TRUE is returned. 
- */ - synchronized public boolean abortChunksOfRequest(TRequestToken rt, Collection c) { - - if (abort) { - - return false; - } - - if ((rt == null) || (c == null)) { - - return false; - } - - abortToken = rt; - abortSURLS = c; - abort = true; - - return true; - } - } diff --git a/src/main/java/it/grid/storm/asynch/BoL.java b/src/main/java/it/grid/storm/asynch/BoL.java index cb37db2bb..32cc02cef 100644 --- a/src/main/java/it/grid/storm/asynch/BoL.java +++ b/src/main/java/it/grid/storm/asynch/BoL.java @@ -17,11 +17,9 @@ package it.grid.storm.asynch; -import it.grid.storm.authz.AuthzDirector; -import it.grid.storm.authz.SpaceAuthzInterface; -import it.grid.storm.authz.sa.model.SRMSpaceRequest; -import it.grid.storm.catalogs.BoLData; -import it.grid.storm.catalogs.RequestData; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import it.grid.storm.catalogs.surl.SURLStatusManager; import it.grid.storm.catalogs.surl.SURLStatusManagerFactory; import it.grid.storm.common.types.SizeUnit; @@ -35,21 +33,18 @@ import it.grid.storm.namespace.NamespaceException; import it.grid.storm.namespace.StoRI; import it.grid.storm.namespace.UnapprochableSurlException; +import it.grid.storm.persistence.model.BoLData; +import it.grid.storm.persistence.model.RequestData; import it.grid.storm.scheduler.Chooser; import it.grid.storm.scheduler.Delegable; import it.grid.storm.scheduler.Streets; -import it.grid.storm.space.SpaceHelper; import it.grid.storm.srm.types.TReturnStatus; import it.grid.storm.srm.types.TSURL; import it.grid.storm.srm.types.TSizeInBytes; -import it.grid.storm.srm.types.TSpaceToken; import it.grid.storm.srm.types.TStatusCode; import it.grid.storm.tape.recalltable.TapeRecallCatalog; import it.grid.storm.tape.recalltable.model.TapeRecallStatus; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - /** * Class that represents a chunk of an srmBringOnLine request: it handles a single file of a * multifile/directory-expansion request. 
StoRM then sends the chunk to a chunk-scheduler. Security @@ -231,20 +226,6 @@ public void doIt() { } } - SpaceHelper sp = new SpaceHelper(); - TSpaceToken token = sp.getTokenFromStoRI(log, fileStoRI); - SpaceAuthzInterface spaceAuth = AuthzDirector.getSpaceAuthz(token); - - if (!spaceAuth.authorize(gu, SRMSpaceRequest.BOL)) { - String emsg = - String.format("Space authorization denied %s" + " in Storage Area: %s", surl, token); - log.debug(emsg); - requestData.changeStatusSRM_AUTHORIZATION_FAILURE(emsg); - failure = true; - printOutcome(dn, surl, requestData.getStatus()); - return; - } - manageIsPermit(fileStoRI); printOutcome(dn, surl, requestData.getStatus()); } diff --git a/src/main/java/it/grid/storm/asynch/BoLFeeder.java b/src/main/java/it/grid/storm/asynch/BoLFeeder.java index 483bfe179..078895705 100644 --- a/src/main/java/it/grid/storm/asynch/BoLFeeder.java +++ b/src/main/java/it/grid/storm/asynch/BoLFeeder.java @@ -18,10 +18,7 @@ package it.grid.storm.asynch; import it.grid.storm.catalogs.BoLChunkCatalog; -import it.grid.storm.catalogs.BoLPersistentChunkData; -import it.grid.storm.catalogs.InvalidSurlRequestDataAttributesException; import it.grid.storm.catalogs.RequestSummaryCatalog; -import it.grid.storm.catalogs.RequestSummaryData; import it.grid.storm.griduser.GridUserInterface; import it.grid.storm.namespace.InvalidDescendantsEmptyRequestException; import it.grid.storm.namespace.InvalidDescendantsFileRequestException; @@ -31,6 +28,9 @@ import it.grid.storm.namespace.NamespaceException; import it.grid.storm.namespace.StoRI; import it.grid.storm.namespace.UnapprochableSurlException; +import it.grid.storm.persistence.exceptions.InvalidSurlRequestDataAttributesException; +import it.grid.storm.persistence.model.BoLPersistentChunkData; +import it.grid.storm.persistence.model.RequestSummaryData; import it.grid.storm.scheduler.Delegable; import it.grid.storm.scheduler.SchedulerException; import it.grid.storm.srm.types.InvalidTDirOptionAttributesException; 
diff --git a/src/main/java/it/grid/storm/asynch/BoLPersistentChunk.java b/src/main/java/it/grid/storm/asynch/BoLPersistentChunk.java index 5979c88a5..a3e145d25 100644 --- a/src/main/java/it/grid/storm/asynch/BoLPersistentChunk.java +++ b/src/main/java/it/grid/storm/asynch/BoLPersistentChunk.java @@ -18,9 +18,9 @@ package it.grid.storm.asynch; import it.grid.storm.catalogs.BoLChunkCatalog; -import it.grid.storm.catalogs.BoLPersistentChunkData; -import it.grid.storm.catalogs.RequestSummaryData; import it.grid.storm.griduser.GridUserInterface; +import it.grid.storm.persistence.model.BoLPersistentChunkData; +import it.grid.storm.persistence.model.RequestSummaryData; import it.grid.storm.scheduler.PersistentRequestChunk; import it.grid.storm.srm.types.TStatusCode; import it.grid.storm.tape.recalltable.model.TapeRecallStatus; diff --git a/src/main/java/it/grid/storm/asynch/BuilderException.java b/src/main/java/it/grid/storm/asynch/BuilderException.java index 9513c5b8e..87e4f71f1 100644 --- a/src/main/java/it/grid/storm/asynch/BuilderException.java +++ b/src/main/java/it/grid/storm/asynch/BuilderException.java @@ -36,14 +36,4 @@ public BuilderException(String message) { super(message); } - - public BuilderException(Throwable cause) { - - super(cause); - } - - public BuilderException(String message, Throwable cause) { - - super(message, cause); - } } diff --git a/src/main/java/it/grid/storm/asynch/GlobalStatusManager.java b/src/main/java/it/grid/storm/asynch/GlobalStatusManager.java index 2b138f2a3..71f1616e2 100644 --- a/src/main/java/it/grid/storm/asynch/GlobalStatusManager.java +++ b/src/main/java/it/grid/storm/asynch/GlobalStatusManager.java @@ -17,8 +17,8 @@ package it.grid.storm.asynch; -import it.grid.storm.catalogs.ChunkData; import it.grid.storm.catalogs.RequestSummaryCatalog; +import it.grid.storm.persistence.model.ChunkData; import it.grid.storm.srm.types.TRequestToken; import it.grid.storm.srm.types.TReturnStatus; import it.grid.storm.srm.types.TStatusCode; 
diff --git a/src/main/java/it/grid/storm/asynch/InvalidBoLChunkAttributesException.java b/src/main/java/it/grid/storm/asynch/InvalidBoLChunkAttributesException.java deleted file mode 100644 index d56575f67..000000000 --- a/src/main/java/it/grid/storm/asynch/InvalidBoLChunkAttributesException.java +++ /dev/null @@ -1,63 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.asynch; - -import it.grid.storm.catalogs.BoLPersistentChunkData; -import it.grid.storm.catalogs.RequestSummaryData; -import it.grid.storm.griduser.GridUserInterface; - -/** - * This class represents an Exception thrown when a BoLChunk is created with any null attribute: - * GridUser, RequestSummaryData, BoLChunkData or GlobalStatusManager. - * - * @author: CNAF - * @version: 1.0 - * @date: Aug 2009 - */ -public class InvalidBoLChunkAttributesException extends Exception { - - private static final long serialVersionUID = 2320080131526579634L; - - private final boolean nullGu; // true if GridUser is null - private final boolean nullRsd; // true if RequestSummaryData is null - private final boolean nullChunkData; // true if BoLChunkData is null - private final boolean nullGlobalStatusManager; // true if gsm is null - - /** - * Constructor that requires the GridUser, RequestSummaryData, BoLChunkData and - * GlobalStatusManager that caused the exception to be thrown. 
- */ - public InvalidBoLChunkAttributesException(GridUserInterface gu, RequestSummaryData rsd, - BoLPersistentChunkData chunkData, GlobalStatusManager gsm) { - - nullGu = (gu == null); - nullRsd = (rsd == null); - nullChunkData = (chunkData == null); - nullGlobalStatusManager = (gsm == null); - } - - @Override - public String toString() { - - return String.format( - "Invalid attributes when creating BoLChunk: " - + "nullGridUser=%b; nullRequestSumamryData=%b; nullBoLChunkData=%b; " - + "nullGlobalStatusManager=%b", - nullGu, nullRsd, nullChunkData, nullGlobalStatusManager); - } -} diff --git a/src/main/java/it/grid/storm/asynch/InvalidBoLFeederAttributesException.java b/src/main/java/it/grid/storm/asynch/InvalidBoLFeederAttributesException.java index 8a3af9060..2a9039328 100644 --- a/src/main/java/it/grid/storm/asynch/InvalidBoLFeederAttributesException.java +++ b/src/main/java/it/grid/storm/asynch/InvalidBoLFeederAttributesException.java @@ -17,8 +17,8 @@ package it.grid.storm.asynch; -import it.grid.storm.catalogs.RequestSummaryData; import it.grid.storm.griduser.GridUserInterface; +import it.grid.storm.persistence.model.RequestSummaryData; /** * Class that represents an Exception thrown when a BoLFeeder could not be created because the diff --git a/src/main/java/it/grid/storm/asynch/InvalidPersistentRequestAttributesException.java b/src/main/java/it/grid/storm/asynch/InvalidPersistentRequestAttributesException.java index 9e55f6fe0..bef52a491 100644 --- a/src/main/java/it/grid/storm/asynch/InvalidPersistentRequestAttributesException.java +++ b/src/main/java/it/grid/storm/asynch/InvalidPersistentRequestAttributesException.java @@ -11,9 +11,9 @@ package it.grid.storm.asynch; -import it.grid.storm.catalogs.PersistentChunkData; -import it.grid.storm.catalogs.RequestSummaryData; import it.grid.storm.griduser.GridUserInterface; +import it.grid.storm.persistence.model.PersistentChunkData; +import it.grid.storm.persistence.model.RequestSummaryData; /** * This class 
represents an Exceptin thrown when a PtPChunk is created with any null attribute: diff --git a/src/main/java/it/grid/storm/asynch/InvalidPtGAttributesException.java b/src/main/java/it/grid/storm/asynch/InvalidPtGAttributesException.java index cf3333a4f..2ce14206a 100644 --- a/src/main/java/it/grid/storm/asynch/InvalidPtGAttributesException.java +++ b/src/main/java/it/grid/storm/asynch/InvalidPtGAttributesException.java @@ -1,7 +1,7 @@ package it.grid.storm.asynch; -import it.grid.storm.catalogs.PtGData; import it.grid.storm.griduser.GridUserInterface; +import it.grid.storm.persistence.model.PtGData; /** * @author Michele Dibenedetto diff --git a/src/main/java/it/grid/storm/asynch/InvalidPtGChunkAttributesException.java b/src/main/java/it/grid/storm/asynch/InvalidPtGChunkAttributesException.java deleted file mode 100644 index 12a830e62..000000000 --- a/src/main/java/it/grid/storm/asynch/InvalidPtGChunkAttributesException.java +++ /dev/null @@ -1,69 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.asynch; - -import it.grid.storm.catalogs.RequestSummaryData; -import it.grid.storm.catalogs.PtGData; -import it.grid.storm.griduser.GridUserInterface; - -/** - * This class represents an Exceptin thrown when a PtGChunk is created with any null attribute: - * GridUser, RequestSummaryData, PtGChunkData or GlobalStatusManager. 
- * - * @author: EGRID - ICTP Trieste - * @version: 2.0 - * @date: May 16th, 2005 - */ -public class InvalidPtGChunkAttributesException extends InvalidPtGAttributesException { - - /** - * - */ - private static final long serialVersionUID = 754275707315797289L; - /** - * true if RequestSummaryData is null - */ - private final boolean nullRsd; - - /** - * true if gsm is null - */ - private final boolean nullGlobalStatusManager; - - /** - * Constructor that requires the GridUser, RequestSummaryData, PtGChunkData and - * GlobalStatusManager that caused the exception to be thrown. - */ - public InvalidPtGChunkAttributesException(GridUserInterface gu, RequestSummaryData rsd, - PtGData chunkData, GlobalStatusManager gsm) { - - super(gu, chunkData); - nullRsd = (rsd == null); - nullGlobalStatusManager = (gsm == null); - } - - @Override - public String toString() { - - return String.format( - "Invalid attributes when creating PtGChunk: " - + "null-GridUser=%b, null-RequestSumamryData=%b, null-PtGChunkData=%b, " - + "null-GlobalStatusManager=%b", - nullGu, nullRsd, nullChunkData, nullGlobalStatusManager); - } -} diff --git a/src/main/java/it/grid/storm/asynch/InvalidPtGFeederAttributesException.java b/src/main/java/it/grid/storm/asynch/InvalidPtGFeederAttributesException.java index ce8793add..aacf3c43a 100644 --- a/src/main/java/it/grid/storm/asynch/InvalidPtGFeederAttributesException.java +++ b/src/main/java/it/grid/storm/asynch/InvalidPtGFeederAttributesException.java @@ -17,8 +17,8 @@ package it.grid.storm.asynch; -import it.grid.storm.catalogs.RequestSummaryData; import it.grid.storm.griduser.GridUserInterface; +import it.grid.storm.persistence.model.RequestSummaryData; /** * Class that represents an Exception thrown when a PtGFeeder could not be created because the diff --git a/src/main/java/it/grid/storm/asynch/InvalidPtPFeederAttributesException.java b/src/main/java/it/grid/storm/asynch/InvalidPtPFeederAttributesException.java index 748715ae4..638b026f4 100644 --- 
a/src/main/java/it/grid/storm/asynch/InvalidPtPFeederAttributesException.java +++ b/src/main/java/it/grid/storm/asynch/InvalidPtPFeederAttributesException.java @@ -17,8 +17,8 @@ package it.grid.storm.asynch; -import it.grid.storm.catalogs.RequestSummaryData; import it.grid.storm.griduser.GridUserInterface; +import it.grid.storm.persistence.model.RequestSummaryData; /** * Class that represents an Exception thrown when a PtPFeeder could not be created because the diff --git a/src/main/java/it/grid/storm/asynch/InvalidRequestAttributesException.java b/src/main/java/it/grid/storm/asynch/InvalidRequestAttributesException.java index ae0279eb7..fd7fc4522 100644 --- a/src/main/java/it/grid/storm/asynch/InvalidRequestAttributesException.java +++ b/src/main/java/it/grid/storm/asynch/InvalidRequestAttributesException.java @@ -11,8 +11,8 @@ package it.grid.storm.asynch; -import it.grid.storm.catalogs.RequestData; import it.grid.storm.griduser.GridUserInterface; +import it.grid.storm.persistence.model.RequestData; /** * @author Michele Dibenedetto diff --git a/src/main/java/it/grid/storm/asynch/PtG.java b/src/main/java/it/grid/storm/asynch/PtG.java index 260a5c621..645da72e9 100644 --- a/src/main/java/it/grid/storm/asynch/PtG.java +++ b/src/main/java/it/grid/storm/asynch/PtG.java @@ -11,13 +11,17 @@ package it.grid.storm.asynch; +import java.util.Arrays; +import java.util.Calendar; +import java.util.List; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import it.grid.storm.acl.AclManagerFS; import it.grid.storm.authz.AuthzDecision; import it.grid.storm.authz.AuthzDirector; -import it.grid.storm.authz.SpaceAuthzInterface; import it.grid.storm.authz.path.model.SRMFileRequest; -import it.grid.storm.authz.sa.model.SRMSpaceRequest; -import it.grid.storm.catalogs.PtGData; import it.grid.storm.catalogs.VolatileAndJiTCatalog; import it.grid.storm.catalogs.surl.SURLStatusManager; import it.grid.storm.catalogs.surl.SURLStatusManagerFactory; @@ -42,14 +46,13 @@ import 
it.grid.storm.namespace.model.DefaultACL; import it.grid.storm.namespace.model.Protocol; import it.grid.storm.persistence.exceptions.DataAccessException; +import it.grid.storm.persistence.model.PtGData; import it.grid.storm.scheduler.Chooser; import it.grid.storm.scheduler.Delegable; import it.grid.storm.scheduler.Streets; -import it.grid.storm.space.SpaceHelper; import it.grid.storm.srm.types.InvalidTSizeAttributesException; import it.grid.storm.srm.types.TSURL; import it.grid.storm.srm.types.TSizeInBytes; -import it.grid.storm.srm.types.TSpaceToken; import it.grid.storm.srm.types.TStatusCode; import it.grid.storm.srm.types.TTURL; import it.grid.storm.synchcall.command.CommandHelper; @@ -58,13 +61,6 @@ import it.grid.storm.tape.recalltable.TapeRecallCatalog; import it.grid.storm.tape.recalltable.model.TapeRecallStatus; -import java.util.Arrays; -import java.util.Calendar; -import java.util.List; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - public class PtG implements Delegable, Chooser, Request, Suspendedable { protected static final String SRM_COMMAND = "srmPrepareToGet"; @@ -266,214 +262,196 @@ private void downgradeToAnonymousHttpRequest() { */ private void manageIsPermit(StoRI fileStoRI) { - TSpaceToken token = new SpaceHelper().getTokenFromStoRI(log, fileStoRI); - SpaceAuthzInterface spaceAuth = AuthzDirector.getSpaceAuthz(token); + if ((!fileStoRI.getLocalFile().exists()) || (fileStoRI.getLocalFile().isDirectory())) { + /* File does not exist, or it is a directory! Fail request with SRM_INVALID_PATH */ + requestData.changeStatusSRM_INVALID_PATH( + "The requested file either does not exist, or it is a directory!"); + failure = true; + log.debug("ANOMALY in PtGChunk! PolicyCollector confirms read rights on" + + " file, yet file does not exist physically! 
Or, an srmPrepareToGet" + + " was attempted on a directory!"); + return; + } - boolean isSpaceAuthorized; - if (!downgradedToAnonymous && requestData instanceof IdentityInputData) { - isSpaceAuthorized = - spaceAuth.authorize(((IdentityInputData) requestData).getUser(), SRMSpaceRequest.PTG); - } else { - isSpaceAuthorized = spaceAuth.authorizeAnonymous(SRMSpaceRequest.PTG); + /* File exists and it is not a directory */ + /* Sets traverse permissions on file parent folders */ + boolean canTraverse; + try { + canTraverse = managePermitTraverseStep(fileStoRI); + } catch (CannotMapUserException e) { + String explanation = "Unable to find local user for " + DataHelper.getRequestor(requestData); + requestData.changeStatusSRM_FAILURE(explanation); + failure = true; + log.error("{}! CannotMapUserException: {}", explanation, e.getMessage(), e); + return; + } + + if (!canTraverse) { + String explanation = "Cannot travers parents"; + requestData.changeStatusSRM_FAILURE(explanation); + log.error(explanation); + failure = true; + return; } - if (isSpaceAuthorized) { + + try { + + TTURL turl; try { - if ((!fileStoRI.getLocalFile().exists()) || (fileStoRI.getLocalFile().isDirectory())) { - /* - * File does not exist, or it is a directory! Fail request with SRM_INVALID_PATH! - */ - requestData.changeStatusSRM_INVALID_PATH( - "The requested file either" + " does not exist, or it is a directory!"); + turl = fileStoRI.getTURL(requestData.getTransferProtocols()); + } catch (TURLBuildingException e) { + requestData + .changeStatusSRM_FAILURE("Unable to build the TURL for the provided transfer protocol"); + failure = true; + log.error("ERROR in PtGChunk! There was a failure building the " + + "TURL. : TURLBuildingException {}", e.getMessage(), e); + return; + } catch (IllegalArgumentException e) { + /* + * Handle null TURL prefix! This is a programming error: it should not occur! 
+ */ + requestData.changeStatusSRM_FAILURE("Unable to decide TURL!"); + failure = true; + log.error( + "ERROR in PtGChunk! invalid TURLPrefix in PtGChunkData " + + "caused StoRI to be unable to establish TTURL! " + "IllegalArgumentException: {}", + e.getMessage(), e); + return; + } catch (InvalidGetTURLProtocolException e) { + requestData.changeStatusSRM_FAILURE("Unable to decide TURL!"); + failure = true; + log.error("ERROR in PtGChunk! invalid TURL Protocol in PtGChunkData " + + "caused StoRI to be unable to establish TTURL! " + + "InvalidGetTURLProtocolException: {}", e.getMessage(), e); + return; + } + if (fileStoRI.getVirtualFileSystem().getStorageClassType().isTapeEnabled()) { + /* Compute the Expiration Time in seconds */ + long expDate = (System.currentTimeMillis() / 1000 + requestData.getPinLifeTime().value()); + StormEA.setPinned(fileStoRI.getLocalFile().getAbsolutePath(), expDate); + + + try { + TSizeInBytes fileSize = + TSizeInBytes.make(fileStoRI.getLocalFile().length(), SizeUnit.BYTES); + + requestData.setFileSize(fileSize); + log.debug("File size: {}", fileSize); + + } catch (InvalidTSizeAttributesException e) { + requestData.changeStatusSRM_FAILURE("Unable to determine file size"); failure = true; - log.debug("ANOMALY in PtGChunk! PolicyCollector confirms read rights on" - + " file, yet file does not exist physically! Or, an srmPrepareToGet" - + " was attempted on a directory!"); - } else { - /* File exists and it is not a directory */ - /* Sets traverse permissions on file parent folders */ - boolean canTraverse; + log.error("ERROR in PtGChunk! error in file size computation! " + + "InvalidTSizeAttributesException: {}", e.getMessage(), e); + return; + } + } + boolean isOnDisk; + try { + isOnDisk = isStoriOndisk(fileStoRI); + } catch (FSException e) { + requestData.changeStatusSRM_FAILURE("Unable to verify file disk status"); + failure = true; + log.error("ERROR in PtGChunk! error in file on disk check! 
" + "FSException: {}", + e.getMessage(), e); + return; + } + if (!isOnDisk && fileStoRI.getVirtualFileSystem().getStorageClassType().isTapeEnabled()) { + requestData.changeStatusSRM_REQUEST_INPROGRESS("Recalling" + " file from tape"); + String voName = null; + if (!downgradedToAnonymous && requestData instanceof IdentityInputData) { + if (((IdentityInputData) requestData).getUser() instanceof AbstractGridUser) { + voName = + ((AbstractGridUser) ((IdentityInputData) requestData).getUser()).getVO().getValue(); + } + } + try { + new TapeRecallCatalog().insertTask(this, voName, + fileStoRI.getLocalFile().getAbsolutePath()); + } catch (DataAccessException e) { + requestData.changeStatusSRM_FAILURE("Unable to request file recall from tape"); + failure = true; + log.error("ERROR in PtGChunk! error in tape recall task " + + "insertion! DataAccessException: {}", e.getMessage(), e); + return; + } + /* Stores the parameters in this object */ + if (!downgradedToAnonymous && requestData instanceof IdentityInputData) { try { - canTraverse = managePermitTraverseStep(fileStoRI); + backupData(fileStoRI, fileStoRI.getLocalFile(), + ((IdentityInputData) requestData).getUser().getLocalUser(), turl); } catch (CannotMapUserException e) { requestData.changeStatusSRM_FAILURE( "Unable to find local user for " + DataHelper.getRequestor(requestData)); failure = true; log.error( - "ERROR in PtGChunk! Unable to find LocalUser for {}! " - + "CannotMapUserException: {}", + "ERROR in PtGChunk! Unable to find LocalUser " + + "for {}! CannotMapUserException: {}", DataHelper.getRequestor(requestData), e.getMessage(), e); return; } - if (canTraverse) { - TTURL turl; - try { - turl = fileStoRI.getTURL(requestData.getTransferProtocols()); - } catch (TURLBuildingException e) { - requestData.changeStatusSRM_FAILURE( - "Unable to build the TURL for the provided transfer protocol"); - failure = true; - log.error("ERROR in PtGChunk! There was a failure building the " - + "TURL. 
: TURLBuildingException {}", e.getMessage(), e); - return; - } catch (IllegalArgumentException e) { - /* - * Handle null TURL prefix! This is a programming error: it should not occur! - */ - requestData.changeStatusSRM_FAILURE("Unable to decide TURL!"); - failure = true; - log.error("ERROR in PtGChunk! invalid TURLPrefix in PtGChunkData " - + "caused StoRI to be unable to establish TTURL! " - + "IllegalArgumentException: {}", e.getMessage(), e); - return; - } catch (InvalidGetTURLProtocolException e) { - requestData.changeStatusSRM_FAILURE("Unable to decide TURL!"); - failure = true; - log.error("ERROR in PtGChunk! invalid TURL Protocol in PtGChunkData " - + "caused StoRI to be unable to establish TTURL! " - + "InvalidGetTURLProtocolException: {}", e.getMessage(), e); - return; - } - if (fileStoRI.getVirtualFileSystem().getStorageClassType().isTapeEnabled()) { - /* Compute the Expiration Time in seconds */ - long expDate = - (System.currentTimeMillis() / 1000 + requestData.getPinLifeTime().value()); - StormEA.setPinned(fileStoRI.getLocalFile().getAbsolutePath(), expDate); - - - try { - TSizeInBytes fileSize = - TSizeInBytes.make(fileStoRI.getLocalFile().length(), SizeUnit.BYTES); - - requestData.setFileSize(fileSize); - log.debug("File size: {}", fileSize); - - } catch (InvalidTSizeAttributesException e) { - requestData.changeStatusSRM_FAILURE("Unable to determine file size"); - failure = true; - log.error("ERROR in PtGChunk! error in file size computation! " - + "InvalidTSizeAttributesException: {}", e.getMessage(), e); - return; - } - } - boolean isOnDisk; - try { - isOnDisk = isStoriOndisk(fileStoRI); - } catch (FSException e) { - requestData.changeStatusSRM_FAILURE("Unable to verify file disk status"); - failure = true; - log.error("ERROR in PtGChunk! error in file on disk check! 
" + "FSException: {}", - e.getMessage(), e); - return; - } - if (!isOnDisk - && fileStoRI.getVirtualFileSystem().getStorageClassType().isTapeEnabled()) { - requestData.changeStatusSRM_REQUEST_INPROGRESS("Recalling" + " file from tape"); - String voName = null; - if (!downgradedToAnonymous && requestData instanceof IdentityInputData) { - if (((IdentityInputData) requestData).getUser() instanceof AbstractGridUser) { - voName = ((AbstractGridUser) ((IdentityInputData) requestData).getUser()).getVO() - .getValue(); - } - } - try { - new TapeRecallCatalog().insertTask(this, voName, - fileStoRI.getLocalFile().getAbsolutePath()); - } catch (DataAccessException e) { - requestData.changeStatusSRM_FAILURE("Unable to request file recall from tape"); - failure = true; - log.error("ERROR in PtGChunk! error in tape recall task " - + "insertion! DataAccessException: {}", e.getMessage(), e); - return; - } - /* Stores the parameters in this object */ - if (!downgradedToAnonymous && requestData instanceof IdentityInputData) { - try { - backupData(fileStoRI, fileStoRI.getLocalFile(), - ((IdentityInputData) requestData).getUser().getLocalUser(), turl); - } catch (CannotMapUserException e) { - requestData.changeStatusSRM_FAILURE( - "Unable to find local user for " + DataHelper.getRequestor(requestData)); - failure = true; - log.error( - "ERROR in PtGChunk! Unable to find LocalUser " - + "for {}! CannotMapUserException: {}", - DataHelper.getRequestor(requestData), e.getMessage(), e); - return; - } - } else { - backupData(fileStoRI, fileStoRI.getLocalFile(), null, turl); - } - - /* - * The request now ends by saving in the DB the IN_PROGRESS status information. The - * effective PtG will be accomplished when the setTaskStatus() method of the - * tapeRecallDAO calls the completeRequest() method. 
- */ - } else { - /* - * Set the read permission for the user on the localfile and any default ace specified - * in the story files - */ - boolean canRead; - try { - canRead = managePermitReadFileStep(fileStoRI, turl); - } catch (CannotMapUserException e) { - requestData.changeStatusSRM_FAILURE( - "Unable to find local user for " + DataHelper.getRequestor(requestData)); - failure = true; - log.error( - "ERROR in PtGChunk! Unable to find LocalUser for {}! " - + "CannotMapUserException: {}", - DataHelper.getRequestor(requestData), e.getMessage(), e); - return; - } - if (canRead) { - - try { - TSizeInBytes fileSize = - TSizeInBytes.make(fileStoRI.getLocalFile().length(), SizeUnit.BYTES); - - requestData.setFileSize(fileSize); - log.debug("File size: {}", fileSize); - - } catch (InvalidTSizeAttributesException e) { - requestData.changeStatusSRM_FAILURE("Unable to determine file size"); - failure = true; - log.error("ERROR in PtGChunk! error in file size computation! " - + "InvalidTSizeAttributesException: {}", e.getMessage(), e); - return; - } - - requestData.setTransferURL(turl); - requestData.changeStatusSRM_FILE_PINNED("srmPrepareToGet successfully handled!"); - } else { - requestData.changeStatusSRM_FAILURE( - "Local filesystem mask does not allow" + " setting up correct ACLs for PtG!"); - } - } - } else { - // FIXME roll back Read, and Traverse URGENT! - } + } else { + backupData(fileStoRI, fileStoRI.getLocalFile(), null, turl); } - } catch (SecurityException e) { + /* - * The check for existence of the File failed because there is a SecurityManager installed - * that denies read privileges for that File! Perhaps the local system administrator of - * StoRM set up Java policies that contrast policies described by the PolicyCollector! There - * is a conflict here! + * The request now ends by saving in the DB the IN_PROGRESS status information. 
The + * effective PtG will be accomplished when the setTaskStatus() method of the tapeRecallDAO + * calls the completeRequest() method. */ - requestData.changeStatusSRM_FAILURE("StoRM is not allowed to work on " + "requested file!"); - failure = true; - log.error("ATTENTION in PtGChunk! PtGChunk received a SecurityException " - + "from Java SecurityManager; StoRM cannot check-existence or " - + "check-if-directory for: {}", fileStoRI.getLocalFile().toString(), e); + } else { + /* + * Set the read permission for the user on the localfile and any default ace specified in + * the story files + */ + boolean canRead; + try { + canRead = managePermitReadFileStep(fileStoRI, turl); + } catch (CannotMapUserException e) { + requestData.changeStatusSRM_FAILURE( + "Unable to find local user for " + DataHelper.getRequestor(requestData)); + failure = true; + log.error( + "ERROR in PtGChunk! Unable to find LocalUser for {}! " + "CannotMapUserException: {}", + DataHelper.getRequestor(requestData), e.getMessage(), e); + return; + } + if (canRead) { + + try { + TSizeInBytes fileSize = + TSizeInBytes.make(fileStoRI.getLocalFile().length(), SizeUnit.BYTES); + + requestData.setFileSize(fileSize); + log.debug("File size: {}", fileSize); + + } catch (InvalidTSizeAttributesException e) { + requestData.changeStatusSRM_FAILURE("Unable to determine file size"); + failure = true; + log.error("ERROR in PtGChunk! error in file size computation! 
" + + "InvalidTSizeAttributesException: {}", e.getMessage(), e); + return; + } + + requestData.setTransferURL(turl); + requestData.changeStatusSRM_FILE_PINNED("srmPrepareToGet successfully handled!"); + } else { + requestData.changeStatusSRM_FAILURE( + "Local filesystem mask does not allow" + " setting up correct ACLs for PtG!"); + } } - } else { - String emsg = String.format("Read access to %s in Storage Area: %s " + "denied!", - requestData.getSURL(), token); - requestData.changeStatusSRM_AUTHORIZATION_FAILURE(emsg); + } catch (SecurityException e) { + /* + * The check for existence of the File failed because there is a SecurityManager installed + * that denies read privileges for that File! Perhaps the local system administrator of StoRM + * set up Java policies that contrast policies described by the PolicyCollector! There is a + * conflict here! + */ + requestData.changeStatusSRM_FAILURE("StoRM is not allowed to work on " + "requested file!"); failure = true; - log.debug(emsg); + log.error("ATTENTION in PtGChunk! 
PtGChunk received a SecurityException " + + "from Java SecurityManager; StoRM cannot check-existence or " + + "check-if-directory for: {}", fileStoRI.getLocalFile().toString(), e); } } @@ -491,24 +469,13 @@ private void manageIsPermit(StoRI fileStoRI) { private boolean managePermitTraverseStep(StoRI fileStoRI) throws CannotMapUserException { - if (!downgradedToAnonymous && requestData instanceof IdentityInputData) { - - if (!setupACLs) - return verifyPath(fileStoRI); + if (!downgradedToAnonymous && requestData instanceof IdentityInputData && setupACLs) { return verifyPath(fileStoRI) && setParentsAcl(fileStoRI, ((IdentityInputData) requestData).getUser().getLocalUser()); } - if (verifyPath(fileStoRI)) { - - if (setupACLs) - setHttpsServiceParentAcl(fileStoRI); - - return true; - } - - return false; + return verifyPath(fileStoRI); } private boolean verifyPath(StoRI fileStoRI) { @@ -582,7 +549,6 @@ private boolean managePermitReadFileStep(StoRI fileStoRI, TTURL turl) if (setupACLs) { setDefaultAcl(fileStoRI, fileStoRI.getLocalFile()); - setHttpsServiceAcl(fileStoRI.getLocalFile(), FilesystemPermission.Read); } return true; @@ -699,29 +665,6 @@ private boolean setAoTAcl(StoRI fileStori, LocalUser localUser, FilesystemPermis return true; } - private void setHttpsServiceParentAcl(StoRI fileStoRI) { - - log.debug("Adding parent https ACL for directory : '{}' parents", fileStoRI.getAbsolutePath()); - - for (StoRI parentStoRI : fileStoRI.getParents()) { - setHttpsServiceAcl(parentStoRI.getLocalFile(), FilesystemPermission.Traverse); - } - } - - private void setHttpsServiceAcl(LocalFile file, FilesystemPermission permission) { - - log.debug("Adding https ACL {} for directory : '{}'", permission, file); - - try { - AclManagerFS.getInstance().grantHttpsServiceGroupPermission(file, permission); - } catch (IllegalArgumentException e) { - log.error("Unable to grant user permission on the created folder. 
" - + "IllegalArgumentException: {}", e.getMessage(), e); - requestData.getStatus() - .extendExplaination("Unable to grant group permission on the created folder"); - } - } - private void setDefaultAcl(StoRI fileStoRI, LocalFile localFile) { /* Manage DefaultACL */ diff --git a/src/main/java/it/grid/storm/asynch/PtGBuilder.java b/src/main/java/it/grid/storm/asynch/PtGBuilder.java index e9d942105..f7915ae5d 100644 --- a/src/main/java/it/grid/storm/asynch/PtGBuilder.java +++ b/src/main/java/it/grid/storm/asynch/PtGBuilder.java @@ -17,15 +17,16 @@ package it.grid.storm.asynch; -import it.grid.storm.asynch.BuilderException; -import it.grid.storm.asynch.PtG; -import it.grid.storm.catalogs.AnonymousPtGData; -import it.grid.storm.catalogs.IdentityPtGData; -import it.grid.storm.catalogs.InvalidFileTransferDataAttributesException; -import it.grid.storm.catalogs.InvalidPtGDataAttributesException; -import it.grid.storm.catalogs.InvalidSurlRequestDataAttributesException; -import it.grid.storm.catalogs.PtGData; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import it.grid.storm.common.types.TURLPrefix; +import it.grid.storm.persistence.exceptions.InvalidFileTransferDataAttributesException; +import it.grid.storm.persistence.exceptions.InvalidPtGDataAttributesException; +import it.grid.storm.persistence.exceptions.InvalidSurlRequestDataAttributesException; +import it.grid.storm.persistence.model.AnonymousPtGData; +import it.grid.storm.persistence.model.IdentityPtGData; +import it.grid.storm.persistence.model.PtGData; import it.grid.storm.srm.types.TDirOption; import it.grid.storm.srm.types.TLifeTimeInSeconds; import it.grid.storm.srm.types.TReturnStatus; @@ -35,8 +36,6 @@ import it.grid.storm.srm.types.TTURL; import it.grid.storm.synchcall.data.IdentityInputData; import it.grid.storm.synchcall.data.datatransfer.FileTransferInputData; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; /** * @author Michele Dibenedetto diff --git 
a/src/main/java/it/grid/storm/asynch/PtGFeeder.java b/src/main/java/it/grid/storm/asynch/PtGFeeder.java index 0a73c85d1..b4c08e6ca 100644 --- a/src/main/java/it/grid/storm/asynch/PtGFeeder.java +++ b/src/main/java/it/grid/storm/asynch/PtGFeeder.java @@ -17,11 +17,8 @@ package it.grid.storm.asynch; -import it.grid.storm.catalogs.InvalidSurlRequestDataAttributesException; import it.grid.storm.catalogs.PtGChunkCatalog; -import it.grid.storm.catalogs.PtGPersistentChunkData; import it.grid.storm.catalogs.RequestSummaryCatalog; -import it.grid.storm.catalogs.RequestSummaryData; import it.grid.storm.namespace.InvalidDescendantsEmptyRequestException; import it.grid.storm.namespace.InvalidDescendantsFileRequestException; import it.grid.storm.namespace.InvalidDescendantsPathRequestException; @@ -30,6 +27,9 @@ import it.grid.storm.namespace.NamespaceException; import it.grid.storm.namespace.StoRI; import it.grid.storm.namespace.UnapprochableSurlException; +import it.grid.storm.persistence.exceptions.InvalidSurlRequestDataAttributesException; +import it.grid.storm.persistence.model.PtGPersistentChunkData; +import it.grid.storm.persistence.model.RequestSummaryData; import it.grid.storm.scheduler.Delegable; import it.grid.storm.scheduler.SchedulerException; import it.grid.storm.srm.types.InvalidTDirOptionAttributesException; diff --git a/src/main/java/it/grid/storm/asynch/PtGPersistentChunk.java b/src/main/java/it/grid/storm/asynch/PtGPersistentChunk.java index cba0e3227..40a6dca6d 100644 --- a/src/main/java/it/grid/storm/asynch/PtGPersistentChunk.java +++ b/src/main/java/it/grid/storm/asynch/PtGPersistentChunk.java @@ -19,9 +19,9 @@ import java.util.Arrays; import it.grid.storm.catalogs.PtGChunkCatalog; -import it.grid.storm.catalogs.PtGData; -import it.grid.storm.catalogs.PtGPersistentChunkData; -import it.grid.storm.catalogs.RequestSummaryData; +import it.grid.storm.persistence.model.PtGData; +import it.grid.storm.persistence.model.PtGPersistentChunkData; +import 
it.grid.storm.persistence.model.RequestSummaryData; import it.grid.storm.scheduler.PersistentRequestChunk; import it.grid.storm.srm.types.TStatusCode; import it.grid.storm.synchcall.command.CommandHelper; diff --git a/src/main/java/it/grid/storm/asynch/PtP.java b/src/main/java/it/grid/storm/asynch/PtP.java index 257aedb8f..b577a6a91 100644 --- a/src/main/java/it/grid/storm/asynch/PtP.java +++ b/src/main/java/it/grid/storm/asynch/PtP.java @@ -22,10 +22,7 @@ import it.grid.storm.acl.AclManagerFS; import it.grid.storm.authz.AuthzDecision; import it.grid.storm.authz.AuthzDirector; -import it.grid.storm.authz.SpaceAuthzInterface; import it.grid.storm.authz.path.model.SRMFileRequest; -import it.grid.storm.authz.sa.model.SRMSpaceRequest; -import it.grid.storm.catalogs.PtPData; import it.grid.storm.catalogs.ReservedSpaceCatalog; import it.grid.storm.catalogs.VolatileAndJiTCatalog; import it.grid.storm.catalogs.surl.SURLStatusManager; @@ -49,6 +46,7 @@ import it.grid.storm.namespace.model.ACLEntry; import it.grid.storm.namespace.model.DefaultACL; import it.grid.storm.persistence.exceptions.DataAccessException; +import it.grid.storm.persistence.model.PtPData; import it.grid.storm.persistence.model.TransferObjectDecodingException; import it.grid.storm.scheduler.Chooser; import it.grid.storm.scheduler.Delegable; @@ -332,24 +330,6 @@ private void manageOverwriteExistingFile(StoRI fileStoRI) { */ private void managePermit(StoRI fileStoRI) { - TSpaceToken token = new SpaceHelper().getTokenFromStoRI(PtP.log, fileStoRI); - SpaceAuthzInterface spaceAuth = AuthzDirector.getSpaceAuthz(token); - - boolean isSpaceAuthorized; - if (requestData instanceof IdentityInputData) { - isSpaceAuthorized = - spaceAuth.authorize(((IdentityInputData) requestData).getUser(), SRMSpaceRequest.PTP); - } else { - isSpaceAuthorized = spaceAuth.authorizeAnonymous(SRMSpaceRequest.PTP); - } - if (!isSpaceAuthorized) { - requestData.changeStatusSRM_AUTHORIZATION_FAILURE("Create/Write access for " - + 
requestData.getSURL() + " in Storage Area: " + token + " denied!"); - failure = true; - log.debug("Create/Write access for {} in Storage Area: {} denied!", requestData.getSURL(), - token); - return; - } TTURL auxTURL; try { auxTURL = fileStoRI.getTURL(requestData.getTransferProtocols()); @@ -445,7 +425,6 @@ private boolean managePermitTraverseStep(StoRI fileStoRI) throws CannotMapUserEx return setParentAcl(fileStoRI, user); } - setHttpsServiceParentAcl(fileStoRI); return true; } @@ -559,7 +538,6 @@ private boolean managePermitSetFileStep(StoRI fileStoRI) throws CannotMapUserExc setDefaultAcl(fileStoRI); setTapeManagementAcl(fileStoRI); - setHttpsServiceAcl(fileStoRI.getLocalFile(), FilesystemPermission.ReadWrite); return true; } @@ -674,29 +652,6 @@ private boolean setAoTAcl(StoRI fileStori, LocalUser localUser, FilesystemPermis return response; } - private void setHttpsServiceParentAcl(StoRI fileStoRI) { - - log.debug("SrmMkdir: Adding parent https ACL for directory: '{}' parents", - fileStoRI.getAbsolutePath()); - for (StoRI parentStoRI : fileStoRI.getParents()) { - setHttpsServiceAcl(parentStoRI.getLocalFile(), FilesystemPermission.Traverse); - } - } - - private void setHttpsServiceAcl(LocalFile file, FilesystemPermission permission) { - - log.debug("SrmMkdir: Adding https ACL {} for directory : '{}'", permission, file); - - try { - AclManagerFS.getInstance().grantHttpsServiceGroupPermission(file, permission); - } catch (IllegalArgumentException e) { - log.error("Unable to grant user permission on the created folder. " - + "IllegalArgumentException: {}", e.getMessage(), e); - requestData.getStatus() - .extendExplaination("Unable to grant group permission on the created folder"); - } - } - /** * Private method used to manage ReserveSpace. Returns false if something went wrong! 
*/ @@ -868,7 +823,7 @@ private boolean isExistingSpaceToken(TSpaceToken spaceToken) throws Exception { StorageSpaceData spaceData = null; try { - spaceData = new ReservedSpaceCatalog().getStorageSpace(spaceToken); + spaceData = ReservedSpaceCatalog.getInstance().getStorageSpace(spaceToken); } catch (TransferObjectDecodingException e) { log.error("Unable to build StorageSpaceData from StorageSpaceTO." + " TransferObjectDecodingException: {}", e.getMessage()); diff --git a/src/main/java/it/grid/storm/asynch/PtPBuilder.java b/src/main/java/it/grid/storm/asynch/PtPBuilder.java index 38150ea60..a04317c88 100644 --- a/src/main/java/it/grid/storm/asynch/PtPBuilder.java +++ b/src/main/java/it/grid/storm/asynch/PtPBuilder.java @@ -19,14 +19,15 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import it.grid.storm.catalogs.AnonymousPtPData; -import it.grid.storm.catalogs.IdentityPtPData; -import it.grid.storm.catalogs.InvalidFileTransferDataAttributesException; -import it.grid.storm.catalogs.InvalidPtPDataAttributesException; -import it.grid.storm.catalogs.InvalidSurlRequestDataAttributesException; -import it.grid.storm.catalogs.PtPData; + import it.grid.storm.common.types.TURLPrefix; import it.grid.storm.config.Configuration; +import it.grid.storm.persistence.exceptions.InvalidFileTransferDataAttributesException; +import it.grid.storm.persistence.exceptions.InvalidPtPDataAttributesException; +import it.grid.storm.persistence.exceptions.InvalidSurlRequestDataAttributesException; +import it.grid.storm.persistence.model.AnonymousPtPData; +import it.grid.storm.persistence.model.IdentityPtPData; +import it.grid.storm.persistence.model.PtPData; import it.grid.storm.srm.types.TFileStorageType; import it.grid.storm.srm.types.TLifeTimeInSeconds; import it.grid.storm.srm.types.TOverwriteMode; diff --git a/src/main/java/it/grid/storm/asynch/PtPFeeder.java b/src/main/java/it/grid/storm/asynch/PtPFeeder.java index 65486ac20..c1df96972 100644 --- 
a/src/main/java/it/grid/storm/asynch/PtPFeeder.java +++ b/src/main/java/it/grid/storm/asynch/PtPFeeder.java @@ -18,10 +18,10 @@ package it.grid.storm.asynch; import it.grid.storm.catalogs.PtPChunkCatalog; -import it.grid.storm.catalogs.PtPPersistentChunkData; import it.grid.storm.catalogs.RequestSummaryCatalog; -import it.grid.storm.catalogs.RequestSummaryData; import it.grid.storm.griduser.GridUserInterface; +import it.grid.storm.persistence.model.PtPPersistentChunkData; +import it.grid.storm.persistence.model.RequestSummaryData; import it.grid.storm.scheduler.Delegable; import it.grid.storm.scheduler.SchedulerException; import it.grid.storm.srm.types.TSURL; diff --git a/src/main/java/it/grid/storm/asynch/PtPPersistentChunk.java b/src/main/java/it/grid/storm/asynch/PtPPersistentChunk.java index 65f830d32..686649ec5 100644 --- a/src/main/java/it/grid/storm/asynch/PtPPersistentChunk.java +++ b/src/main/java/it/grid/storm/asynch/PtPPersistentChunk.java @@ -13,9 +13,9 @@ import java.util.Arrays; import it.grid.storm.catalogs.PtPChunkCatalog; -import it.grid.storm.catalogs.PtPData; -import it.grid.storm.catalogs.PtPPersistentChunkData; -import it.grid.storm.catalogs.RequestSummaryData; +import it.grid.storm.persistence.model.PtPData; +import it.grid.storm.persistence.model.PtPPersistentChunkData; +import it.grid.storm.persistence.model.RequestSummaryData; import it.grid.storm.scheduler.PersistentRequestChunk; import it.grid.storm.srm.types.TStatusCode; import it.grid.storm.synchcall.command.CommandHelper; diff --git a/src/main/java/it/grid/storm/asynch/SRMPrepareToPutReply.java b/src/main/java/it/grid/storm/asynch/SRMPrepareToPutReply.java deleted file mode 100644 index 7d036c706..000000000 --- a/src/main/java/it/grid/storm/asynch/SRMPrepareToPutReply.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.asynch; - -import it.grid.storm.srm.types.TRequestToken; - -/** - * Class that represents a reply to an issued SRMPrepareToPut command. It provides a method to - * recover the assigned request token. - * - * @author EGRID - ICTP Trieste - * @version 1.0 - * @date September, 2005 - */ -public class SRMPrepareToPutReply { - - // TRequestToken assigned during the SRM prepare to put operation - private TRequestToken requestToken = null; - - /** - * Constructor that requires the assigned TRequestToken; if it is null, an - * InvalidPutReplyAttributeException is thrown. - */ - public SRMPrepareToPutReply(TRequestToken requestToken) throws InvalidPutReplyAttributeException { - - if (requestToken == null) - throw new InvalidPutReplyAttributeException(); - this.requestToken = requestToken; - } - - /** - * Method that returns the assigned request token. - */ - public TRequestToken requestToken() { - - return requestToken; - } - - public String toString() { - - return "requestToken=" + requestToken; - } -} diff --git a/src/main/java/it/grid/storm/asynch/SRMPutDoneReply.java b/src/main/java/it/grid/storm/asynch/SRMPutDoneReply.java deleted file mode 100644 index 215356c2b..000000000 --- a/src/main/java/it/grid/storm/asynch/SRMPutDoneReply.java +++ /dev/null @@ -1,56 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.asynch; - -import it.grid.storm.srm.types.TReturnStatus; - -/** - * Class that represents the reply received from issuing an srmPutDone command. - * - * @author EGRID ICTP Trieste - * @version 1.0 - * @date August 2006 - */ -public class SRMPutDoneReply { - - private TReturnStatus overallRetStat = null; // overall request return status - - /** - * Constructor that requires the overall TReturnStatus of the reply. - */ - public SRMPutDoneReply(TReturnStatus overallRetStat) - throws InvalidPutDoneReplyAttributeException { - - if (overallRetStat == null) - throw new InvalidPutDoneReplyAttributeException(); - this.overallRetStat = overallRetStat; - } - - /** - * Method that returns the overll status of the request. - */ - public TReturnStatus overallRetStat() { - - return overallRetStat; - } - - public String toString() { - - return "SRMPutDoneReply: overall TReturnStatus is " + overallRetStat.toString(); - } -} diff --git a/src/main/java/it/grid/storm/asynch/SRMStatusOfPutRequestReply.java b/src/main/java/it/grid/storm/asynch/SRMStatusOfPutRequestReply.java deleted file mode 100644 index f304c6d81..000000000 --- a/src/main/java/it/grid/storm/asynch/SRMStatusOfPutRequestReply.java +++ /dev/null @@ -1,68 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.asynch; - -import it.grid.storm.srm.types.TTURL; -import it.grid.storm.srm.types.TReturnStatus; - -/** - * Class that represents the reply returned from an invocation of SRMStatusOfPutRequest. It supplies - * methods for quering the toTURL assigned, and the returnStatus of the request. - * - * @author EGRID - ICTP Trieste - * @version 1.0 - * @date September 2005 - */ -public class SRMStatusOfPutRequestReply { - - private TTURL toTURL = null; // TTURL as supplied by the invoked server in the - // SRMStatusOfPutRequest - private TReturnStatus returnStatus = null; // returnStatus as supplied by the - // invoked server in the - // SRMStatusOfPutRequest - - public SRMStatusOfPutRequestReply(TTURL toTURL, TReturnStatus returnStatus) - throws InvalidPutStatusAttributesException { - - if ((toTURL == null) || (returnStatus == null)) - throw new InvalidPutStatusAttributesException(toTURL, returnStatus); - this.toTURL = toTURL; - this.returnStatus = returnStatus; - } - - /** - * Method that returns the toTURL that the invoked server assigned to the put request. - */ - public TTURL toTURL() { - - return toTURL; - } - - /** - * Method that returns the TReturnStatus that the invoked server assigned to the put request. 
- */ - public TReturnStatus returnStatus() { - - return returnStatus; - } - - public String toString() { - - return "toTURL= " + toTURL + "; returnStatus=" + returnStatus; - } -} diff --git a/src/main/java/it/grid/storm/asynch/Suspendedable.java b/src/main/java/it/grid/storm/asynch/Suspendedable.java index cfd887e00..970996c54 100644 --- a/src/main/java/it/grid/storm/asynch/Suspendedable.java +++ b/src/main/java/it/grid/storm/asynch/Suspendedable.java @@ -17,7 +17,7 @@ package it.grid.storm.asynch; -import it.grid.storm.catalogs.RequestData; +import it.grid.storm.persistence.model.RequestData; import it.grid.storm.tape.recalltable.model.TapeRecallStatus; public interface Suspendedable { diff --git a/src/main/java/it/grid/storm/authz/AuthzDirector.java b/src/main/java/it/grid/storm/authz/AuthzDirector.java index 154001521..4e65b95f5 100644 --- a/src/main/java/it/grid/storm/authz/AuthzDirector.java +++ b/src/main/java/it/grid/storm/authz/AuthzDirector.java @@ -17,159 +17,44 @@ package it.grid.storm.authz; -import it.grid.storm.authz.path.PathAuthz; -import it.grid.storm.authz.path.conf.PathAuthzDBReader; -import it.grid.storm.authz.sa.SpaceDBAuthz; -import it.grid.storm.authz.sa.test.MockSpaceAuthz; -import it.grid.storm.namespace.NamespaceDirector; -import it.grid.storm.namespace.NamespaceException; -import it.grid.storm.namespace.NamespaceInterface; -import it.grid.storm.namespace.VirtualFSInterface; -import it.grid.storm.namespace.model.SAAuthzType; -import it.grid.storm.srm.types.TSpaceToken; - -import java.io.File; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.Map; - import org.slf4j.Logger; import org.slf4j.LoggerFactory; -public class AuthzDirector { - - private static final Logger log = LoggerFactory - .getLogger(AuthzDirector.class); - private static String configurationPATH; - - // Map between 'SpaceToken' and the related 'SpaceAuthz' - private static Map spaceAuthzs = null; - - // PathAuthz is only one, shared by all SAs - 
private static PathAuthzInterface pathAuthz = null; - - /** - * Scan the Namespace.xml to retrieve the list of file AuthZDB to digest - */ - private static Map buildSpaceAuthzsMAP() { - - HashMap spaceAuthzMap = new HashMap(); - - // Retrieve the list of VFS from Namespace - NamespaceInterface ns = NamespaceDirector.getNamespace(); - ArrayList vfss; - try { - vfss = new ArrayList(ns.getAllDefinedVFS()); - for (VirtualFSInterface vfs : vfss) { - String vfsName = vfs.getAliasName(); - SAAuthzType authzTp = vfs.getStorageAreaAuthzType(); - String authzName = ""; - if (authzTp.equals(SAAuthzType.AUTHZDB)) { - // The Space Authz is based on Authz DB - authzName = vfs.getStorageAreaAuthzDB(); - log.debug("Loading AuthzDB '{}'", authzName); - if (existsAuthzDBFile(authzName)) { - // Digest the Space AuthzDB File - TSpaceToken spaceToken = vfs.getSpaceToken(); - SpaceAuthzInterface spaceAuthz = new SpaceDBAuthz(authzName); - spaceAuthzMap.put(spaceToken, spaceAuthz); - } else { - log.error("File AuthzDB '{}' related to '{}' does not exists.", - authzName, vfsName); - } - } else { - authzName = vfs.getStorageAreaAuthzFixed(); - } - log.debug("VFS ['{}'] = {} : {}", vfsName, authzTp, authzName); - } - } catch (NamespaceException e) { - log.error("Unable to initialize AUTHZ DB! 
Error: {}", e.getMessage(), e); - } - - return spaceAuthzMap; - } - - /** - * Utility method - * - * @param dbFileName - * @return - * @throws AuthzDBReaderException - */ - private static boolean existsAuthzDBFile(String dbFileName) { - - String fileName = configurationPATH + File.separator + dbFileName; - boolean exists = (new File(fileName)).exists(); - if (!exists) { - log.warn("The AuthzDB File '{}' does not exists", dbFileName); - } - return exists; - } - - // **************************************** - // PUBLIC METHODS - // **************************************** - - /****************************** - * SPACE AUTHORIZATION ENGINE - ******************************/ - public static void initializeSpaceAuthz() { - - // Build Space Authzs MAP - spaceAuthzs = buildSpaceAuthzsMAP(); - } - - /** - * Retrieve the Space Authorization module related to the Space Token - * - * @param token - * @return - */ - public static SpaceAuthzInterface getSpaceAuthz(TSpaceToken token) { - - SpaceAuthzInterface spaceAuthz = new MockSpaceAuthz(); - // Retrieve the SpaceAuthz related to the Space Token - if ((spaceAuthzs != null) && (spaceAuthzs.containsKey(token))) { - spaceAuthz = spaceAuthzs.get(token); - log.debug("Space Authz related to S.Token ='{}' is '{}'", token, - spaceAuthz.getSpaceAuthzID()); - } else { - log.debug("Space Authz related to S.Token ='{}' does not exists. 
" - + "Use the MOCK one.", token); - } - return spaceAuthz; - } - - /****************************** - * PATH AUTHORIZATION ENGINE - ******************************/ - - /** - * Initializating the Path Authorization engine - * - * @param pathAuthz2 - */ - public static void initializePathAuthz(String pathAuthzDBFileName) - throws DirectorException { - - PathAuthzDBReader authzDBReader; - try { - authzDBReader = new PathAuthzDBReader(pathAuthzDBFileName); - } catch (Exception e) { - log.error("Unable to build a PathAuthzDBReader: {}", e.getMessage(), e); - throw new DirectorException("Unable to build a PathAuthzDBReader"); - } - AuthzDirector.pathAuthz = new PathAuthz(authzDBReader.getPathAuthzDB()); - } +import it.grid.storm.authz.path.PathAuthz; +import it.grid.storm.authz.path.conf.PathAuthzDBReader; - /** - * Retrieve the Path Authorization module - * - * @todo: To implement this. - */ - public static PathAuthzInterface getPathAuthz() { +public class AuthzDirector { - return AuthzDirector.pathAuthz; - } + private static final Logger log = LoggerFactory.getLogger(AuthzDirector.class); + + // PathAuthz is only one, shared by all SAs + private static PathAuthzInterface pathAuthz = null; + + /** + * Initialize the Path Authorization engine + * + * @param pathAuthz2 + */ + public static void initializePathAuthz(String pathAuthzDBFileName) throws DirectorException { + + PathAuthzDBReader authzDBReader; + try { + authzDBReader = new PathAuthzDBReader(pathAuthzDBFileName); + } catch (Exception e) { + log.error("Unable to build a PathAuthzDBReader: {}", e.getMessage(), e); + throw new DirectorException("Unable to build a PathAuthzDBReader"); + } + AuthzDirector.pathAuthz = new PathAuthz(authzDBReader.getPathAuthzDB()); + } + + /** + * Retrieve the Path Authorization module + * + * @todo: To implement this. 
+ */ + public static PathAuthzInterface getPathAuthz() { + + return AuthzDirector.pathAuthz; + } } diff --git a/src/main/java/it/grid/storm/authz/AuthzException.java b/src/main/java/it/grid/storm/authz/AuthzException.java index 945f41e4c..69d2e848a 100644 --- a/src/main/java/it/grid/storm/authz/AuthzException.java +++ b/src/main/java/it/grid/storm/authz/AuthzException.java @@ -26,28 +26,18 @@ */ public class AuthzException extends RuntimeException { - /** - * - */ - private static final long serialVersionUID = 1L; + /** + * + */ + private static final long serialVersionUID = 1L; - public AuthzException() { + public AuthzException() { - super(); - } + super(); + } - public AuthzException(String message) { + public AuthzException(String message) { - super(message); - } - - public AuthzException(String message, Throwable cause) { - - super(message, cause); - } - - public AuthzException(Throwable cause) { - - super(cause); - } + super(message); + } } diff --git a/src/main/java/it/grid/storm/authz/DirectorException.java b/src/main/java/it/grid/storm/authz/DirectorException.java index 7e7382651..a84d37099 100644 --- a/src/main/java/it/grid/storm/authz/DirectorException.java +++ b/src/main/java/it/grid/storm/authz/DirectorException.java @@ -2,28 +2,18 @@ public class DirectorException extends Exception { - /** - * - */ - private static final long serialVersionUID = 8391356294029256927L; + /** + * + */ + private static final long serialVersionUID = 8391356294029256927L; - public DirectorException() { + public DirectorException() { - } + } - public DirectorException(String message) { + public DirectorException(String message) { - super(message); - } - - public DirectorException(Throwable cause) { - - super(cause); - } - - public DirectorException(String message, Throwable cause) { - - super(message, cause); - } + super(message); + } } diff --git a/src/main/java/it/grid/storm/authz/path/conf/PathAuthzDB.java b/src/main/java/it/grid/storm/authz/path/conf/PathAuthzDB.java 
index 917c8875d..ecbc9e84b 100644 --- a/src/main/java/it/grid/storm/authz/path/conf/PathAuthzDB.java +++ b/src/main/java/it/grid/storm/authz/path/conf/PathAuthzDB.java @@ -46,13 +46,6 @@ public PathAuthzDB(String pathAuthzDBID, PathAuthzEvaluationAlgorithm algorithm, this.authzDB.addAll(aces); } - public PathAuthzDB(String pathAuthzDBID, List aces) { - - this.pathAuthzDBID = pathAuthzDBID; - this.evaluationAlg = DEFAULT_ALGORITHM; - this.authzDB.addAll(aces); - } - /** * Empty constructor. Use it only if there is not */ @@ -63,11 +56,6 @@ public PathAuthzDB() { this.authzDB.add(PathACE.PERMIT_ALL); } - public void addPathACE(PathACE pathAce) { - - authzDB.add(pathAce); - } - public List getACL() { return authzDB; diff --git a/src/main/java/it/grid/storm/authz/path/conf/PathAuthzDBReader.java b/src/main/java/it/grid/storm/authz/path/conf/PathAuthzDBReader.java index 7b9b0aebf..02e9a5ea5 100644 --- a/src/main/java/it/grid/storm/authz/path/conf/PathAuthzDBReader.java +++ b/src/main/java/it/grid/storm/authz/path/conf/PathAuthzDBReader.java @@ -71,15 +71,6 @@ public PathAuthzDBReader(String filename) throws Exception { log.info(pathAuthzDB.toString()); } - public void refreshPathAuthzDB() throws Exception { - - log.debug(" Start refreshing."); - pathAuthzDB = loadPathAuthzDB(authzDBFilename); - log.debug(" End refreshing."); - log.info("Path Authz DB ('{}') RE-loaded.", pathAuthzDB.getPathAuthzDBID()); - log.info(pathAuthzDB.toString()); - } - public PathAuthzDB getPathAuthzDB() { return pathAuthzDB; diff --git a/src/main/java/it/grid/storm/authz/path/model/PathAuthzAlgBestMatch.java b/src/main/java/it/grid/storm/authz/path/model/PathAuthzAlgBestMatch.java index 84e791fd5..9cc821191 100644 --- a/src/main/java/it/grid/storm/authz/path/model/PathAuthzAlgBestMatch.java +++ b/src/main/java/it/grid/storm/authz/path/model/PathAuthzAlgBestMatch.java @@ -35,6 +35,8 @@ */ public class PathAuthzAlgBestMatch extends PathAuthzEvaluationAlgorithm { + private static 
PathAuthzAlgBestMatch instance; + public static PathAuthzEvaluationAlgorithm getInstance() { if (instance == null) { diff --git a/src/main/java/it/grid/storm/authz/path/model/PathAuthzEvaluationAlgorithm.java b/src/main/java/it/grid/storm/authz/path/model/PathAuthzEvaluationAlgorithm.java index 4abf781fb..95a1d1bac 100644 --- a/src/main/java/it/grid/storm/authz/path/model/PathAuthzEvaluationAlgorithm.java +++ b/src/main/java/it/grid/storm/authz/path/model/PathAuthzEvaluationAlgorithm.java @@ -28,17 +28,6 @@ */ public abstract class PathAuthzEvaluationAlgorithm { - public static PathAuthzEvaluationAlgorithm instance = null; - - public static PathAuthzEvaluationAlgorithm getInstance() throws Exception { - - if (instance == null) { - throw new Exception( - "Unable to provide the instance, my comcrete subclass as not provided any"); - } - return instance; - } - public abstract AuthzDecision evaluate(String subject, StFN fileName, SRMFileRequest pathOperation, List acl); diff --git a/src/main/java/it/grid/storm/authz/path/model/PathOperation.java b/src/main/java/it/grid/storm/authz/path/model/PathOperation.java index cf4cd53f8..245ad44b1 100644 --- a/src/main/java/it/grid/storm/authz/path/model/PathOperation.java +++ b/src/main/java/it/grid/storm/authz/path/model/PathOperation.java @@ -25,12 +25,15 @@ */ public enum PathOperation { - WRITE_FILE('W', "WRITE_FILE", "Write data"), READ_FILE('R', "READ_FILE", "Read data", - true), RENAME('F', "RENAME", "Rename a file or a directory"), DELETE('D', "DELETE", - "Delete a file or a directory"), LIST_DIRECTORY('L', "LIST_DIRECTORY", - "Listing a directory", - true), MAKE_DIRECTORY('M', "CREATE_DIRECTORY", "Create a directory"), CREATE_FILE('N', - "CREATE_FILE", "Create a new file"), UNDEFINED('?', "UNDEFINED", "Undefined"); + + WRITE_FILE('W', "WRITE_FILE", "Write data"), + READ_FILE('R', "READ_FILE", "Read data", true), + RENAME('F', "RENAME", "Rename a file or a directory"), + DELETE('D', "DELETE", "Delete a file or a 
directory"), + LIST_DIRECTORY('L', "LIST_DIRECTORY", "Listing a directory", true), + MAKE_DIRECTORY('M', "CREATE_DIRECTORY", "Create a directory"), + CREATE_FILE('N', "CREATE_FILE", "Create a new file"), + UNDEFINED('?', "UNDEFINED", "Undefined"); private final char operation; private final String operationName; @@ -69,8 +72,6 @@ public static PathOperation getSpaceOperation(char op) { return RENAME; case 'D': return DELETE; - // case 'T': - // return TRAVERSE_DIRECTORY; case 'L': return LIST_DIRECTORY; case 'M': @@ -93,16 +94,6 @@ public char getSpaceOperationValue() { return operation; } - public PathOperation getSpaceOp(int ordinal) { - - PathOperation[] sp = PathOperation.values(); - if ((ordinal >= 0) && (ordinal < sp.length)) { - return sp[ordinal]; - } else { - return UNDEFINED; - } - } - public int getNumberOfPathOp() { return PathOperation.values().length - 1; diff --git a/src/main/java/it/grid/storm/authz/path/model/SRMFileRequest.java b/src/main/java/it/grid/storm/authz/path/model/SRMFileRequest.java index 7f49e3c69..e3e06c989 100644 --- a/src/main/java/it/grid/storm/authz/path/model/SRMFileRequest.java +++ b/src/main/java/it/grid/storm/authz/path/model/SRMFileRequest.java @@ -21,7 +21,6 @@ package it.grid.storm.authz.path.model; import java.util.ArrayList; -import java.util.HashMap; import java.util.List; /** @@ -161,55 +160,6 @@ public enum SRMFileRequest { private final String srmOp; private final PathAccessMask requestedPathOps; - private static HashMap ops = new HashMap() { - - /** - * - */ - private static final long serialVersionUID = 1L; - - { - put("PTP-Over", PTP_Overwrite); - put("srmPrepareToPut-overwrite", PTP_Overwrite); - put("PTP", PTP); - put("srmPrepareToPut", PTP); - put("PTG", PTG); - put("srmPrepareToGet", PTG); - put("CPto_Over", CPto_Overwrite); - put("srmCopy to-overwrite", CPto_Overwrite); - put("CPto", CPto); - put("srmCopy to", CPto); - put("CPFrom", CPfrom); - put("srmCopy from", CPfrom); - put("RM", RM); - put("srmRm", RM); - 
put("RMD", RMD); - put("srmRemoveDir", RM); - put("MD", MD); - put("srmMakeDir", MD); - put("LS", LS); - put("srmLs", LS); - put("MV-source", MV_source); - put("srmMove-source", MV_source); - put("MV-dest-Over", MV_dest_Overwrite); - put("srmMove-dest-overwrite", MV_dest_Overwrite); - put("MV-dest", MV_dest); - put("srmMove-dest", MV_dest); - } - }; - - /* - * Used only for testing - */ - public static SRMFileRequest buildFromString(String srmOp) { - - if (ops.containsKey(srmOp)) { - return ops.get(srmOp); - } else { - return null; - } - } - /** * SRMOperation */ diff --git a/src/main/java/it/grid/storm/authz/remote/resource/PermissionEvaluator.java b/src/main/java/it/grid/storm/authz/remote/resource/PermissionEvaluator.java index 421d01d6c..428631e9a 100644 --- a/src/main/java/it/grid/storm/authz/remote/resource/PermissionEvaluator.java +++ b/src/main/java/it/grid/storm/authz/remote/resource/PermissionEvaluator.java @@ -35,7 +35,6 @@ import it.grid.storm.authz.path.model.PathOperation; import it.grid.storm.authz.path.model.SRMFileRequest; import it.grid.storm.authz.remote.Constants; -import it.grid.storm.catalogs.OverwriteModeConverter; import it.grid.storm.common.types.InvalidStFNAttributeException; import it.grid.storm.common.types.StFN; import it.grid.storm.config.Configuration; @@ -47,6 +46,7 @@ import it.grid.storm.namespace.VirtualFSInterface; import it.grid.storm.namespace.model.MappingRule; import it.grid.storm.namespace.model.Protocol; +import it.grid.storm.persistence.converter.OverwriteModeConverter; import it.grid.storm.srm.types.TOverwriteMode; class PermissionEvaluator { diff --git a/src/main/java/it/grid/storm/authz/sa/AuthzDBReaderException.java b/src/main/java/it/grid/storm/authz/sa/AuthzDBReaderException.java index 1a3522b5f..11107991b 100644 --- a/src/main/java/it/grid/storm/authz/sa/AuthzDBReaderException.java +++ b/src/main/java/it/grid/storm/authz/sa/AuthzDBReaderException.java @@ -33,14 +33,4 @@ public AuthzDBReaderException(String message) 
{ super(message); } - - public AuthzDBReaderException(String message, Throwable cause) { - - super(message, cause); - } - - public AuthzDBReaderException(Throwable cause) { - - super(cause); - } } diff --git a/src/main/java/it/grid/storm/authz/sa/SpaceAuthz.java b/src/main/java/it/grid/storm/authz/sa/SpaceAuthz.java deleted file mode 100644 index 8cec4103e..000000000 --- a/src/main/java/it/grid/storm/authz/sa/SpaceAuthz.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ - -package it.grid.storm.authz.sa; - -import it.grid.storm.authz.SpaceAuthzInterface; -import it.grid.storm.authz.sa.model.SRMSpaceRequest; -import it.grid.storm.griduser.GridUserInterface; - -public abstract class SpaceAuthz implements SpaceAuthzInterface { - - private AuthzDBInterface authzDB; - - /** - * @todo: 1) IMPLEMENT AUHTZ ENGINE - * @todo: 2) IMPLEMENT CACHE - * @todo: 3) IMPLEMENT PRINCIPAL LIST PERSISTENCE - * @todo: 4) IMPLEMENT RECALCULATE CACHE - */ - - public SpaceAuthz() { - - super(); - } - - public abstract boolean authorize(GridUserInterface guser, SRMSpaceRequest srmSpaceOp); - - public void setAuthzDB(AuthzDBInterface authzDB) { - - this.authzDB = authzDB; - } - - public AuthzDBInterface getAuthzDB() { - - return authzDB; - } - -} diff --git a/src/main/java/it/grid/storm/authz/sa/SpaceDBAuthz.java b/src/main/java/it/grid/storm/authz/sa/SpaceDBAuthz.java deleted file mode 100644 index 8c891db0b..000000000 --- a/src/main/java/it/grid/storm/authz/sa/SpaceDBAuthz.java +++ /dev/null @@ -1,133 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ - -/** - * - */ -package it.grid.storm.authz.sa; - -import java.io.File; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import it.grid.storm.authz.sa.model.SRMSpaceRequest; -import it.grid.storm.config.Configuration; -import it.grid.storm.griduser.GridUserInterface; - -/** - * @author zappi - */ -public class SpaceDBAuthz extends SpaceAuthz { - - private static final Logger log = LoggerFactory.getLogger(SpaceDBAuthz.class); - - public static final String UNDEF = "undef-SpaceAuthzDB"; - - private String spaceAuthzDBID = "not-defined"; - private static String configurationPATH; - private String dbFileName; - - public SpaceDBAuthz() { - - } - - /** - * @return - */ - public static SpaceDBAuthz makeEmpty() { - - SpaceDBAuthz result = new SpaceDBAuthz(); - result.setSpaceAuthzDBID("default-SpaceAuthzDB"); - return result; - } - - public SpaceDBAuthz(String dbFileName) { - - Configuration config = Configuration.getInstance(); - configurationPATH = config.namespaceConfigPath(); - if (existsAuthzDBFile(dbFileName)) { - this.dbFileName = dbFileName; - spaceAuthzDBID = dbFileName; - } - } - - /** - * @param string - */ - void setSpaceAuthzDBID(String id) { - - spaceAuthzDBID = id; - } - - /** - * - */ - @Override - public boolean authorize(GridUserInterface guser, SRMSpaceRequest srmSpaceOp) { - - return false; - } - - @Override - public boolean authorizeAnonymous(SRMSpaceRequest srmSpaceOp) { - - return false; - } - - - /********************************************************************** - * BUILDINGs METHODS - */ - - /** - * Check the existence of the AuthzDB file - */ - private boolean existsAuthzDBFile(String dbFileName) { - - String fileName = configurationPATH + File.separator + dbFileName; - boolean exists = (new File(fileName)).exists(); - if (!(exists)) { - log.error("The AuthzDB File '{}' does not exists", dbFileName); - } - return exists; - } - - /** - * Return the AuthzDB FileName - * - * @return - */ - String getAuthzDBFileName() { - - 
return dbFileName; - } - - public String getSpaceAuthzID() { - - return spaceAuthzDBID; - } - - /** - * - */ - public void refresh() { - - // empty - } - -} diff --git a/src/main/java/it/grid/storm/authz/sa/SpaceFixedAuthz.java b/src/main/java/it/grid/storm/authz/sa/SpaceFixedAuthz.java deleted file mode 100644 index 3ccec5bb4..000000000 --- a/src/main/java/it/grid/storm/authz/sa/SpaceFixedAuthz.java +++ /dev/null @@ -1,62 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -/** - * - */ -package it.grid.storm.authz.sa; - -import it.grid.storm.authz.sa.model.AuthzDBFixed; -import it.grid.storm.authz.sa.model.SRMSpaceRequest; -import it.grid.storm.griduser.GridUserInterface; - -/** - * @author zappi - */ -public class SpaceFixedAuthz extends SpaceAuthz { - - private static final String FIXED_ID = "fixed-space-authz"; - - public SpaceFixedAuthz(AuthzDBFixed fixedAuthzDB) - throws AuthzDBReaderException { - - } - - @Override - public boolean authorize(GridUserInterface guser, SRMSpaceRequest srmSpaceOp) { - - // @todo : implement the simple algorithm. 
- return true; - } - - @Override - public boolean authorizeAnonymous(SRMSpaceRequest srmSpaceOp) { - - // TODO Auto-generated method stub - return true; - } - - public String getSpaceAuthzID() { - - return FIXED_ID; - } - - public void refresh() { - - } - -} diff --git a/src/main/java/it/grid/storm/catalogs/AnonymousFileTransferData.java b/src/main/java/it/grid/storm/catalogs/AnonymousFileTransferData.java deleted file mode 100644 index 70da88a0b..000000000 --- a/src/main/java/it/grid/storm/catalogs/AnonymousFileTransferData.java +++ /dev/null @@ -1,86 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ - -package it.grid.storm.catalogs; - -import it.grid.storm.common.types.TURLPrefix; -import it.grid.storm.srm.types.TReturnStatus; -import it.grid.storm.srm.types.TSURL; -import it.grid.storm.srm.types.TTURL; - -/** - * @author Michele Dibenedetto - * - */ - -public abstract class AnonymousFileTransferData extends - SurlMultyOperationRequestData implements FileTransferData { - - protected TURLPrefix transferProtocols; - protected TTURL transferURL; - - public AnonymousFileTransferData(TSURL toSURL, TURLPrefix transferProtocols, - TReturnStatus status, TTURL transferURL) - throws InvalidFileTransferDataAttributesException, - InvalidSurlRequestDataAttributesException { - - super(toSURL, status); - if (transferProtocols == null || transferURL == null) { - throw new InvalidFileTransferDataAttributesException(toSURL, - transferProtocols, status, transferURL); - } - this.transferProtocols = transferProtocols; - this.transferURL = transferURL; - } - - /* - * (non-Javadoc) - * - * @see it.grid.storm.catalogs.FileTransferData#getTransferProtocols() - */ - @Override - public final TURLPrefix getTransferProtocols() { - - return transferProtocols; - } - - /* - * (non-Javadoc) - * - * @see it.grid.storm.catalogs.FileTransferData#getTransferURL() - */ - @Override - public final TTURL getTransferURL() { - - return transferURL; - } - - /* - * (non-Javadoc) - * - * @see - * it.grid.storm.catalogs.FileTransferData#setTransferURL(it.grid.storm.srm - * .types.TTURL) - */ - @Override - public final void setTransferURL(final TTURL turl) { - - if (turl != null) { - transferURL = turl; - } - } -} diff --git a/src/main/java/it/grid/storm/catalogs/AnonymousPtGData.java b/src/main/java/it/grid/storm/catalogs/AnonymousPtGData.java deleted file mode 100644 index 615c590fe..000000000 --- a/src/main/java/it/grid/storm/catalogs/AnonymousPtGData.java +++ /dev/null @@ -1,236 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.catalogs; - -import it.grid.storm.common.types.TURLPrefix; -import it.grid.storm.srm.types.TDirOption; -import it.grid.storm.srm.types.TLifeTimeInSeconds; -import it.grid.storm.srm.types.TReturnStatus; -import it.grid.storm.srm.types.TSURL; -import it.grid.storm.srm.types.TSizeInBytes; -import it.grid.storm.srm.types.TStatusCode; -import it.grid.storm.srm.types.TTURL; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * This class represents a PrepareToGetChunkData, that is part of a multifile - * PrepareToGet srm request. It contains data about: the requestToken, the - * fromSURL, the requested lifeTime of pinning, the TDirOption which tells - * whether the requested SURL is a directory and if it must be recursed at all - * levels, as well as the desired number of levels to recurse, the desired - * transferProtocols in order of preference, the fileSize, and the transferURL - * for the supplied SURL. - * - * @author EGRID - ICTP Trieste - * @date March 21st, 2005 - * @version 3.0 - */ -public class AnonymousPtGData extends AnonymousFileTransferData implements - PtGData { - - private static final Logger log = LoggerFactory - .getLogger(AnonymousPtGData.class); - - /** requested lifetime of TURL: it is the pin time! 
*/ - protected TLifeTimeInSeconds pinLifeTime; - /** specifies if the request regards a directory and related info */ - protected TDirOption dirOption; - /** size of file */ - protected TSizeInBytes fileSize; - - /** - * @param requestToken - * @param fromSURL - * @param lifeTime - * @param dirOption - * @param desiredProtocols - * @param fileSize - * @param status - * @param transferURL - * @throws InvalidPtGDataAttributesException - */ - public AnonymousPtGData(TSURL SURL, TLifeTimeInSeconds lifeTime, - TDirOption dirOption, TURLPrefix desiredProtocols, TSizeInBytes fileSize, - TReturnStatus status, TTURL transferURL) - throws InvalidPtGDataAttributesException, - InvalidFileTransferDataAttributesException, - InvalidSurlRequestDataAttributesException { - - super(SURL, desiredProtocols, status, transferURL); - if (lifeTime == null || dirOption == null || fileSize == null) { - log.debug("Invalid arguments: lifeTime={}, dirOption={}, fileSize={}", - lifeTime, dirOption, fileSize); - throw new InvalidPtGDataAttributesException(SURL, lifeTime, dirOption, - desiredProtocols, fileSize, status, transferURL); - - } - this.pinLifeTime = lifeTime; - this.dirOption = dirOption; - this.fileSize = fileSize; - } - - /* - * (non-Javadoc) - * - * @see it.grid.storm.catalogs.PtGData#getPinLifeTime() - */ - @Override - public TLifeTimeInSeconds getPinLifeTime() { - - return pinLifeTime; - } - - /* - * (non-Javadoc) - * - * @see it.grid.storm.catalogs.PtGData#getDirOption() - */ - @Override - public TDirOption getDirOption() { - - return dirOption; - } - - /* - * (non-Javadoc) - * - * @see it.grid.storm.catalogs.PtGData#getFileSize() - */ - @Override - public TSizeInBytes getFileSize() { - - return fileSize; - } - - /* - * (non-Javadoc) - * - * @see - * it.grid.storm.catalogs.PtGData#setFileSize(it.grid.storm.srm.types.TSizeInBytes - * ) - */ - @Override - public void setFileSize(TSizeInBytes size) { - - if (size != null) { - fileSize = size; - } - } - - /* - * (non-Javadoc) - * - * 
@see - * it.grid.storm.catalogs.PtGData#changeStatusSRM_FILE_PINNED(java.lang.String - * ) - */ - @Override - public void changeStatusSRM_FILE_PINNED(String explanation) { - - setStatus(TStatusCode.SRM_FILE_PINNED, explanation); - } - - /* - * (non-Javadoc) - * - * @see java.lang.Object#toString() - */ - @Override - public String toString() { - - StringBuilder builder = new StringBuilder(); - builder.append("PtGChunkData [pinLifeTime="); - builder.append(pinLifeTime); - builder.append(", dirOption="); - builder.append(dirOption); - builder.append(", fileSize="); - builder.append(fileSize); - builder.append(", transferProtocols="); - builder.append(transferProtocols); - builder.append(", SURL="); - builder.append(SURL); - builder.append(", status="); - builder.append(status); - builder.append(", transferURL="); - builder.append(transferURL); - builder.append("]"); - return builder.toString(); - } - - /* - * (non-Javadoc) - * - * @see java.lang.Object#hashCode() - */ - @Override - public int hashCode() { - - final int prime = 31; - int result = super.hashCode(); - result = prime * result + ((dirOption == null) ? 0 : dirOption.hashCode()); - result = prime * result + ((fileSize == null) ? 0 : fileSize.hashCode()); - result = prime * result - + ((pinLifeTime == null) ? 
0 : pinLifeTime.hashCode()); - return result; - } - - /* - * (non-Javadoc) - * - * @see java.lang.Object#equals(java.lang.Object) - */ - @Override - public boolean equals(Object obj) { - - if (this == obj) { - return true; - } - if (!super.equals(obj)) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - AnonymousPtGData other = (AnonymousPtGData) obj; - if (dirOption == null) { - if (other.dirOption != null) { - return false; - } - } else if (!dirOption.equals(other.dirOption)) { - return false; - } - if (fileSize == null) { - if (other.fileSize != null) { - return false; - } - } else if (!fileSize.equals(other.fileSize)) { - return false; - } - if (pinLifeTime == null) { - if (other.pinLifeTime != null) { - return false; - } - } else if (!pinLifeTime.equals(other.pinLifeTime)) { - return false; - } - return true; - } - -} diff --git a/src/main/java/it/grid/storm/catalogs/AnonymousPtPData.java b/src/main/java/it/grid/storm/catalogs/AnonymousPtPData.java deleted file mode 100644 index dca2d5af5..000000000 --- a/src/main/java/it/grid/storm/catalogs/AnonymousPtPData.java +++ /dev/null @@ -1,240 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ - -package it.grid.storm.catalogs; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import it.grid.storm.common.types.TURLPrefix; -import it.grid.storm.srm.types.TFileStorageType; -import it.grid.storm.srm.types.TLifeTimeInSeconds; -import it.grid.storm.srm.types.TOverwriteMode; -import it.grid.storm.srm.types.TReturnStatus; -import it.grid.storm.srm.types.TSURL; -import it.grid.storm.srm.types.TSizeInBytes; -import it.grid.storm.srm.types.TSpaceToken; -import it.grid.storm.srm.types.TStatusCode; -import it.grid.storm.srm.types.TTURL; - -/** - * @author Michele Dibenedetto - * - */ -public class AnonymousPtPData extends AnonymousFileTransferData implements - PtPData { - - private static final Logger log = LoggerFactory.getLogger(AnonymousPtPData.class); - - protected TSpaceToken spaceToken; - protected TLifeTimeInSeconds pinLifetime; - protected TLifeTimeInSeconds fileLifetime; - protected TFileStorageType fileStorageType; - protected TOverwriteMode overwriteOption; - protected TSizeInBytes expectedFileSize; - - public AnonymousPtPData(TSURL toSURL, TLifeTimeInSeconds pinLifetime, - TLifeTimeInSeconds fileLifetime, TFileStorageType fileStorageType, - TSpaceToken spaceToken, TSizeInBytes expectedFileSize, - TURLPrefix transferProtocols, TOverwriteMode overwriteOption, - TReturnStatus status, TTURL transferURL) - throws InvalidPtPDataAttributesException, - InvalidFileTransferDataAttributesException, - InvalidSurlRequestDataAttributesException { - - super(toSURL, transferProtocols, status, transferURL); - if (pinLifetime == null || fileLifetime == null || spaceToken == null - || fileStorageType == null || expectedFileSize == null - || overwriteOption == null) { - log.debug("Invalid arguments: pinLifetime={}, fileLifetime={}, " - + "spaceToken={}, fileStorageType={}, expectedFileSize={}, " - + "overwriteOption={}", pinLifetime, fileLifetime, spaceToken, - fileStorageType, expectedFileSize, overwriteOption); - throw new 
InvalidPtPDataAttributesException(toSURL, pinLifetime, - fileLifetime, fileStorageType, spaceToken, expectedFileSize, - transferProtocols, overwriteOption, status, transferURL); - } - this.spaceToken = spaceToken; - this.pinLifetime = pinLifetime; - this.fileLifetime = fileLifetime; - this.fileStorageType = fileStorageType; - this.expectedFileSize = expectedFileSize; - this.overwriteOption = overwriteOption; - } - - /* - * (non-Javadoc) - * - * @see it.grid.storm.catalogs.PtPData#getSpaceToken() - */ - @Override - public final TSpaceToken getSpaceToken() { - - return spaceToken; - } - - /* - * (non-Javadoc) - * - * @see it.grid.storm.catalogs.PtPData#pinLifetime() - */ - @Override - public TLifeTimeInSeconds pinLifetime() { - - return pinLifetime; - } - - /* - * (non-Javadoc) - * - * @see it.grid.storm.catalogs.PtPData#fileLifetime() - */ - @Override - public TLifeTimeInSeconds fileLifetime() { - - return fileLifetime; - } - - /* - * (non-Javadoc) - * - * @see it.grid.storm.catalogs.PtPData#fileStorageType() - */ - @Override - public TFileStorageType fileStorageType() { - - return fileStorageType; - } - - /* - * (non-Javadoc) - * - * @see it.grid.storm.catalogs.PtPData#expectedFileSize() - */ - @Override - public TSizeInBytes expectedFileSize() { - - return expectedFileSize; - } - - /* - * (non-Javadoc) - * - * @see it.grid.storm.catalogs.PtPData#overwriteOption() - */ - @Override - public TOverwriteMode overwriteOption() { - - return overwriteOption; - } - - /** - * Method that sets the status of this request to SRM_SPACE_AVAILABLE; it - * needs the explanation String which describes the situation in greater - * detail; if a null is passed, then an empty String is used as explanation. 
- */ - @Override - public void changeStatusSRM_SPACE_AVAILABLE(String explanation) { - - setStatus(TStatusCode.SRM_SPACE_AVAILABLE, explanation); - } - - /** - * Method that sets the status of this request to SRM_DUPLICATION_ERROR; it - * needs the explanation String which describes the situation in greater - * detail; if a null is passed, then an empty String is used as explanation. - */ - @Override - public void changeStatusSRM_DUPLICATION_ERROR(String explanation) { - - setStatus(TStatusCode.SRM_DUPLICATION_ERROR, explanation); - } - - @Override - public String toString() { - - StringBuilder sb = new StringBuilder(); - sb.append("PtPChunkData\n"); - sb.append("toSURL="); - sb.append(SURL); - sb.append("; "); - sb.append("pinLifetime="); - sb.append(pinLifetime); - sb.append("; "); - sb.append("fileLifetime="); - sb.append(fileLifetime); - sb.append("; "); - sb.append("fileStorageType="); - sb.append(fileStorageType); - sb.append("; "); - sb.append("spaceToken="); - sb.append(spaceToken); - sb.append("; "); - sb.append("expectedFileSize="); - sb.append(expectedFileSize); - sb.append("; "); - sb.append("transferProtocols="); - sb.append(transferProtocols); - sb.append("; "); - sb.append("overwriteOption="); - sb.append(overwriteOption); - sb.append("; "); - sb.append("status="); - sb.append(status); - sb.append("; "); - sb.append("transferURL="); - sb.append(transferURL); - sb.append("; "); - return sb.toString(); - } - - @Override - public int hashCode() { - - int hash = 17; - hash = 37 * hash + SURL.hashCode(); - hash = 37 * hash + pinLifetime.hashCode(); - hash = 37 * hash + fileLifetime.hashCode(); - hash = 37 * hash + fileStorageType.hashCode(); - hash = 37 * hash + spaceToken.hashCode(); - hash = 37 * hash + expectedFileSize.hashCode(); - hash = 37 * hash + transferProtocols.hashCode(); - hash = 37 * hash + overwriteOption.hashCode(); - hash = 37 * hash + status.hashCode(); - hash = 37 * hash + transferURL.hashCode(); - return hash; - } - - @Override - 
public boolean equals(Object o) { - - if (o == this) { - return true; - } - if (!(o instanceof AnonymousPtPData)) { - return false; - } - AnonymousPtPData cd = (AnonymousPtPData) o; - return SURL.equals(cd.SURL) && pinLifetime.equals(cd.pinLifetime) - && fileLifetime.equals(cd.fileLifetime) - && fileStorageType.equals(cd.fileStorageType) - && spaceToken.equals(cd.spaceToken) - && expectedFileSize.equals(cd.expectedFileSize) - && transferProtocols.equals(cd.transferProtocols) - && overwriteOption.equals(cd.overwriteOption) && status.equals(cd.status) - && transferURL.equals(cd.transferURL); - } -} diff --git a/src/main/java/it/grid/storm/catalogs/BoLChunkCatalog.java b/src/main/java/it/grid/storm/catalogs/BoLChunkCatalog.java index 31b6a1407..4e88d2187 100644 --- a/src/main/java/it/grid/storm/catalogs/BoLChunkCatalog.java +++ b/src/main/java/it/grid/storm/catalogs/BoLChunkCatalog.java @@ -17,14 +17,31 @@ package it.grid.storm.catalogs; +import static it.grid.storm.srm.types.TStatusCode.SRM_FAILURE; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import it.grid.storm.common.types.SizeUnit; import it.grid.storm.common.types.TURLPrefix; import it.grid.storm.common.types.TimeUnit; import it.grid.storm.config.Configuration; -import it.grid.storm.griduser.GridUserInterface; -// import it.grid.storm.namespace.SurlStatusStore; +import it.grid.storm.persistence.converter.PinLifetimeConverter; +import it.grid.storm.persistence.converter.StatusCodeConverter; +import it.grid.storm.persistence.converter.TransferProtocolListConverter; +import it.grid.storm.persistence.dao.BoLChunkDAO; +import it.grid.storm.persistence.exceptions.InvalidReducedBoLChunkDataAttributesException; +import it.grid.storm.persistence.exceptions.InvalidSurlRequestDataAttributesException; +import it.grid.storm.persistence.impl.mysql.BoLChunkDAOMySql; +import it.grid.storm.persistence.model.BoLChunkDataTO; 
+import it.grid.storm.persistence.model.BoLPersistentChunkData; +import it.grid.storm.persistence.model.ReducedBoLChunkData; +import it.grid.storm.persistence.model.ReducedBoLChunkDataTO; import it.grid.storm.srm.types.InvalidTDirOptionAttributesException; -import it.grid.storm.srm.types.InvalidTRequestTokenAttributesException; import it.grid.storm.srm.types.InvalidTSURLAttributesException; import it.grid.storm.srm.types.InvalidTSizeAttributesException; import it.grid.storm.srm.types.TDirOption; @@ -36,20 +53,10 @@ import it.grid.storm.srm.types.TStatusCode; import it.grid.storm.srm.types.TTURL; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.List; -import java.util.Timer; -import java.util.TimerTask; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - /** - * Class that represents StoRMs BoLChunkCatalog: it collects BoLChunkData and - * provides methods for looking up a BoLChunkData based on TRequestToken, as - * well as for adding a new entry and removing an existing one. + * Class that represents StoRMs BoLChunkCatalog: it collects BoLChunkData and provides methods for + * looking up a BoLChunkData based on TRequestToken, as well as for adding a new entry and removing + * an existing one. * * @author CNAF * @date Aug 2009 @@ -57,762 +64,306 @@ */ public class BoLChunkCatalog { - private static final Logger log = LoggerFactory - .getLogger(BoLChunkCatalog.class); - - /* only instance of BoLChunkCatalog present in StoRM! */ - private static final BoLChunkCatalog cat = new BoLChunkCatalog(); - private final BoLChunkDAO dao = BoLChunkDAO.getInstance(); - - /* - * Timer object in charge of transiting expired requests from SRM_FILE_PINNED - * to SRM_RELEASED! - */ - private final Timer transiter = new Timer(); - /* Delay time before starting cleaning thread! */ - private final long delay = Configuration.getInstance() - .getTransitInitialDelay() * 1000; - /* Period of execution of cleaning! 
*/ - private final long period = Configuration.getInstance() - .getTransitTimeInterval() * 1000; - - /** - * Private constructor that starts the internal timer needed to periodically - * check and transit requests whose pinLifetime has expired and are in - * SRM_FILE_PINNED, to SRM_RELEASED. - */ - private BoLChunkCatalog() { - - TimerTask transitTask = new TimerTask() { - - @Override - public void run() { - - transitExpiredSRM_SUCCESS(); - } - }; - transiter.scheduleAtFixedRate(transitTask, delay, period); - } - - /** - * Method that returns the only instance of BoLChunkCatalog available. - */ - public static BoLChunkCatalog getInstance() { - - return cat; - } - - /** - * Method that returns a Collection of BoLChunkData Objects matching the - * supplied TRequestToken. - * - * If any of the data associated to the TRequestToken is not well formed and - * so does not allow a BoLChunkData Object to be created, then that part of - * the request is dropped and gets logged, and the processing continues with - * the next part. All valid chunks get returned: the others get dropped. - * - * If there are no chunks to process then an empty Collection is returned, and - * a message gets logged. - */ - synchronized public Collection lookup(TRequestToken rt) { - - Collection chunkCollection = dao.find(rt); - log.debug("BoL CHUNK CATALOG: retrieved data {}", chunkCollection); - List list = new ArrayList(); - - if (chunkCollection.isEmpty()) { - log.warn("BoL CHUNK CATALOG! No chunks found in persistence for specified " - + "request: {}", rt); - return list; - } - - BoLPersistentChunkData chunk; - for (BoLChunkDataTO chunkTO : chunkCollection) { - chunk = makeOne(chunkTO, rt); - if (chunk == null) { - continue; - } - list.add(chunk); - if (isComplete(chunkTO)) { - continue; - } - try { - dao.updateIncomplete(completeTO(chunkTO, chunk)); - } catch (InvalidReducedBoLChunkDataAttributesException e) { - log.warn("BoL CHUNK CATALOG! 
unable to add missing informations on DB " - + "to the request: {}", e.getMessage()); - } - } - log.debug("BoL CHUNK CATALOG: returning " + list); - return list; - } - - /** - * Generates a BoLChunkData from the received BoLChunkDataTO - * - * @param auxTO - * @param rt - * @return - */ - private BoLPersistentChunkData makeOne(BoLChunkDataTO auxTO, TRequestToken rt) { - - StringBuilder errorSb = new StringBuilder(); - TSURL fromSURL = null; - try { - fromSURL = TSURL.makeFromStringValidate(auxTO.getFromSURL()); - } catch (InvalidTSURLAttributesException e) { - errorSb.append(e); - } - if (auxTO.normalizedStFN() != null) { - fromSURL.setNormalizedStFN(auxTO.normalizedStFN()); - } - if (auxTO.sulrUniqueID() != null) { - fromSURL.setUniqueID(auxTO.sulrUniqueID().intValue()); - } - // lifeTime - TLifeTimeInSeconds lifeTime = null; - try { - long pinLifeTime = PinLifetimeConverter.getInstance().toStoRM( - auxTO.getLifeTime()); - // Check for max value allowed - long max = Configuration.getInstance().getPinLifetimeMaximum(); - if (pinLifeTime > max) { - log.warn("PinLifeTime is greater than the max value allowed. " - + "Drop the value to the max = {} seconds", max); - pinLifeTime = max; - } - lifeTime = TLifeTimeInSeconds.make(pinLifeTime, TimeUnit.SECONDS); - } catch (IllegalArgumentException e) { - errorSb.append("\n"); - errorSb.append(e); - } - // dirOption - TDirOption dirOption = null; - try { - dirOption = new TDirOption(auxTO.getDirOption(), - auxTO.getAllLevelRecursive(), auxTO.getNumLevel()); - } catch (InvalidTDirOptionAttributesException e) { - errorSb.append("\n"); - errorSb.append(e); - } - // transferProtocols - TURLPrefix transferProtocols = TransferProtocolListConverter.toSTORM(auxTO - .getProtocolList()); - if (transferProtocols.size() == 0) { - errorSb.append("\nEmpty list of TransferProtocols or" - + " could not translate TransferProtocols!"); - /* fail construction of BoLChunkData! 
*/ - transferProtocols = null; - } - // fileSize - TSizeInBytes fileSize = null; - try { - fileSize = TSizeInBytes.make(auxTO.getFileSize(), SizeUnit.BYTES); - } catch (InvalidTSizeAttributesException e) { - errorSb.append("\n"); - errorSb.append(e); - } - // status - TReturnStatus status = null; - TStatusCode code = StatusCodeConverter.getInstance().toSTORM( - auxTO.getStatus()); - if (code == TStatusCode.EMPTY) { - errorSb.append("\nRetrieved StatusCode was not recognised: " - + auxTO.getStatus()); - } else { - status = new TReturnStatus(code, auxTO.getErrString()); - } - // transferURL - /* - * whatever is read is just meaningless because BoL will fill it in!!! So - * create an Empty TTURL by default! Vital to avoid problems with unknown - * DPM NULL/EMPTY logic policy! - */ - TTURL transferURL = TTURL.makeEmpty(); - // make BoLChunkData - BoLPersistentChunkData aux = null; - try { - aux = new BoLPersistentChunkData(rt, fromSURL, lifeTime, dirOption, - transferProtocols, fileSize, status, transferURL, - auxTO.getDeferredStartTime()); - aux.setPrimaryKey(auxTO.getPrimaryKey()); - } catch (InvalidSurlRequestDataAttributesException e) { - dao.signalMalformedBoLChunk(auxTO); - log.warn("BoL CHUNK CATALOG! Retrieved malformed BoL " - + "chunk data from persistence. Dropping chunk from request {}", rt); - log.warn(e.getMessage(), e); - log.warn(errorSb.toString()); - } - // end... 
- return aux; - } - - /** - * - * Adds to the received BoLChunkDataTO the normalized StFN and the SURL unique - * ID taken from the BoLChunkData - * - * @param chunkTO - * @param chunk - */ - private void completeTO(ReducedBoLChunkDataTO chunkTO, - final ReducedBoLChunkData chunk) { - - chunkTO.setNormalizedStFN(chunk.fromSURL().normalizedStFN()); - chunkTO.setSurlUniqueID(new Integer(chunk.fromSURL().uniqueId())); - } - - /** - * - * Creates a ReducedBoLChunkDataTO from the received BoLChunkDataTO and - * completes it with the normalized StFN and the SURL unique ID taken from the - * PtGChunkData - * - * @param chunkTO - * @param chunk - * @return - * @throws InvalidReducedBoLChunkDataAttributesException - */ - private ReducedBoLChunkDataTO completeTO(BoLChunkDataTO chunkTO, - final BoLPersistentChunkData chunk) - throws InvalidReducedBoLChunkDataAttributesException { - - ReducedBoLChunkDataTO reducedChunkTO = this.reduce(chunkTO); - this.completeTO(reducedChunkTO, this.reduce(chunk)); - return reducedChunkTO; - } - - /** - * Creates a ReducedBoLChunkData from the data contained in the received - * BoLChunkData - * - * @param chunk - * @return - * @throws InvalidReducedBoLChunkDataAttributesException - */ - private ReducedBoLChunkData reduce(BoLPersistentChunkData chunk) - throws InvalidReducedBoLChunkDataAttributesException { - - ReducedBoLChunkData reducedChunk = new ReducedBoLChunkData(chunk.getSURL(), - chunk.getStatus()); - reducedChunk.setPrimaryKey(chunk.getPrimaryKey()); - return reducedChunk; - } - - /** - * Creates a ReducedBoLChunkDataTO from the data contained in the received - * BoLChunkDataTO - * - * @param chunkTO - * @return - */ - private ReducedBoLChunkDataTO reduce(BoLChunkDataTO chunkTO) { - - ReducedBoLChunkDataTO reducedChunkTO = new ReducedBoLChunkDataTO(); - reducedChunkTO.setPrimaryKey(chunkTO.getPrimaryKey()); - reducedChunkTO.setFromSURL(chunkTO.getFromSURL()); - reducedChunkTO.setNormalizedStFN(chunkTO.normalizedStFN()); - 
reducedChunkTO.setSurlUniqueID(chunkTO.sulrUniqueID()); - reducedChunkTO.setStatus(chunkTO.getStatus()); - reducedChunkTO.setErrString(chunkTO.getErrString()); - return reducedChunkTO; - } - - /** - * Checks if the received BoLChunkDataTO contains the fields not set by the - * front end but required - * - * @param chunkTO - * @return - */ - private boolean isComplete(BoLChunkDataTO chunkTO) { - - return (chunkTO.normalizedStFN() != null) - && (chunkTO.sulrUniqueID() != null); - } - - /** - * Checks if the received ReducedBoLChunkDataTO contains the fields not set by - * the front end but required - * - * @param reducedChunkTO - * @return - */ - // TODO MICHELE USER_SURL new method - private boolean isComplete(ReducedBoLChunkDataTO reducedChunkTO) { - - return (reducedChunkTO.normalizedStFN() != null) - && (reducedChunkTO.surlUniqueID() != null); - } - - /** - * Method used to update into Persistence a retrieved BoLChunkData. In case - * any error occurs, the operation does not proceed but no Exception is - * thrown. Error messages get logged. - * - * Only fileSize, StatusCode, errString and transferURL are updated. Likewise - * for the request pinLifetime. - */ - synchronized public void update(BoLPersistentChunkData cd) { - - BoLChunkDataTO to = new BoLChunkDataTO(); - /* Primary key needed by DAO Object */ - to.setPrimaryKey(cd.getPrimaryKey()); - to.setFileSize(cd.getFileSize().value()); - to.setStatus(StatusCodeConverter.getInstance().toDB( - cd.getStatus().getStatusCode())); - to.setErrString(cd.getStatus().getExplanation()); - to.setLifeTime(PinLifetimeConverter.getInstance().toDB( - cd.getLifeTime().value())); - // TODO MICHELE USER_SURL fill new fields - to.setNormalizedStFN(cd.getSURL().normalizedStFN()); - to.setSurlUniqueID(new Integer(cd.getSURL().uniqueId())); - - dao.update(to); - // TODO MICHELE SURL STORE - // SurlStatusStore.getInstance().storeSurlStatus(cd.getSURL(), - // cd.getStatus().getStatusCode()); - } - - /** - * Refresh method. 
TODO THIS IS A WORK IN PROGRESS!!!! This method have to - * synch the ChunkData information with the database status. - * - * @param auxTO - * @param BoLPersistentChunkData - * inputChunk - * @return BoLChunkData outputChunk - */ - synchronized public BoLPersistentChunkData refreshStatus( - BoLPersistentChunkData inputChunk) { - - /* Currently not used */ - // Call the dao refresh method to synch with the db status - BoLChunkDataTO auxTO = dao.refresh(inputChunk.getPrimaryKey()); - - log.debug("BoL CHUNK CATALOG: retrieved data {}", auxTO); - if (auxTO == null) { - log.warn("BoL CHUNK CATALOG! Empty TO found in persistence for specified " - + "request: {}", inputChunk.getPrimaryKey()); - return inputChunk; - } - - /* - * In this first version the only field updated is the Status. Once - * updated, the new status is rewritten into the input ChunkData - */ - - // status - TReturnStatus status = null; - TStatusCode code = StatusCodeConverter.getInstance().toSTORM(auxTO.getStatus()); - if (code != TStatusCode.EMPTY) { - status = new TReturnStatus(code, auxTO.getErrString()); - } - inputChunk.setStatus(status); - return inputChunk; - } - - /** - * Method that returns a Collection of ReducedBoLChunkData Objects associated - * to the supplied TRequestToken. - * - * If any of the data retrieved for a given chunk is not well formed and so - * does not allow a ReducedBoLChunkData Object to be created, then that chunk - * is dropped and gets logged, while processing continues with the next one. - * All valid chunks get returned: the others get dropped. - * - * If there are no chunks associated to the given TRequestToken, then an empty - * Collection is returned and a messagge gets logged. 
- */ - synchronized public Collection lookupReducedBoLChunkData( - TRequestToken rt) { - - Collection reducedChunkDataTOs = dao.findReduced(rt - .getValue()); - log.debug("BoL CHUNK CATALOG: retrieved data {}", reducedChunkDataTOs); - ArrayList list = new ArrayList(); - if (reducedChunkDataTOs.isEmpty()) { - log.debug("BoL CHUNK CATALOG! No chunks found in persistence for {}", rt); - } else { - ReducedBoLChunkData reducedChunkData = null; - for (ReducedBoLChunkDataTO reducedChunkDataTO : reducedChunkDataTOs) { - reducedChunkData = makeOneReduced(reducedChunkDataTO); - if (reducedChunkData != null) { - list.add(reducedChunkData); - if (!this.isComplete(reducedChunkDataTO)) { - completeTO(reducedChunkDataTO, reducedChunkData); - dao.updateIncomplete(reducedChunkDataTO); - } - } - } - log.debug("BoL CHUNK CATALOG: returning {}", list); - } - return list; - } - - public Collection lookupReducedBoLChunkData( - TRequestToken requestToken, Collection surls) { - - int[] surlsUniqueIDs = new int[surls.size()]; - String[] surlsArray = new String[surls.size()]; - int index = 0; - for (TSURL tsurl : surls) { - surlsUniqueIDs[index] = tsurl.uniqueId(); - surlsArray[index] = tsurl.rawSurl(); - index++; - } - Collection chunkDataTOCollection = dao.findReduced( - requestToken, surlsUniqueIDs, surlsArray); - return buildReducedChunkDataList(chunkDataTOCollection); - } - - public Collection lookupBoLChunkData(TSURL surl, - GridUserInterface user) { - - return lookupBoLChunkData(Arrays.asList(new TSURL[] { surl }), user); - } - - public Collection lookupBoLChunkData(TSURL surl) { - - return lookupBoLChunkData(Arrays.asList(new TSURL[] { surl })); - } - - private Collection lookupBoLChunkData( - List surls, GridUserInterface user) { - - int[] surlsUniqueIDs = new int[surls.size()]; - String[] surlsArray = new String[surls.size()]; - int index = 0; - for (TSURL tsurl : surls) { - surlsUniqueIDs[index] = tsurl.uniqueId(); - surlsArray[index] = tsurl.rawSurl(); - index++; - } - 
Collection chunkDataTOCollection = dao.find(surlsUniqueIDs, - surlsArray, user.getDn()); - log.debug("BoL CHUNK CATALOG: retrieved data {}", chunkDataTOCollection); - return buildChunkDataList(chunkDataTOCollection); - } - - public Collection lookupBoLChunkData(List surls) { - - int[] surlsUniqueIDs = new int[surls.size()]; - String[] surlsArray = new String[surls.size()]; - int index = 0; - for (TSURL tsurl : surls) { - surlsUniqueIDs[index] = tsurl.uniqueId(); - surlsArray[index] = tsurl.rawSurl(); - index++; - } - Collection chunkDataTOCollection = dao.find(surlsUniqueIDs, - surlsArray); - log.debug("BoL CHUNK CATALOG: retrieved data {}", chunkDataTOCollection); - return buildChunkDataList(chunkDataTOCollection); - } - - private Collection buildChunkDataList( - Collection chunkDataTOCollection) { - - List list = new ArrayList(); - BoLPersistentChunkData chunk; - for (BoLChunkDataTO chunkTO : chunkDataTOCollection) { - chunk = makeOne(chunkTO); - if (chunk == null) { - continue; - } - list.add(chunk); - if (isComplete(chunkTO)) { - continue; - } - try { - dao.updateIncomplete(this.completeTO(chunkTO, chunk)); - } catch (InvalidReducedBoLChunkDataAttributesException e) { - log.warn("BoL CHUNK CATALOG! unable to add missing informations " - + "on DB to the request: {}", e.getMessage()); - } - } - log.debug("BoL CHUNK CATALOG: returning {}", list); - return list; - } - - private BoLPersistentChunkData makeOne(BoLChunkDataTO chunkTO) { - - try { - return makeOne(chunkTO, new TRequestToken(chunkTO.getRequestToken(), - chunkTO.getTimeStamp())); - } catch (InvalidTRequestTokenAttributesException e) { - throw new IllegalStateException( - "Unexpected InvalidTRequestTokenAttributesException in TRequestToken: " - + e); - } - } - - /** - * Method that returns a Collection of ReducedBoLChunkData Objects matching - * the supplied GridUser and Collection of TSURLs. 
- * - * If any of the data retrieved for a given chunk is not well formed and so - * does not allow a ReducedBoLChunkData Object to be created, then that chunk - * is dropped and gets logged, while processing continues with the next one. - * All valid chunks get returned: the others get dropped. - * - * If there are no chunks associated to the given GridUser and Collection of - * TSURLs, then an empty Collection is returned and a message gets logged. - */ - synchronized public Collection lookupReducedBoLChunkData( - GridUserInterface gu, Collection tsurlCollection) { - - int[] surlsUniqueIDs = new int[tsurlCollection.size()]; - String[] surls = new String[tsurlCollection.size()]; - int index = 0; - for (TSURL tsurl : tsurlCollection) { - surlsUniqueIDs[index] = tsurl.uniqueId(); - surls[index] = tsurl.rawSurl(); - index++; - } - Collection chunkDataTOCollection = dao.findReduced( - gu.getDn(), surlsUniqueIDs, surls); - log.debug("BoL CHUNK CATALOG: retrieved data {}", chunkDataTOCollection); - return buildReducedChunkDataList(chunkDataTOCollection); - } - - private Collection buildReducedChunkDataList( - Collection chunkDataTOCollection) { - - ArrayList list = new ArrayList(); - ReducedBoLChunkData reducedChunkData; - for (ReducedBoLChunkDataTO reducedChunkDataTO : chunkDataTOCollection) { - reducedChunkData = makeOneReduced(reducedChunkDataTO); - if (reducedChunkData != null) { - list.add(reducedChunkData); - if (!this.isComplete(reducedChunkDataTO)) { - this.completeTO(reducedChunkDataTO, reducedChunkData); - dao.updateIncomplete(reducedChunkDataTO); - } - } - } - log.debug("BoL CHUNK CATALOG: returning {}", list); - return list; - } - - /** - * @param auxTO - * @return - */ - private ReducedBoLChunkData makeOneReduced( - ReducedBoLChunkDataTO reducedChunkDataTO) { - - StringBuilder errorSb = new StringBuilder(); - // fromSURL - TSURL fromSURL = null; - try { - fromSURL = TSURL.makeFromStringValidate(reducedChunkDataTO.fromSURL()); - } catch 
(InvalidTSURLAttributesException e) { - errorSb.append(e); - } - if (reducedChunkDataTO.normalizedStFN() != null) { - fromSURL.setNormalizedStFN(reducedChunkDataTO.normalizedStFN()); - } - if (reducedChunkDataTO.surlUniqueID() != null) { - fromSURL.setUniqueID(reducedChunkDataTO.surlUniqueID().intValue()); - } - // status - TReturnStatus status = null; - TStatusCode code = StatusCodeConverter.getInstance().toSTORM( - reducedChunkDataTO.status()); - if (code == TStatusCode.EMPTY) { - errorSb.append("\nRetrieved StatusCode was not recognised: " - + reducedChunkDataTO.status()); - } else { - status = new TReturnStatus(code, reducedChunkDataTO.errString()); - } - // make ReducedBoLChunkData - ReducedBoLChunkData aux = null; - try { - aux = new ReducedBoLChunkData(fromSURL, status); - aux.setPrimaryKey(reducedChunkDataTO.primaryKey()); - } catch (InvalidReducedBoLChunkDataAttributesException e) { - log.warn("BoL CHUNK CATALOG! Retrieved malformed " - + "Reduced BoL chunk data from persistence: dropping reduced chunk..."); - log.warn(e.getMessage(), e); - log.warn(errorSb.toString()); - } - // end... - return aux; - } - - /** - * Method used to add into Persistence a new entry. The supplied BoLChunkData - * gets the primary key changed to the value assigned in Persistence. - * - * This method is intended to be used by a recursive BoL request: the parent - * request supplies a directory which must be expanded, so all new children - * requests resulting from the files in the directory are added into - * persistence. - * - * So this method does _not_ add a new SRM prepare_to_get request into the DB! - * - * The only children data written into the DB are: sourceSURL, TDirOption, - * statusCode and explanation. - * - * In case of any error the operation does not proceed, but no Exception is - * thrown! Proper messages get logged by underlaying DAO. 
- */ - synchronized public void addChild(BoLPersistentChunkData chunkData) { - - BoLChunkDataTO to = new BoLChunkDataTO(); - // needed for now to find ID of request! Must be changed soon! - to.setRequestToken(chunkData.getRequestToken().toString()); - to.setFromSURL(chunkData.getSURL().toString()); - to.setNormalizedStFN(chunkData.getSURL().normalizedStFN()); - to.setSurlUniqueID(new Integer(chunkData.getSURL().uniqueId())); - - to.setAllLevelRecursive(chunkData.getDirOption().isAllLevelRecursive()); - to.setDirOption(chunkData.getDirOption().isDirectory()); - to.setNumLevel(chunkData.getDirOption().getNumLevel()); - to.setStatus(StatusCodeConverter.getInstance().toDB( - chunkData.getStatus().getStatusCode())); - to.setErrString(chunkData.getStatus().getExplanation()); - to.setDeferredStartTime(chunkData.getDeferredStartTime()); - - /* add the entry and update the Primary Key field */ - dao.addChild(to); - chunkData.setPrimaryKey(to.getPrimaryKey()); - } - - /** - * Method used to add into Persistence a new entry. The supplied BoLChunkData - * gets the primary key changed to the value assigned in the Persistence. The - * method requires the GridUser to whom associate the added request. - * - * This method is intended to be used by an srmCopy request in push mode which - * implies a local srmBoL. The only fields from BoLChunkData that are - * considered are: the requestToken, the sourceSURL, the pinLifetime, the - * dirOption, the protocolList, the status and error string. - * - * So this method _adds_ a new SRM prepare_to_get request into the DB! - * - * In case of any error the operation does not proceed, but no Exception is - * thrown! The underlaying DAO logs proper error messages. 
- */ - synchronized public void add(BoLPersistentChunkData chunkData, - GridUserInterface gu) { - - /* Currently NOT used */ - BoLChunkDataTO to = new BoLChunkDataTO(); - to.setRequestToken(chunkData.getRequestToken().toString()); - to.setFromSURL(chunkData.getSURL().toString()); - // TODO MICHELE USER_SURL fill new fields - to.setNormalizedStFN(chunkData.getSURL().normalizedStFN()); - to.setSurlUniqueID(new Integer(chunkData.getSURL().uniqueId())); - - to.setLifeTime(new Long(chunkData.getLifeTime().value()).intValue()); - to.setAllLevelRecursive(chunkData.getDirOption().isAllLevelRecursive()); - to.setDirOption(chunkData.getDirOption().isDirectory()); - to.setNumLevel(chunkData.getDirOption().getNumLevel()); - to.setProtocolList(TransferProtocolListConverter.toDB(chunkData - .getTransferProtocols())); - to.setStatus(StatusCodeConverter.getInstance().toDB( - chunkData.getStatus().getStatusCode())); - to.setErrString(chunkData.getStatus().getExplanation()); - to.setDeferredStartTime(chunkData.getDeferredStartTime()); - - /* add the entry and update the Primary Key field! */ - dao.addNew(to, gu.getDn()); - chunkData.setPrimaryKey(to.getPrimaryKey()); - } - - /** - * Method used to establish if in Persistence there is a BoLChunkData working - * on the supplied SURL, and whose state is SRM_FILE_PINNED, in which case - * true is returned. In case none are found or there is any problem, false is - * returned. This method is intended to be used by srmMv. - */ - synchronized public boolean isSRM_FILE_PINNED(TSURL surl) { - - return (dao.numberInSRM_SUCCESS(surl.uniqueId()) > 0); - } - - /** - * Method used to transit the specified Collection of ReducedBoLChunkData from - * SRM_FILE_PINNED to SRM_RELEASED. Chunks in any other starting state are not - * transited. In case of any error nothing is done, but proper error messages - * get logged by the underlaying DAO. 
- */ - synchronized public void transitSRM_SUCCESStoSRM_RELEASED( - Collection chunks, TRequestToken token) { - - if (chunks == null || chunks.isEmpty()) { - return; - } - - long[] primaryKeys = new long[chunks.size()]; - int index = 0; - for (ReducedBoLChunkData chunkData : chunks) { - if (chunkData != null) { - primaryKeys[index] = chunkData.primaryKey(); - index++; - } - } - dao.transitSRM_SUCCESStoSRM_RELEASED(primaryKeys, token); - } - - /** - * This method is intended to be used by srmRm to transit all BoL chunks on - * the given SURL which are in the SRM_FILE_PINNED state, to SRM_ABORTED. The - * supplied String will be used as explanation in those chunks return status. - * The global status of the request is _not_ changed. - * - * The TURL of those requests will automatically be set to empty. Notice that - * both removeAllJit(SURL) and removeVolatile(SURL) are automatically invoked - * on PinnedFilesCatalog, to remove any entry and corresponding physical ACLs. - * - * Beware, that the chunks may be part of requests that have finished, or that - * still have not finished because other chunks are being processed. - */ - synchronized public void transitSRM_SUCCESStoSRM_ABORTED(TSURL surl, - String explanation) { - - /* Currently NOT used */ - if (explanation == null) { - explanation = ""; - } - dao.transitSRM_SUCCESStoSRM_ABORTED(surl.uniqueId(), surl.toString(), - explanation); - } - - /** - * Method used to force transition to SRM_RELEASED from SRM_FILE_PINNED, of - * all BoL Requests whose pinLifetime has expired and the state still has not - * been changed (a user forgot to run srmReleaseFiles)! 
- */ - synchronized public void transitExpiredSRM_SUCCESS() { - - dao.transitExpiredSRM_SUCCESS(); - } - - public void updateFromPreviousStatus(TRequestToken requestToken, - TStatusCode expectedStatusCode, TStatusCode newStatusCode, - String explanation) { - - dao.updateStatusOnMatchingStatus(requestToken, expectedStatusCode, - newStatusCode, explanation); - } - - public void updateFromPreviousStatus(TRequestToken requestToken, - List surlList, TStatusCode expectedStatusCode, - TStatusCode newStatusCode) { - - int[] surlsUniqueIDs = new int[surlList.size()]; - String[] surls = new String[surlList.size()]; - int index = 0; - for (TSURL tsurl : surlList) { - surlsUniqueIDs[index] = tsurl.uniqueId(); - surls[index] = tsurl.rawSurl(); - index++; - } - dao.updateStatusOnMatchingStatus(requestToken, surlsUniqueIDs, surls, - expectedStatusCode, newStatusCode); - } + private static final Logger log = LoggerFactory.getLogger(BoLChunkCatalog.class); + + private final BoLChunkDAO dao; + + private static BoLChunkCatalog instance; + + public static synchronized BoLChunkCatalog getInstance() { + if (instance == null) { + instance = new BoLChunkCatalog(); + } + return instance; + } + + /** + * Private constructor that starts the internal timer needed to periodically check and transit + * requests whose pinLifetime has expired and are in SRM_FILE_PINNED, to SRM_RELEASED. + */ + private BoLChunkCatalog() { + + dao = BoLChunkDAOMySql.getInstance(); + } + + /** + * Method that returns a Collection of BoLChunkData Objects matching the supplied TRequestToken. + * + * If any of the data associated to the TRequestToken is not well formed and so does not allow a + * BoLChunkData Object to be created, then that part of the request is dropped and gets logged, + * and the processing continues with the next part. All valid chunks get returned: the others get + * dropped. + * + * If there are no chunks to process then an empty Collection is returned, and a message gets + * logged. 
+ */ + synchronized public Collection lookup(TRequestToken rt) { + + Collection chunkCollection = dao.find(rt); + log.debug("BoL CHUNK CATALOG: retrieved data {}", chunkCollection); + List list = new ArrayList(); + + if (chunkCollection.isEmpty()) { + log.warn("BoL CHUNK CATALOG! No chunks found in persistence for specified request: {}", rt); + return list; + } + + BoLPersistentChunkData chunk; + for (BoLChunkDataTO chunkTO : chunkCollection) { + chunk = makeOne(chunkTO, rt); + if (chunk == null) { + continue; + } + list.add(chunk); + if (isComplete(chunkTO)) { + continue; + } + try { + dao.updateIncomplete(completeTO(chunkTO, chunk)); + } catch (InvalidReducedBoLChunkDataAttributesException e) { + log.warn( + "BoL CHUNK CATALOG! unable to add missing informations on DB " + "to the request: {}", + e.getMessage()); + } + } + log.debug("BoL CHUNK CATALOG: returning " + list); + return list; + } + + /** + * Generates a BoLChunkData from the received BoLChunkDataTO + * + * @param auxTO + * @param rt + * @return + */ + private BoLPersistentChunkData makeOne(BoLChunkDataTO auxTO, TRequestToken rt) { + + StringBuilder errorSb = new StringBuilder(); + TSURL fromSURL = null; + try { + fromSURL = TSURL.makeFromStringValidate(auxTO.getFromSURL()); + } catch (InvalidTSURLAttributesException e) { + errorSb.append(e); + } + if (auxTO.normalizedStFN() != null) { + fromSURL.setNormalizedStFN(auxTO.normalizedStFN()); + } + if (auxTO.sulrUniqueID() != null) { + fromSURL.setUniqueID(auxTO.sulrUniqueID().intValue()); + } + // lifeTime + TLifeTimeInSeconds lifeTime = null; + try { + long pinLifeTime = PinLifetimeConverter.getInstance().toStoRM(auxTO.getLifeTime()); + // Check for max value allowed + long max = Configuration.getInstance().getPinLifetimeMaximum(); + if (pinLifeTime > max) { + log.warn("PinLifeTime is greater than the max value allowed. 
" + + "Drop the value to the max = {} seconds", max); + pinLifeTime = max; + } + lifeTime = TLifeTimeInSeconds.make(pinLifeTime, TimeUnit.SECONDS); + } catch (IllegalArgumentException e) { + errorSb.append("\n"); + errorSb.append(e); + } + // dirOption + TDirOption dirOption = null; + try { + dirOption = + new TDirOption(auxTO.getDirOption(), auxTO.getAllLevelRecursive(), auxTO.getNumLevel()); + } catch (InvalidTDirOptionAttributesException e) { + errorSb.append("\n"); + errorSb.append(e); + } + // transferProtocols + TURLPrefix transferProtocols = TransferProtocolListConverter.toSTORM(auxTO.getProtocolList()); + if (transferProtocols.size() == 0) { + errorSb + .append("\nEmpty list of TransferProtocols or" + " could not translate TransferProtocols!"); + /* fail construction of BoLChunkData! */ + transferProtocols = null; + } + // fileSize + TSizeInBytes fileSize = null; + try { + fileSize = TSizeInBytes.make(auxTO.getFileSize(), SizeUnit.BYTES); + } catch (InvalidTSizeAttributesException e) { + errorSb.append("\n"); + errorSb.append(e); + } + // status + TReturnStatus status = null; + TStatusCode code = StatusCodeConverter.getInstance().toSTORM(auxTO.getStatus()); + if (code == TStatusCode.EMPTY) { + errorSb.append("\nRetrieved StatusCode was not recognised: " + auxTO.getStatus()); + } else { + status = new TReturnStatus(code, auxTO.getErrString()); + } + // transferURL + /* + * whatever is read is just meaningless because BoL will fill it in!!! So create an Empty TTURL + * by default! Vital to avoid problems with unknown DPM NULL/EMPTY logic policy! 
+ */ + TTURL transferURL = TTURL.makeEmpty(); + // make BoLChunkData + BoLPersistentChunkData aux = null; + try { + aux = new BoLPersistentChunkData(rt, fromSURL, lifeTime, dirOption, transferProtocols, + fileSize, status, transferURL, auxTO.getDeferredStartTime()); + aux.setPrimaryKey(auxTO.getPrimaryKey()); + } catch (InvalidSurlRequestDataAttributesException e) { + dao.updateStatus(auxTO, SRM_FAILURE, "Request is malformed!"); + log.warn("BoL CHUNK CATALOG! Retrieved malformed BoL " + + "chunk data from persistence. Dropping chunk from request {}", rt); + log.warn(e.getMessage(), e); + log.warn(errorSb.toString()); + } + // end... + return aux; + } + + /** + * + * Adds to the received BoLChunkDataTO the normalized StFN and the SURL unique ID taken from the + * BoLChunkData + * + * @param chunkTO + * @param chunk + */ + private void completeTO(ReducedBoLChunkDataTO chunkTO, final ReducedBoLChunkData chunk) { + + chunkTO.setNormalizedStFN(chunk.fromSURL().normalizedStFN()); + chunkTO.setSurlUniqueID(new Integer(chunk.fromSURL().uniqueId())); + } + + /** + * + * Creates a ReducedBoLChunkDataTO from the received BoLChunkDataTO and completes it with the + * normalized StFN and the SURL unique ID taken from the PtGChunkData + * + * @param chunkTO + * @param chunk + * @return + * @throws InvalidReducedBoLChunkDataAttributesException + */ + private ReducedBoLChunkDataTO completeTO(BoLChunkDataTO chunkTO, + final BoLPersistentChunkData chunk) throws InvalidReducedBoLChunkDataAttributesException { + + ReducedBoLChunkDataTO reducedChunkTO = this.reduce(chunkTO); + this.completeTO(reducedChunkTO, this.reduce(chunk)); + return reducedChunkTO; + } + + /** + * Creates a ReducedBoLChunkData from the data contained in the received BoLChunkData + * + * @param chunk + * @return + * @throws InvalidReducedBoLChunkDataAttributesException + */ + private ReducedBoLChunkData reduce(BoLPersistentChunkData chunk) + throws InvalidReducedBoLChunkDataAttributesException { + + 
ReducedBoLChunkData reducedChunk = new ReducedBoLChunkData(chunk.getSURL(), chunk.getStatus()); + reducedChunk.setPrimaryKey(chunk.getPrimaryKey()); + return reducedChunk; + } + + /** + * Creates a ReducedBoLChunkDataTO from the data contained in the received BoLChunkDataTO + * + * @param chunkTO + * @return + */ + private ReducedBoLChunkDataTO reduce(BoLChunkDataTO chunkTO) { + + ReducedBoLChunkDataTO reducedChunkTO = new ReducedBoLChunkDataTO(); + reducedChunkTO.setPrimaryKey(chunkTO.getPrimaryKey()); + reducedChunkTO.setFromSURL(chunkTO.getFromSURL()); + reducedChunkTO.setNormalizedStFN(chunkTO.normalizedStFN()); + reducedChunkTO.setSurlUniqueID(chunkTO.sulrUniqueID()); + reducedChunkTO.setStatus(chunkTO.getStatus()); + reducedChunkTO.setErrString(chunkTO.getErrString()); + return reducedChunkTO; + } + + /** + * Checks if the received BoLChunkDataTO contains the fields not set by the front end but required + * + * @param chunkTO + * @return + */ + private boolean isComplete(BoLChunkDataTO chunkTO) { + + return (chunkTO.normalizedStFN() != null) && (chunkTO.sulrUniqueID() != null); + } + + /** + * Method used to update into Persistence a retrieved BoLChunkData. In case any error occurs, the + * operation does not proceed but no Exception is thrown. Error messages get logged. + * + * Only fileSize, StatusCode, errString and transferURL are updated. Likewise for the request + * pinLifetime. 
+ */ + synchronized public void update(BoLPersistentChunkData cd) { + + BoLChunkDataTO to = new BoLChunkDataTO(); + /* Primary key needed by DAO Object */ + to.setPrimaryKey(cd.getPrimaryKey()); + to.setFileSize(cd.getFileSize().value()); + to.setStatus(StatusCodeConverter.getInstance().toDB(cd.getStatus().getStatusCode())); + to.setErrString(cd.getStatus().getExplanation()); + to.setLifeTime(PinLifetimeConverter.getInstance().toDB(cd.getLifeTime().value())); + // TODO MICHELE USER_SURL fill new fields + to.setNormalizedStFN(cd.getSURL().normalizedStFN()); + to.setSurlUniqueID(new Integer(cd.getSURL().uniqueId())); + + dao.update(to); + } + + /** + * Method used to add into Persistence a new entry. The supplied BoLChunkData gets the primary key + * changed to the value assigned in Persistence. + * + * This method is intended to be used by a recursive BoL request: the parent request supplies a + * directory which must be expanded, so all new children requests resulting from the files in the + * directory are added into persistence. + * + * So this method does _not_ add a new SRM prepare_to_get request into the DB! + * + * The only children data written into the DB are: sourceSURL, TDirOption, statusCode and + * explanation. + * + * In case of any error the operation does not proceed, but no Exception is thrown! Proper + * messages get logged by underlaying DAO. + */ + synchronized public void addChild(BoLPersistentChunkData chunkData) { + + BoLChunkDataTO to = new BoLChunkDataTO(); + // needed for now to find ID of request! Must be changed soon! 
+ to.setRequestToken(chunkData.getRequestToken().toString()); + to.setFromSURL(chunkData.getSURL().toString()); + to.setNormalizedStFN(chunkData.getSURL().normalizedStFN()); + to.setSurlUniqueID(new Integer(chunkData.getSURL().uniqueId())); + + to.setAllLevelRecursive(chunkData.getDirOption().isAllLevelRecursive()); + to.setDirOption(chunkData.getDirOption().isDirectory()); + to.setNumLevel(chunkData.getDirOption().getNumLevel()); + to.setStatus(StatusCodeConverter.getInstance().toDB(chunkData.getStatus().getStatusCode())); + to.setErrString(chunkData.getStatus().getExplanation()); + to.setDeferredStartTime(chunkData.getDeferredStartTime()); + + /* add the entry and update the Primary Key field */ + dao.addChild(to); + chunkData.setPrimaryKey(to.getPrimaryKey()); + } + + public void updateFromPreviousStatus(TRequestToken requestToken, TStatusCode expectedStatusCode, + TStatusCode newStatusCode, String explanation) { + + dao.updateStatusOnMatchingStatus(requestToken, expectedStatusCode, newStatusCode, explanation); + } } diff --git a/src/main/java/it/grid/storm/catalogs/BoLChunkDAO.java b/src/main/java/it/grid/storm/catalogs/BoLChunkDAO.java deleted file mode 100644 index bd19757f8..000000000 --- a/src/main/java/it/grid/storm/catalogs/BoLChunkDAO.java +++ /dev/null @@ -1,1701 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ - -package it.grid.storm.catalogs; - -import it.grid.storm.config.Configuration; -import it.grid.storm.ea.StormEA; -import it.grid.storm.namespace.NamespaceDirector; -import it.grid.storm.namespace.NamespaceException; -import it.grid.storm.namespace.StoRI; -import it.grid.storm.namespace.naming.SURL; -import it.grid.storm.srm.types.InvalidTSURLAttributesException; -import it.grid.storm.srm.types.TRequestToken; -import it.grid.storm.srm.types.TRequestType; -import it.grid.storm.srm.types.TSURL; -import it.grid.storm.srm.types.TStatusCode; - -import java.sql.Connection; -import java.sql.DriverManager; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.SQLWarning; -import java.sql.Statement; -import java.sql.Timestamp; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Date; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Iterator; -import java.util.List; -import java.util.Map.Entry; -import java.util.Timer; -import java.util.TimerTask; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * DAO class for BoLChunkCatalog. This DAO is specifically designed to connect - * to a MySQL DB. The raw data found in those tables is pre-treated in order to - * turn it into the Object Model of StoRM. See Method comments for further info. - * BEWARE! DAO Adjusts for extra fields in the DB that are not present in the - * object model. 
- * - * @author CNAF - * @version 1.0 - * @date Aug 2009 - */ -public class BoLChunkDAO { - - private static final Logger log = LoggerFactory.getLogger(BoLChunkDAO.class); - - /** String with the name of the class for the DB driver */ - private final String driver = Configuration.getInstance().getDBDriver(); - /** String referring to the URL of the DB */ - private final String url = Configuration.getInstance().getDBURL(); - /** String with the password for the DB */ - private final String password = Configuration.getInstance().getDBPassword(); - /** String with the name for the DB */ - private final String name = Configuration.getInstance().getDBUserName(); - /** Connection to DB - WARNING!!! It is kept open all the time! */ - private Connection con = null; - private final static BoLChunkDAO dao = new BoLChunkDAO(); - - /** - * timer thread that will run a taask to alert when reconnecting is necessary! - */ - private Timer clock = null; - /** - * timer task that will update the boolean signaling that a reconnection is - * needed! - */ - private TimerTask clockTask = null; - /** milliseconds that must pass before reconnecting to DB */ - private final long period = Configuration.getInstance() - .getDBReconnectPeriod() * 1000; - /** initial delay in milliseconds before starting timer */ - private final long delay = Configuration.getInstance().getDBReconnectDelay() * 1000; - /** boolean that tells whether reconnection is needed because of MySQL bug! */ - private boolean reconnect = false; - - private BoLChunkDAO() { - - setUpConnection(); - - clock = new Timer(); - clockTask = new TimerTask() { - - @Override - public void run() { - - reconnect = true; - } - }; // clock task - clock.scheduleAtFixedRate(clockTask, delay, period); - } - - /** - * Method that returns the only instance of the BoLChunkDAO. 
- */ - public static BoLChunkDAO getInstance() { - - return dao; - } - - /** - * Method used to add a new record to the DB: the supplied BoLChunkDataTO gets - * its primaryKey changed to the one assigned by the DB. The supplied - * BoLChunkData is used to fill in only the DB table where file specific info - * gets recorded: it does _not_ add a new request! So if spurious data is - * supplied, it will just stay there because of a lack of a parent request! - */ - public synchronized void addChild(BoLChunkDataTO to) { - - if (!checkConnection()) { - log.error("BoL CHUNK DAO: addChild - unable to get a valid connection!"); - return; - } - String str = null; - PreparedStatement id = null; // statement to find out the ID associated to - // the request token - ResultSet rsid = null; // result set containing the ID of the request. - // insertion - try { - - /* WARNING!!!! We are forced to run a query to get the ID of the request, - * which should NOT be so because the corresponding request object should - * have been changed with the extra field! However, it is not possible - * at the moment to perform such change because of strict deadline and - * the change could wreak havoc the code. So we are forced to make this - * query!!! - */ - - // begin transaction - con.setAutoCommit(false); - logWarnings(con.getWarnings()); - - // find ID of request corresponding to given RequestToken - str = "SELECT rq.ID FROM request_queue rq WHERE rq.r_token=?"; - - id = con.prepareStatement(str); - logWarnings(con.getWarnings()); - - id.setString(1, to.getRequestToken()); - logWarnings(id.getWarnings()); - - log.debug("BoL CHUNK DAO: addChild; {}", id.toString()); - rsid = id.executeQuery(); - logWarnings(id.getWarnings()); - - /* ID of request in request_process! */ - int request_id = extractID(rsid); - int id_s = fillBoLTables(to, request_id); - - // end transaction! 
- con.commit(); - logWarnings(con.getWarnings()); - con.setAutoCommit(true); - logWarnings(con.getWarnings()); - - // update primary key reading the generated key - to.setPrimaryKey(id_s); - } catch (SQLException e) { - log.error("BoL CHUNK DAO: unable to complete addChild! BoLChunkDataTO: {}; " - + "exception received: {}", to, e.getMessage(), e); - rollback(con); - } catch (Exception e) { - log.error("BoL CHUNK DAO: unable to complete addChild! BoLChunkDataTO: {}; " - + "exception received: {}", to, e.getMessage(), e); - rollback(con); - } finally { - close(rsid); - close(id); - } - } - - /** - * Method used to add a new record to the DB: the supplied BoLChunkDataTO gets - * its primaryKey changed to the one assigned by the DB. The client_dn must - * also be supplied as a String. The supplied BoLChunkData is used to fill in - * all the DB tables where file specific info gets recorded: it _adds_ a new - * request! - */ - public synchronized void addNew(BoLChunkDataTO to, String client_dn) { - - if (!checkConnection()) { - log.error("BoL CHUNK DAO: addNew - unable to get a valid connection!"); - return; - } - String str = null; - /* Result set containing the ID of the inserted new request */ - ResultSet rs_new = null; - /* Insert new request into process_request */ - PreparedStatement addNew = null; - /* Insert protocols for request. */ - PreparedStatement addProtocols = null; // insert protocols for request. - try { - // begin transaction - con.setAutoCommit(false); - logWarnings(con.getWarnings()); - - // add to request_queue... 
- str = "INSERT INTO request_queue (config_RequestTypeID,client_dn,pinLifetime,status,errstring,r_token,nbreqfiles,timeStamp,deferredStartTime) VALUES (?,?,?,?,?,?,?,?,?)"; - addNew = con.prepareStatement(str, Statement.RETURN_GENERATED_KEYS); - logWarnings(con.getWarnings()); - /* request type set to bring online */ - addNew.setString(1, - RequestTypeConverter.getInstance().toDB(TRequestType.BRING_ON_LINE)); - logWarnings(addNew.getWarnings()); - - addNew.setString(2, client_dn); - logWarnings(addNew.getWarnings()); - - addNew.setInt(3, to.getLifeTime()); - logWarnings(addNew.getWarnings()); - - addNew.setInt( - 4, - StatusCodeConverter.getInstance().toDB( - TStatusCode.SRM_REQUEST_INPROGRESS)); - logWarnings(addNew.getWarnings()); - - addNew.setString(5, "New BoL Request resulting from srmCopy invocation."); - logWarnings(addNew.getWarnings()); - - addNew.setString(6, to.getRequestToken()); - logWarnings(addNew.getWarnings()); - - addNew.setInt(7, 1); // number of requested files set to 1! - logWarnings(addNew.getWarnings()); - - addNew.setTimestamp(8, new Timestamp(new Date().getTime())); - logWarnings(addNew.getWarnings()); - - addNew.setInt(9, to.getDeferredStartTime()); - logWarnings(addNew.getWarnings()); - - log.trace("BoL CHUNK DAO: addNew; {}", addNew.toString()); - addNew.execute(); - logWarnings(addNew.getWarnings()); - - rs_new = addNew.getGeneratedKeys(); - int id_new = extractID(rs_new); - - // add protocols... 
- str = "INSERT INTO request_TransferProtocols (request_queueID,config_ProtocolsID) VALUES (?,?)"; - addProtocols = con.prepareStatement(str); - logWarnings(con.getWarnings()); - for (Iterator i = to.getProtocolList().iterator(); i.hasNext();) { - addProtocols.setInt(1, id_new); - logWarnings(addProtocols.getWarnings()); - - addProtocols.setString(2, i.next()); - logWarnings(addProtocols.getWarnings()); - - log.trace("BoL CHUNK DAO: addNew; {}", addProtocols.toString()); - addProtocols.execute(); - logWarnings(addProtocols.getWarnings()); - } - - // addChild... - int id_s = fillBoLTables(to, id_new); - - // end transaction! - con.commit(); - logWarnings(con.getWarnings()); - con.setAutoCommit(true); - logWarnings(con.getWarnings()); - - // update primary key reading the generated key - to.setPrimaryKey(id_s); - } catch (SQLException e) { - log.error("BoL CHUNK DAO: Rolling back! Unable to complete addNew! " - + "BoLChunkDataTO: {}; exception received: {}", to, e.getMessage(), e); - rollback(con); - } catch (Exception e) { - log.error("BoL CHUNK DAO: unable to complete addNew! 
BoLChunkDataTO: {}; " - + "exception received: {}", to, e.getMessage(), e); - rollback(con); - } finally { - close(rs_new); - close(addNew); - close(addProtocols); - } - } - - /** - * To be used inside a transaction - * - * @param to - * @param requestQueueID - * @return - * @throws SQLException - * @throws Exception - */ - private synchronized int fillBoLTables(BoLChunkDataTO to, int requestQueueID) - throws SQLException, Exception { - - String str = null; - /* Result set containing the ID of the inserted */ - ResultSet rs_do = null; - /* Result set containing the ID of the inserted */ - ResultSet rs_b = null; - /* Result set containing the ID of the inserted */ - ResultSet rs_s = null; - /* insert TDirOption for request */ - PreparedStatement addDirOption = null; - /* insert request_Bol for request */ - PreparedStatement addBoL = null; - PreparedStatement addChild = null; - - try { - // first fill in TDirOption - str = "INSERT INTO request_DirOption (isSourceADirectory,allLevelRecursive,numOfLevels) VALUES (?,?,?)"; - addDirOption = con.prepareStatement(str, Statement.RETURN_GENERATED_KEYS); - logWarnings(con.getWarnings()); - addDirOption.setBoolean(1, to.getDirOption()); - logWarnings(addDirOption.getWarnings()); - - addDirOption.setBoolean(2, to.getAllLevelRecursive()); - logWarnings(addDirOption.getWarnings()); - - addDirOption.setInt(3, to.getNumLevel()); - logWarnings(addDirOption.getWarnings()); - - log.trace("BoL CHUNK DAO: addNew; {}", addDirOption.toString()); - addDirOption.execute(); - logWarnings(addDirOption.getWarnings()); - - rs_do = addDirOption.getGeneratedKeys(); - int id_do = extractID(rs_do); - - // second fill in request_BoL... sourceSURL and TDirOption! 
- str = "INSERT INTO request_BoL (request_DirOptionID,request_queueID,sourceSURL,normalized_sourceSURL_StFN,sourceSURL_uniqueID) VALUES (?,?,?,?,?)"; - addBoL = con.prepareStatement(str, Statement.RETURN_GENERATED_KEYS); - logWarnings(con.getWarnings()); - addBoL.setInt(1, id_do); - logWarnings(addBoL.getWarnings()); - - addBoL.setInt(2, requestQueueID); - logWarnings(addBoL.getWarnings()); - - addBoL.setString(3, to.getFromSURL()); - logWarnings(addBoL.getWarnings()); - - addBoL.setString(4, to.normalizedStFN()); - logWarnings(addBoL.getWarnings()); - - addBoL.setInt(5, to.sulrUniqueID()); - logWarnings(addBoL.getWarnings()); - - log.trace("BoL CHUNK DAO: addNew; {}", addBoL.toString()); - addBoL.execute(); - logWarnings(addBoL.getWarnings()); - - rs_b = addBoL.getGeneratedKeys(); - int id_g = extractID(rs_b); - - // third fill in status_BoL... - str = "INSERT INTO status_BoL (request_BoLID,statusCode,explanation) VALUES (?,?,?)"; - addChild = con.prepareStatement(str, Statement.RETURN_GENERATED_KEYS); - logWarnings(con.getWarnings()); - addChild.setInt(1, id_g); - logWarnings(addChild.getWarnings()); - - addChild.setInt(2, to.getStatus()); - logWarnings(addChild.getWarnings()); - - addChild.setString(3, to.getErrString()); - logWarnings(addChild.getWarnings()); - - log.trace("BoL CHUNK DAO: addNew; " + addChild.toString()); - addChild.execute(); - logWarnings(addChild.getWarnings()); - - return id_g; - } finally { - close(rs_do); - close(rs_b); - close(rs_s); - close(addDirOption); - close(addBoL); - close(addChild); - } - } - - /** - * Method used to save the changes made to a retrieved BoLChunkDataTO, back - * into the MySQL DB. Only the fileSize, statusCode and explanation, of - * status_BoL table are written to the DB. Likewise for the request - * pinLifetime. In case of any error, an error message gets logged but no - * exception is thrown. 
- */ - public synchronized void update(BoLChunkDataTO to) { - - if (!checkConnection()) { - log.error("BoL CHUNK DAO: update - unable to get a valid connection!"); - return; - } - PreparedStatement updateFileReq = null; - try { - // ready updateFileReq... - updateFileReq = con - .prepareStatement("UPDATE request_queue rq JOIN (status_BoL sb, request_BoL rb) ON (rq.ID=rb.request_queueID AND sb.request_BoLID=rb.ID)" - + " SET sb.fileSize=?, sb.statusCode=?, sb.explanation=?, rq.pinLifetime=?, rb.normalized_sourceSURL_StFN=?, rb.sourceSURL_uniqueID=?" - + " WHERE rb.ID=?"); - logWarnings(con.getWarnings()); - updateFileReq.setLong(1, to.getFileSize()); - logWarnings(updateFileReq.getWarnings()); - - updateFileReq.setInt(2, to.getStatus()); - logWarnings(updateFileReq.getWarnings()); - - updateFileReq.setString(3, to.getErrString()); - logWarnings(updateFileReq.getWarnings()); - - updateFileReq.setInt(4, to.getLifeTime()); - logWarnings(updateFileReq.getWarnings()); - - updateFileReq.setString(5, to.normalizedStFN()); - logWarnings(updateFileReq.getWarnings()); - - updateFileReq.setInt(6, to.sulrUniqueID()); - logWarnings(updateFileReq.getWarnings()); - - updateFileReq.setLong(7, to.getPrimaryKey()); - logWarnings(updateFileReq.getWarnings()); - // execute update - log.trace("BoL CHUNK DAO: update method; {}", updateFileReq.toString()); - updateFileReq.executeUpdate(); - logWarnings(updateFileReq.getWarnings()); - } catch (SQLException e) { - log.error("BoL CHUNK DAO: Unable to complete update! 
{}", e.getMessage(), e); - } finally { - close(updateFileReq); - } - } - - /** - * Updates the request_Bol represented by the received ReducedBoLChunkDataTO - * by setting its normalized_sourceSURL_StFN and sourceSURL_uniqueID - * - * @param chunkTO - */ - public synchronized void updateIncomplete(ReducedBoLChunkDataTO chunkTO) { - - if (!checkConnection()) { - log.error("BoL CHUNK DAO: updateIncomplete - unable to get a valid connection!"); - return; - } - String str = "UPDATE request_BoL SET normalized_sourceSURL_StFN=?, " - + "sourceSURL_uniqueID=? WHERE ID=?"; - PreparedStatement stmt = null; - try { - stmt = con.prepareStatement(str); - logWarnings(con.getWarnings()); - - stmt.setString(1, chunkTO.normalizedStFN()); - logWarnings(stmt.getWarnings()); - - stmt.setInt(2, chunkTO.surlUniqueID()); - logWarnings(stmt.getWarnings()); - - stmt.setLong(3, chunkTO.primaryKey()); - logWarnings(stmt.getWarnings()); - - log.trace("BoL CHUNK DAO - update incomplete: {}", stmt.toString()); - stmt.executeUpdate(); - logWarnings(stmt.getWarnings()); - } catch (SQLException e) { - log.error("BoL CHUNK DAO: Unable to complete update incomplete! {}", - e.getMessage(), e); - } finally { - close(stmt); - } - } - - /** - * TODO WARNING! THIS IS A WORK IN PROGRESS!!! Method used to refresh the - * BoLChunkDataTO information from the MySQL DB. In this first version, only - * the statusCode is reloaded from the DB. TODO The next version must contains - * all the information related to the Chunk! In case of any error, an error - * message gets logged but no exception is thrown. 
- */ - public synchronized BoLChunkDataTO refresh(long primary_key) { - - if (!checkConnection()) { - log.error("BoL CHUNK DAO: refresh - unable to get a valid connection!"); - return null; - } - String str = null; - PreparedStatement find = null; - ResultSet rs = null; - - try { - // get chunks of the request - str = "SELECT statusCode " + "FROM status_BoL " - + "WHERE request_BoLID=?"; - find = con.prepareStatement(str); - logWarnings(con.getWarnings()); - find.setLong(1, primary_key); - - logWarnings(find.getWarnings()); - log.trace("BoL CHUNK DAO: refresh status method; " + find.toString()); - - rs = find.executeQuery(); - - logWarnings(find.getWarnings()); - BoLChunkDataTO aux = null; - while (rs.next()) { - aux = new BoLChunkDataTO(); - aux.setStatus(rs.getInt("statusCode")); - } - return aux; - } catch (SQLException e) { - log.error("BoL CHUNK DAO: {}", e.getMessage(), e); - return null; - } finally { - close(rs); - close(find); - } - } - - /** - * Method that queries the MySQL DB to find all entries matching the supplied - * TRequestToken. The Collection contains the corresponding BoLChunkDataTO - * objects. An initial simple query establishes the list of protocols - * associated with the request. A second complex query establishes all chunks - * associated with the request, by properly joining request_queue, - * request_BoL, status_BoL and request_DirOption. The considered fields are: - * (1) From status_BoL: the ID field which becomes the TOs primary key, and - * statusCode. (2) From request_BoL: sourceSURL (3) From request_queue: - * pinLifetime (4) From request_DirOption: isSourceADirectory, - * alLevelRecursive, numOfLevels In case of any error, a log gets written and - * an empty collection is returned. No exception is thrown. NOTE! Chunks in - * SRM_ABORTED status are NOT returned! 
- */ - public synchronized Collection find(TRequestToken requestToken) { - - if (!checkConnection()) { - log.error("BoL CHUNK DAO: find - unable to get a valid connection!"); - return new ArrayList(); - } - String strToken = requestToken.toString(); - String str = null; - PreparedStatement find = null; - ResultSet rs = null; - try { - str = "SELECT tp.config_ProtocolsID " - + "FROM request_TransferProtocols tp JOIN request_queue rq ON tp.request_queueID=rq.ID " - + "WHERE rq.r_token=?"; - - find = con.prepareStatement(str); - logWarnings(con.getWarnings()); - - ArrayList protocols = new ArrayList(); - find.setString(1, strToken); - logWarnings(find.getWarnings()); - - log.trace("BoL CHUNK DAO: find method; {}", find.toString()); - rs = find.executeQuery(); - logWarnings(find.getWarnings()); - while (rs.next()) { - protocols.add(rs.getString("tp.config_ProtocolsID")); - } - close(rs); - close(find); - - // get chunks of the request - str = "SELECT sb.statusCode, rq.timeStamp, rq.pinLifetime, rq.deferredStartTime, rb.ID, rb.sourceSURL, rb.normalized_sourceSURL_StFN, rb.sourceSURL_uniqueID, d.isSourceADirectory, d.allLevelRecursive, d.numOfLevels " - + "FROM request_queue rq JOIN (request_BoL rb, status_BoL sb) " - + "ON (rb.request_queueID=rq.ID AND sb.request_BoLID=rb.ID) " - + "LEFT JOIN request_DirOption d ON rb.request_DirOptionID=d.ID " - + "WHERE rq.r_token=? 
AND sb.statusCode<>?"; - find = con.prepareStatement(str); - logWarnings(con.getWarnings()); - ArrayList list = new ArrayList(); - find.setString(1, strToken); - logWarnings(find.getWarnings()); - - find.setInt(2, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_ABORTED)); - logWarnings(find.getWarnings()); - - log.trace("BoL CHUNK DAO: find method; {}", find.toString()); - rs = find.executeQuery(); - logWarnings(find.getWarnings()); - - BoLChunkDataTO chunkDataTO = null; - while (rs.next()) { - chunkDataTO = new BoLChunkDataTO(); - chunkDataTO.setStatus(rs.getInt("sb.statusCode")); - chunkDataTO.setLifeTime(rs.getInt("rq.pinLifetime")); - chunkDataTO.setDeferredStartTime(rs.getInt("rq.deferredStartTime")); - chunkDataTO.setRequestToken(strToken); - chunkDataTO.setTimeStamp(rs.getTimestamp("rq.timeStamp")); - chunkDataTO.setPrimaryKey(rs.getLong("rb.ID")); - chunkDataTO.setFromSURL(rs.getString("rb.sourceSURL")); - - chunkDataTO.setNormalizedStFN(rs - .getString("rb.normalized_sourceSURL_StFN")); - int uniqueID = rs.getInt("rb.sourceSURL_uniqueID"); - if (!rs.wasNull()) { - chunkDataTO.setSurlUniqueID(new Integer(uniqueID)); - } - - chunkDataTO.setDirOption(rs.getBoolean("d.isSourceADirectory")); - chunkDataTO.setAllLevelRecursive(rs.getBoolean("d.allLevelRecursive")); - chunkDataTO.setNumLevel(rs.getInt("d.numOfLevels")); - chunkDataTO.setProtocolList(protocols); - list.add(chunkDataTO); - } - return list; - } catch (SQLException e) { - log.error("BOL CHUNK DAO: {}", e.getMessage(), e); - /* Return empty Collection! */ - return new ArrayList(); - } finally { - close(rs); - close(find); - } - } - - /** - * Method that returns a Collection of ReducedBoLChunkDataTO associated to the - * given TRequestToken expressed as String. 
- */ - public synchronized Collection findReduced( - String reqtoken) { - - if (!checkConnection()) { - log.error("BoL CHUNK DAO: findReduced - unable to get a valid connection!"); - return new ArrayList(); - } - PreparedStatement find = null; - ResultSet rs = null; - try { - // get reduced chunks - String str = "SELECT sb.statusCode, rb.ID, rb.sourceSURL, rb.normalized_sourceSURL_StFN, rb.sourceSURL_uniqueID " - + "FROM request_queue rq JOIN (request_BoL rb, status_BoL sb) " - + "ON (rb.request_queueID=rq.ID AND sb.request_BoLID=rb.ID) " - + "WHERE rq.r_token=?"; - find = con.prepareStatement(str); - logWarnings(con.getWarnings()); - - ArrayList list = new ArrayList(); - find.setString(1, reqtoken); - logWarnings(find.getWarnings()); - - log.trace("BoL CHUNK DAO! findReduced with request token; {}", find.toString()); - rs = find.executeQuery(); - logWarnings(find.getWarnings()); - - ReducedBoLChunkDataTO chunkDataTO = null; - while (rs.next()) { - chunkDataTO = new ReducedBoLChunkDataTO(); - chunkDataTO.setStatus(rs.getInt("sb.statusCode")); - chunkDataTO.setPrimaryKey(rs.getLong("rb.ID")); - chunkDataTO.setFromSURL(rs.getString("rb.sourceSURL")); - chunkDataTO.setNormalizedStFN(rs - .getString("rb.normalized_sourceSURL_StFN")); - int uniqueID = rs.getInt("rb.sourceSURL_uniqueID"); - if (!rs.wasNull()) { - chunkDataTO.setSurlUniqueID(uniqueID); - } - list.add(chunkDataTO); - } - return list; - } catch (SQLException e) { - log.error("BOL CHUNK DAO: {}", e.getMessage(), e); - /* Return empty Collection! */ - return new ArrayList(); - } finally { - close(rs); - close(find); - } - } - - /** - * Method that returns a Collection of ReducedBoLChunkDataTO associated to the - * given griduser, and whose SURLs are contained in the supplied array of - * Strings. 
- */ - public synchronized Collection findReduced( - TRequestToken requestToken, int[] surlUniqueIDs, String[] surls) { - - if (!checkConnection()) { - log.error("BoL CHUNK DAO: findReduced - unable to get a valid connection!"); - return new ArrayList(); - } - PreparedStatement find = null; - ResultSet rs = null; - try { - /* - * NOTE: we search also on the fromSurl because otherwise we lost all - * request_Bol that have not the uniqueID set because are not yet been - * used by anybody - */ - // get reduced chunks - String str = "SELECT sb.statusCode, rb.ID, rb.sourceSURL, rb.normalized_sourceSURL_StFN, rb.sourceSURL_uniqueID " - + "FROM request_queue rq JOIN (request_BoL rb, status_BoL sb) " - + "ON (rb.request_queueID=rq.ID AND sb.request_BoLID=rb.ID) " - + "WHERE rq.r_token=? AND ( rb.sourceSURL_uniqueID IN " - + makeSURLUniqueIDWhere(surlUniqueIDs) - + " AND rb.sourceSURL IN " - + makeSurlString(surls) + " ) "; - find = con.prepareStatement(str); - logWarnings(con.getWarnings()); - - ArrayList list = new ArrayList(); - find.setString(1, requestToken.getValue()); - logWarnings(find.getWarnings()); - - log.trace("BoL CHUNK DAO! 
findReduced with griduser+surlarray; {}", find.toString()); - rs = find.executeQuery(); - logWarnings(find.getWarnings()); - - ReducedBoLChunkDataTO chunkDataTO = null; - while (rs.next()) { - chunkDataTO = new ReducedBoLChunkDataTO(); - chunkDataTO.setStatus(rs.getInt("sb.statusCode")); - chunkDataTO.setPrimaryKey(rs.getLong("rb.ID")); - chunkDataTO.setFromSURL(rs.getString("rb.sourceSURL")); - chunkDataTO.setNormalizedStFN(rs - .getString("rb.normalized_sourceSURL_StFN")); - int uniqueID = rs.getInt("rb.sourceSURL_uniqueID"); - if (!rs.wasNull()) { - chunkDataTO.setSurlUniqueID(uniqueID); - } - list.add(chunkDataTO); - } - return list; - } catch (SQLException e) { - log.error("BoL CHUNK DAO: {}", e.getMessage(), e); - return new ArrayList(); - } finally { - close(rs); - close(find); - } - } - - /** - * Method that returns a Collection of ReducedBoLChunkDataTO associated to the - * given griduser, and whose SURLs are contained in the supplied array of - * Strings. - */ - public synchronized Collection findReduced( - String griduser, int[] surlUniqueIDs, String[] surls) { - - if (!checkConnection()) { - log.error("BoL CHUNK DAO: findReduced - unable to get a valid connection!"); - return new ArrayList(); - } - PreparedStatement find = null; - ResultSet rs = null; - try { - /* - * NOTE: we search also on the fromSurl because otherwise we lost all - * request_Bol that have not the uniqueID set because are not yet been - * used by anybody - */ - // get reduced chunks - String str = "SELECT sb.statusCode, rb.ID, rb.sourceSURL, rb.normalized_sourceSURL_StFN, rb.sourceSURL_uniqueID " - + "FROM request_queue rq JOIN (request_BoL rb, status_BoL sb) " - + "ON (rb.request_queueID=rq.ID AND sb.request_BoLID=rb.ID) " - + "WHERE rq.client_dn=? 
AND ( rb.sourceSURL_uniqueID IN " - + makeSURLUniqueIDWhere(surlUniqueIDs) - + " AND rb.sourceSURL IN " - + makeSurlString(surls) + " ) "; - find = con.prepareStatement(str); - logWarnings(con.getWarnings()); - - ArrayList list = new ArrayList(); - find.setString(1, griduser); - logWarnings(find.getWarnings()); - - log.trace("BoL CHUNK DAO! findReduced with griduser+surlarray; {}", find.toString()); - rs = find.executeQuery(); - logWarnings(find.getWarnings()); - - ReducedBoLChunkDataTO chunkDataTO = null; - while (rs.next()) { - chunkDataTO = new ReducedBoLChunkDataTO(); - chunkDataTO.setStatus(rs.getInt("sb.statusCode")); - chunkDataTO.setPrimaryKey(rs.getLong("rb.ID")); - chunkDataTO.setFromSURL(rs.getString("rb.sourceSURL")); - chunkDataTO.setNormalizedStFN(rs - .getString("rb.normalized_sourceSURL_StFN")); - int uniqueID = rs.getInt("rb.sourceSURL_uniqueID"); - if (!rs.wasNull()) { - chunkDataTO.setSurlUniqueID(uniqueID); - } - list.add(chunkDataTO); - } - return list; - } catch (SQLException e) { - log.error("BoL CHUNK DAO: {}", e.getMessage(), e); - return new ArrayList(); - } finally { - close(rs); - close(find); - } - } - - /** - * Method that returns the number of BoL requests on the given SURL, that are - * in SRM_SUCCESS state. This method is intended to be used by BoLChunkCatalog - * in the isSRM_SUCCESS method invocation. In case of any error, 0 is - * returned. - */ - public synchronized int numberInSRM_SUCCESS(int surlUniqueID) { - - if (!checkConnection()) { - log.error("BoL CHUNK DAO: numberInSRM_SUCCESS - unable to get a valid connection!"); - return 0; - } - String str = "SELECT COUNT(rb.ID) " - + "FROM status_BoL sb JOIN request_BoL rb " - + "ON (sb.request_BoLID=rb.ID) " - + "WHERE rb.sourceSURL_uniqueID=? AND sb.statusCode=?"; - PreparedStatement find = null; - ResultSet rs = null; - try { - find = con.prepareStatement(str); - logWarnings(con.getWarnings()); - /* Prepared statement spares DB-specific String notation! 
*/ - find.setInt(1, surlUniqueID); - logWarnings(find.getWarnings()); - - find.setInt(2, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_SUCCESS)); - logWarnings(find.getWarnings()); - - log.trace("BoL CHUNK DAO - numberInSRM_SUCCESS method: {}", find.toString()); - rs = find.executeQuery(); - logWarnings(find.getWarnings()); - - int numberFileSuccessful = 0; - if (rs.next()) { - numberFileSuccessful = rs.getInt(1); - } - return numberFileSuccessful; - } catch (SQLException e) { - log.error("BoL CHUNK DAO! Unable to determine numberInSRM_SUCCESS! " - + "Returning 0! ", e.getMessage(), e); - return 0; - } finally { - close(rs); - close(find); - } - } - - /** - * Method used in extraordinary situations to signal that data retrieved from - * the DB was malformed and could not be translated into the StoRM object - * model. This method attempts to change the status of the request to - * SRM_FAILURE and record it in the DB. This operation could potentially fail - * because the source of the malformed problems could be a problematic DB; - * indeed, initially only log messages where recorded. Yet it soon became - * clear that the source of malformed data were the clients and/or FE - * recording info in the DB. In these circumstances the client would see its - * request as being in the SRM_IN_PROGRESS state for ever. Hence the pressing - * need to inform it of the encountered problems. - */ - public synchronized void signalMalformedBoLChunk(BoLChunkDataTO auxTO) { - - if (!checkConnection()) { - log.error("BoL CHUNK DAO: signalMalformedBoLChunk - unable to get a valid connection!"); - return; - } - String signalSQL = "UPDATE status_BoL SET statusCode=" - + StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_FAILURE) - + ", explanation=? WHERE request_BoLID=" + auxTO.getPrimaryKey(); - PreparedStatement signal = null; - try { - signal = con.prepareStatement(signalSQL); - logWarnings(con.getWarnings()); - /* Prepared statement spares DB-specific String notation! 
*/ - signal.setString(1, "Request is malformed!"); - logWarnings(signal.getWarnings()); - - log.trace("BoL CHUNK DAO: signalMalformed; {}", signal.toString()); - signal.executeUpdate(); - logWarnings(signal.getWarnings()); - } catch (SQLException e) { - log.error("BoLChunkDAO! Unable to signal in DB that the request was " - + "malformed! Request: {}; Exception: {}", auxTO.toString(), - e.toString(), e); - } finally { - close(signal); - } - } - - /** - * Method that updates all expired requests in SRM_SUCCESS state, into - * SRM_RELEASED. This is needed when the client forgets to invoke - * srmReleaseFiles(). - * - * @return - */ - public synchronized List transitExpiredSRM_SUCCESS() { - - if (!checkConnection()) { - log.error("BoL CHUNK DAO: transitExpiredSRM_SUCCESS - unable to get a valid connection!"); - return new ArrayList(); - } - - HashMap expiredSurlMap = new HashMap(); - String str = null; - PreparedStatement prepStatement = null; - - /* Find all expired surls */ - try { - // start transaction - con.setAutoCommit(false); - - str = "SELECT rb.sourceSURL , rb.sourceSURL_uniqueID FROM " - + "request_BoL rb JOIN (status_BoL sb, request_queue rq) ON sb.request_BoLID=rb.ID AND rb.request_queueID=rq.ID " - + "WHERE sb.statusCode=?" - + " AND UNIX_TIMESTAMP(NOW())-UNIX_TIMESTAMP(rq.timeStamp) >= rq.pinLifetime "; - - prepStatement = con.prepareStatement(str); - prepStatement.setInt(1, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_SUCCESS)); - - ResultSet res = prepStatement.executeQuery(); - logWarnings(prepStatement.getWarnings()); - - while (res.next()) { - String sourceSURL = res.getString("rb.sourceSURL"); - Integer uniqueID = new Integer(res.getInt("rb.sourceSURL_uniqueID")); - /* If the uniqueID is not setted compute it */ - if (res.wasNull()) { - try { - TSURL tsurl = TSURL.makeFromStringWellFormed(sourceSURL); - uniqueID = tsurl.uniqueId(); - } catch (InvalidTSURLAttributesException e) { - log.warn("BoLChunkDAO! 
unable to build the TSURL from {}: " - + "InvalidTSURLAttributesException ", sourceSURL, e.getMessage()); - } - } - expiredSurlMap.put(sourceSURL, uniqueID); - } - - if (expiredSurlMap.isEmpty()) { - commit(con); - log.trace("BoLChunkDAO! No chunk of BoL request was transited from " - + "SRM_SUCCESS to SRM_RELEASED."); - return new ArrayList(); - } - } catch (SQLException e) { - log.error("BoLChunkDAO! SQLException.", e.getMessage(), e); - rollback(con); - return new ArrayList(); - } finally { - close(prepStatement); - } - - /* Update status of all successful surls to SRM_RELEASED */ - - prepStatement = null; - try { - - str = "UPDATE " - + "status_BoL sb JOIN (request_BoL rb, request_queue rq) ON sb.request_BoLID=rb.ID AND rb.request_queueID=rq.ID " - + "SET sb.statusCode=? " - + "WHERE sb.statusCode=? AND UNIX_TIMESTAMP(NOW())-UNIX_TIMESTAMP(rq.timeStamp) >= rq.pinLifetime "; - - prepStatement = con.prepareStatement(str); - logWarnings(con.getWarnings()); - - prepStatement.setInt(1, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_RELEASED)); - logWarnings(prepStatement.getWarnings()); - - prepStatement.setInt(2, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_SUCCESS)); - logWarnings(prepStatement.getWarnings()); - - log.trace("BoL CHUNK DAO - transitExpiredSRM_SUCCESS method: {}", - prepStatement.toString()); - - int count = prepStatement.executeUpdate(); - logWarnings(prepStatement.getWarnings()); - - if (count == 0) { - log.trace("BoLChunkDAO! No chunk of BoL request was" - + " transited from SRM_SUCCESS to SRM_RELEASED."); - } else { - log.info("BoLChunkDAO! {} chunks of BoL requests were transited from " - + "SRM_SUCCESS to SRM_RELEASED.", count); - } - } catch (SQLException e) { - log.error("BoLChunkDAO! Unable to transit expired SRM_SUCCESS chunks of " - + "BoL requests, to SRM_RELEASED! 
", e.getMessage(), e); - rollback(con); - return new ArrayList(); - } finally { - close(prepStatement); - } - - /* - * in order to enhance performance here we can check if there is any file - * system with tape (T1D0, T1D1), if there is not any we can skip the - * following - */ - - /* Find all not expired surls from PtG */ - - HashSet pinnedSurlSet = new HashSet(); - try { - // SURLs pinned by BoLs - str = "SELECT rb.sourceSURL , rb.sourceSURL_uniqueID FROM " - + "request_BoL rb JOIN (status_BoL sb, request_queue rq) ON sb.request_BoLID=rb.ID AND rb.request_queueID=rq.ID " - + "WHERE sb.statusCode=" - + StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_SUCCESS) - + " AND UNIX_TIMESTAMP(NOW())-UNIX_TIMESTAMP(rq.timeStamp) < rq.pinLifetime "; - - ResultSet res = null; - - prepStatement = con.prepareStatement(str); - res = prepStatement.executeQuery(); - logWarnings(prepStatement.getWarnings()); - - while (res.next()) { - String sourceSURL = res.getString("rb.sourceSURL"); - Integer uniqueID = new Integer(res.getInt("rb.sourceSURL_uniqueID")); - /* If the uniqueID is not setted compute it */ - if (res.wasNull()) { - try { - TSURL tsurl = TSURL.makeFromStringWellFormed(sourceSURL); - uniqueID = tsurl.uniqueId(); - } catch (InvalidTSURLAttributesException e) { - log.warn("BoLChunkDAO! unable to build the TSURL from {}: " - + "InvalidTSURLAttributesException ", sourceSURL, e.getMessage()); - } - } - pinnedSurlSet.add(uniqueID); - } - - close(prepStatement); - - str = "SELECT rg.sourceSURL , rg.sourceSURL_uniqueID FROM " - + "request_Get rg JOIN (status_Get sg, request_queue rq) ON sg.request_GetID=rg.ID AND rg.request_queueID=rq.ID " - + "WHERE sg.statusCode=?" 
- + " AND UNIX_TIMESTAMP(NOW())-UNIX_TIMESTAMP(rq.timeStamp) < rq.pinLifetime "; - - prepStatement = con.prepareStatement(str); - - prepStatement.setInt(1, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_FILE_PINNED)); - - res = prepStatement.executeQuery(); - logWarnings(prepStatement.getWarnings()); - - while (res.next()) { - String sourceSURL = res.getString("rg.sourceSURL"); - Integer uniqueID = new Integer(res.getInt("rg.sourceSURL_uniqueID")); - /* If the uniqueID is not setted compute it */ - if (res.wasNull()) { - try { - TSURL tsurl = TSURL.makeFromStringWellFormed(sourceSURL); - uniqueID = tsurl.uniqueId(); - } catch (InvalidTSURLAttributesException e) { - log.warn("BoLChunkDAO! unable to build the TSURL from {}: " - + "InvalidTSURLAttributesException {}", sourceSURL, e.getMessage()); - } - } - pinnedSurlSet.add(uniqueID); - } - - commit(con); - - } catch (SQLException e) { - log.error("BoLChunkDAO! SQLException. {}", e.getMessage(), e); - rollback(con); - } finally { - close(prepStatement); - } - - /* Remove the Extended Attribute pinned if there is not a valid surl on it */ - ArrayList expiredSurlList = new ArrayList(); - TSURL surl; - for (Entry surlEntry : expiredSurlMap.entrySet()) { - if (!pinnedSurlSet.contains(surlEntry.getValue())) { - try { - surl = TSURL.makeFromStringValidate(surlEntry.getKey()); - } catch (InvalidTSURLAttributesException e) { - log.error("Invalid SURL, cannot release the pin " - + "(Extended Attribute): {}", surlEntry.getKey()); - continue; - } - expiredSurlList.add(surl); - StoRI stori; - try { - stori = NamespaceDirector.getNamespace().resolveStoRIbySURL(surl); - } catch (Throwable e) { - log.error("Invalid SURL {} cannot release the pin. 
{}: {}", - surlEntry.getKey(), e.getClass().getCanonicalName(), e.getMessage()); - continue; - } - - if (stori.getVirtualFileSystem().getStorageClassType().isTapeEnabled()) { - StormEA.removePinned(stori.getAbsolutePath()); - } - } - } - return expiredSurlList; - } - - /** - * Method that transits chunks in SRM_SUCCESS to SRM_ABORTED, for the given - * SURL: the overall request status of the requests containing that chunk, is - * not changed! The TURL is set to null. Beware, that the chunks may be part - * of requests that have finished, or that still have not finished because - * other chunks are still being processed. - */ - public synchronized void transitSRM_SUCCESStoSRM_ABORTED(int surlUniqueID, - String surl, String explanation) { - - if (!checkConnection()) { - log.error("BoL CHUNK DAO: transitSRM_SUCCESStoSRM_ABORTED - unable to get a valid connection!"); - return; - } - String str = "UPDATE " - + "status_BoL sb JOIN request_BoL rb ON sb.request_BoLID=rb.ID " - + "SET sb.statusCode=?, sb.explanation=?, sb.transferURL=NULL " - + "WHERE sb.statusCode=? AND (rb.sourceSURL_uniqueID=? OR rb.targetSURL=?)"; - PreparedStatement stmt = null; - try { - stmt = con.prepareStatement(str); - logWarnings(con.getWarnings()); - stmt.setInt(1, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_ABORTED)); - logWarnings(stmt.getWarnings()); - - stmt.setString(2, explanation); - logWarnings(stmt.getWarnings()); - - stmt.setInt(3, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_SUCCESS)); - logWarnings(stmt.getWarnings()); - - stmt.setInt(4, surlUniqueID); - logWarnings(stmt.getWarnings()); - - stmt.setString(5, surl); - logWarnings(stmt.getWarnings()); - - log.trace("BoL CHUNK DAO - transitSRM_SUCCESStoSRM_ABORTED: {}", stmt.toString()); - int count = stmt.executeUpdate(); - logWarnings(stmt.getWarnings()); - if (count > 0) { - log.info("BoL CHUNK DAO! 
{} chunks were transited from SRM_SUCCESS " - + "to SRM_ABORTED.", count); - } else { - log.trace("BoL CHUNK DAO! No chunks were transited from SRM_SUCCESS " - + "to SRM_ABORTED."); - } - } catch (SQLException e) { - log.error("BoL CHUNK DAO! Unable to transitSRM_SUCCESStoSRM_ABORTED! {}", - e.getMessage(), e); - } finally { - close(stmt); - } - } - - /** - * Method that updates all chunks in SRM_SUCCESS state, into SRM_RELEASED. An - * array of long representing the primary key of each chunk is required: only - * they get the status changed provided their current status is SRM_SUCCESS. - * This method is used during srmReleaseFiles In case of any error nothing - * happens and no exception is thrown, but proper messages get logged. - */ - public synchronized void transitSRM_SUCCESStoSRM_RELEASED(long[] ids) { - - if (!checkConnection()) { - log.error("BoL CHUNK DAO: transitSRM_SUCCESStoSRM_RELEASED - unable to get a valid connection!"); - return; - } - String str = "UPDATE status_BoL SET statusCode=? " - + "WHERE statusCode=? AND request_BoLID IN " + makeWhereString(ids); - PreparedStatement stmt = null; - try { - stmt = con.prepareStatement(str); - logWarnings(con.getWarnings()); - stmt.setInt(1, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_RELEASED)); - logWarnings(stmt.getWarnings()); - - stmt.setInt(2, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_SUCCESS)); - logWarnings(stmt.getWarnings()); - - log.trace("BoL CHUNK DAO - transitSRM_SUCCESStoSRM_RELEASED: {}", - stmt.toString()); - int count = stmt.executeUpdate(); - logWarnings(stmt.getWarnings()); - if (count == 0) { - log.trace("BoL CHUNK DAO! No chunk of BoL request " - + "was transited from SRM_SUCCESS to SRM_RELEASED."); - } else { - log.info("BoL CHUNK DAO! {} chunks of BoL requests were transited " - + "from SRM_SUCCESS to SRM_RELEASED.", count); - } - } catch (SQLException e) { - log.error("BoL CHUNK DAO! Unable to transit chunks from SRM_SUCCESS " - + "to SRM_RELEASED! 
{}", e.getMessage(), e); - } finally { - close(stmt); - } - } - - public synchronized void transitSRM_SUCCESStoSRM_RELEASED(long[] ids, - TRequestToken token) { - - if (token == null) { - transitSRM_SUCCESStoSRM_RELEASED(ids); - } else { - /* - * If a request token has been specified, only the related BoL requests - * have to be released. This is done adding the r.r_token="..." clause in - * the where subquery. - */ - if (!checkConnection()) { - log.error("BoL CHUNK DAO: transitSRM_SUCCESStoSRM_RELEASED - unable to get a valid connection!"); - return; - } - String str = "UPDATE " - + "status_BoL sb JOIN (request_BoL rb, request_queue rq) ON sb.request_BoLID=rb.ID AND rb.request_queueID=rq.ID " - + "SET sb.statusCode=? " + "WHERE sb.statusCode=? AND rq.r_token='" - + token.toString() + "' AND rb.ID IN " + makeWhereString(ids); - PreparedStatement stmt = null; - try { - stmt = con.prepareStatement(str); - logWarnings(con.getWarnings()); - stmt.setInt(1, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_RELEASED)); - logWarnings(stmt.getWarnings()); - - stmt.setInt(2, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_SUCCESS)); - logWarnings(stmt.getWarnings()); - - log.trace("BoL CHUNK DAO - transitSRM_SUCCESStoSRM_RELEASED: {}", - stmt.toString()); - int count = stmt.executeUpdate(); - logWarnings(stmt.getWarnings()); - if (count == 0) { - log.trace("BoL CHUNK DAO! No chunk of BoL request was " - + "transited from SRM_SUCCESS to SRM_RELEASED."); - } else { - log.info("BoL CHUNK DAO! {} chunks of BoL requests were transited " - + "from SRM_SUCCESS to SRM_RELEASED.", count); - } - } catch (SQLException e) { - log.error("BoL CHUNK DAO! Unable to transit chunks " - + "from SRM_SUCCESS to SRM_RELEASED! 
{}", e.getMessage(), e); - } finally { - close(stmt); - } - } - } - - /** - * Auxiliary method used to close a ResultSet - */ - private void close(ResultSet rset) { - - if (rset != null) { - try { - rset.close(); - } catch (Exception e) { - log.error("BoL CHUNK DAO! Unable to close ResultSet! Exception: " + e); - } - } - } - - /** - * Auxiliary method used to close a Statement - */ - private void close(Statement stmt) { - - if (stmt != null) { - try { - stmt.close(); - } catch (Exception e) { - log.error("BoL CHUNK DAO! Unable to close Statement {} - Exception: {}", - stmt.toString(), e.getMessage(), e); - } - } - } - - private void commit(Connection con) { - - if (con != null) { - try { - con.commit(); - con.setAutoCommit(true); - } catch (SQLException e) { - log.error("BoL, SQL EXception {}", e.getMessage(), e); - } - } - } - - /** - * Auxiliary method used to roll back a failed transaction - */ - private void rollback(Connection con) { - - if (con != null) { - try { - con.rollback(); - con.setAutoCommit(true); - log.error("BoL CHUNK DAO: roll back successful!"); - } catch (SQLException e2) { - log.error("BoL CHUNK DAO: roll back failed! {}", e2.getMessage(), e2); - } - } - } - - /** - * Private method that returns the generated ID: it throws an exception in - * case of any problem! - */ - private int extractID(ResultSet rs) throws Exception { - - if (rs == null) { - throw new Exception("BoL CHUNK DAO! Null ResultSet!"); - } - if (rs.next()) { - return rs.getInt(1); - } - log.error("BoL CHUNK DAO! It was not possible to establish " - + "the assigned autoincrement primary key!"); - throw new Exception( - "BoL CHUNK DAO! It was not possible to establish the assigned autoincrement primary key!"); - } - - /** - * Auxiliary private method that logs all SQL warnings. 
- */ - private void logWarnings(SQLWarning w) { - - if (w != null) { - log.debug("BoL CHUNK DAO: {}", w.toString()); - while ((w = w.getNextWarning()) != null) { - log.debug("BoL CHUNK DAO: {}", w.toString()); - } - } - } - - /** - * Method that returns a String containing all IDs. - */ - private String makeWhereString(long[] rowids) { - - StringBuilder sb = new StringBuilder("("); - int n = rowids.length; - for (int i = 0; i < n; i++) { - sb.append(rowids[i]); - if (i < (n - 1)) { - sb.append(","); - } - } - sb.append(")"); - return sb.toString(); - } - - /** - * Method that returns a String containing all Surl's IDs. - */ - private String makeSURLUniqueIDWhere(int[] surlUniqueIDs) { - - StringBuilder sb = new StringBuilder("("); - for (int i = 0; i < surlUniqueIDs.length; i++) { - if (i > 0) { - sb.append(","); - } - sb.append(surlUniqueIDs[i]); - } - sb.append(")"); - return sb.toString(); - } - - /** - * Method that returns a String containing all Surls. - */ - private String makeSurlString(String[] surls) { - - StringBuilder sb = new StringBuilder("("); - int n = surls.length; - - for (int i = 0; i < n; i++) { - - SURL requestedSURL; - - try { - requestedSURL = SURL.makeSURLfromString(surls[i]); - } catch (NamespaceException e) { - log.error(e.getMessage()); - log.debug("Skip '{}' during query creation", surls[i]); - continue; - } - - sb.append("'"); - sb.append(requestedSURL.getNormalFormAsString()); - sb.append("','"); - sb.append(requestedSURL.getQueryFormAsString()); - sb.append("'"); - - if (i < (n - 1)) { - sb.append(","); - } - } - - sb.append(")"); - return sb.toString(); - } - - /** - * Auxiliary method that sets up the connection to the DB, as well as the - * prepared statement. 
- */ - private boolean setUpConnection() { - - boolean response = false; - try { - Class.forName(driver); - con = DriverManager.getConnection(url, name, password); - logWarnings(con.getWarnings()); - response = con.isValid(0); - } catch (ClassNotFoundException | SQLException e) { - log.error("BoL CHUNK DAO! Exception in setUpConnection! {}", e.getMessage(), e); - } - return response; - } - - /** - * Auxiliary method that checks if time for resetting the connection has come, - * and eventually takes it down and up back again. - */ - private boolean checkConnection() { - - boolean response = true; - if (reconnect) { - log.debug("BoL CHUNK DAO! Reconnecting to DB! "); - takeDownConnection(); - response = setUpConnection(); - if (response) { - reconnect = false; - } - } - return response; - } - - /** - * Auxiliary method that tales down a connection to the DB. - */ - private void takeDownConnection() { - - if (con != null) { - try { - con.close(); - } catch (SQLException e) { - log.error("BoL CHUNK DAO! 
Exception in takeDownConnection method: {}", - e.getMessage(), e); - } - } - } - - public synchronized void updateStatusOnMatchingStatus( - TRequestToken requestToken, TStatusCode expectedStatusCode, - TStatusCode newStatusCode, String explanation) { - - if (requestToken == null || requestToken.getValue().trim().isEmpty() - || explanation == null) { - throw new IllegalArgumentException( - "Unable to perform the updateStatusOnMatchingStatus, " - + "invalid arguments: requestToken=" + requestToken + " explanation=" - + explanation); - } - doUpdateStatusOnMatchingStatus(requestToken, null, null, - expectedStatusCode, newStatusCode, explanation, true, false, true); - } - - public synchronized void updateStatusOnMatchingStatus( - TRequestToken requestToken, int[] surlsUniqueIDs, String[] surls, - TStatusCode expectedStatusCode, TStatusCode newStatusCode) - throws IllegalArgumentException { - - if (requestToken == null || requestToken.getValue().trim().isEmpty() - || surlsUniqueIDs == null || surls == null || surlsUniqueIDs.length == 0 - || surls.length == 0 || surlsUniqueIDs.length != surls.length) { - throw new IllegalArgumentException( - "Unable to perform the updateStatusOnMatchingStatus, " - + "invalid arguments: requestToken=" + requestToken - + "surlsUniqueIDs=" + surlsUniqueIDs + " surls=" + surls); - } - doUpdateStatusOnMatchingStatus(requestToken, surlsUniqueIDs, surls, - expectedStatusCode, newStatusCode, null, true, true, false); - } - - public synchronized void doUpdateStatusOnMatchingStatus( - TRequestToken requestToken, int[] surlUniqueIDs, String[] surls, - TStatusCode expectedStatusCode, TStatusCode newStatusCode, - String explanation, boolean withRequestToken, boolean withSurls, - boolean withExplanation) throws IllegalArgumentException { - - if ((withRequestToken && requestToken == null) - || (withExplanation && explanation == null) - || (withSurls && (surlUniqueIDs == null || surls == null))) { - throw new IllegalArgumentException( - "Unable to 
perform the doUpdateStatusOnMatchingStatus, " - + "invalid arguments: withRequestToken=" + withRequestToken - + " requestToken=" + requestToken + " withSurls=" + withSurls - + " surlUniqueIDs=" + surlUniqueIDs + " surls=" + surls - + " withExplaination=" + withExplanation + " explanation=" - + explanation); - } - if (!checkConnection()) { - log - .error("BOL CHUNK DAO: updateStatusOnMatchingStatus - unable to get a valid connection!"); - return; - } - String str = "UPDATE status_BoL sb JOIN (request_BoL rb, request_queue rq) " - + "ON sb.request_BoLID=rb.ID AND rb.request_queueID=rq.ID " - + "SET sb.statusCode=? "; - if (withExplanation) { - str += " , " + buildExpainationSet(explanation); - } - str += " WHERE sb.statusCode=? "; - if (withRequestToken) { - str += " AND " + buildTokenWhereClause(requestToken); - } - if (withSurls) { - str += " AND " + buildSurlsWhereClause(surlUniqueIDs, surls); - } - PreparedStatement stmt = null; - try { - stmt = con.prepareStatement(str); - logWarnings(con.getWarnings()); - stmt.setInt(1, StatusCodeConverter.getInstance().toDB(newStatusCode)); - logWarnings(stmt.getWarnings()); - - stmt - .setInt(2, StatusCodeConverter.getInstance().toDB(expectedStatusCode)); - logWarnings(stmt.getWarnings()); - - log.trace("BOL CHUNK DAO - updateStatusOnMatchingStatus: {}", stmt.toString()); - int count = stmt.executeUpdate(); - logWarnings(stmt.getWarnings()); - if (count == 0) { - log.trace("BOL CHUNK DAO! No chunk of BOL request was updated from {} " - + "to {}.", expectedStatusCode, newStatusCode); - } else { - log.debug("BOL CHUNK DAO! {} chunks of BOL requests were updated " - + "from {} to {}.", count, expectedStatusCode, newStatusCode); - } - } catch (SQLException e) { - log.error("BOL CHUNK DAO! 
Unable to updated from {} to {}!", - expectedStatusCode, newStatusCode, e); - } finally { - close(stmt); - } - } - - public Collection find(int[] surlsUniqueIDs, - String[] surlsArray, String dn) throws IllegalArgumentException { - - if (surlsUniqueIDs == null || surlsUniqueIDs.length == 0 - || surlsArray == null || surlsArray.length == 0 || dn == null) { - throw new IllegalArgumentException("Unable to perform the find, " - + "invalid arguments: surlsUniqueIDs=" + surlsUniqueIDs - + " surlsArray=" + surlsArray + " dn=" + dn); - } - return find(surlsUniqueIDs, surlsArray, dn, true); - } - - public Collection find(int[] surlsUniqueIDs, - String[] surlsArray) throws IllegalArgumentException { - - if (surlsUniqueIDs == null || surlsUniqueIDs.length == 0 - || surlsArray == null || surlsArray.length == 0) { - throw new IllegalArgumentException("Unable to perform the find, " - + "invalid arguments: surlsUniqueIDs=" + surlsUniqueIDs - + " surlsArray=" + surlsArray); - } - return find(surlsUniqueIDs, surlsArray, null, false); - } - - private synchronized Collection find(int[] surlsUniqueIDs, - String[] surlsArray, String dn, boolean withDn) - throws IllegalArgumentException { - - if ((withDn && dn == null) || surlsUniqueIDs == null - || surlsUniqueIDs.length == 0 || surlsArray == null - || surlsArray.length == 0) { - throw new IllegalArgumentException("Unable to perform the find, " - + "invalid arguments: surlsUniqueIDs=" + surlsUniqueIDs - + " surlsArray=" + surlsArray + " withDn=" + withDn + " dn=" + dn); - } - if (!checkConnection()) { - log.error("BoL CHUNK DAO: find - unable to get a valid connection!"); - return new ArrayList(); - } - PreparedStatement find = null; - ResultSet rs = null; - try { - // get chunks of the request - String str = "SELECT rq.ID, rq.r_token, sb.statusCode, rq.timeStamp, rq.pinLifetime, " - + "rq.deferredStartTime, rb.ID, rb.sourceSURL, rb.normalized_sourceSURL_StFN, " - + "rb.sourceSURL_uniqueID, d.isSourceADirectory, d.allLevelRecursive, 
d.numOfLevels " - + "FROM request_queue rq JOIN (request_BoL rb, status_BoL sb) " - + "ON (rb.request_queueID=rq.ID AND sb.request_BoLID=rb.ID) " - + "LEFT JOIN request_DirOption d ON rb.request_DirOptionID=d.ID " - + "WHERE ( rb.sourceSURL_uniqueID IN " - + makeSURLUniqueIDWhere(surlsUniqueIDs) - + " AND rb.sourceSURL IN " - + makeSurlString(surlsArray) + " )"; - if (withDn) { - str += " AND rq.client_dn=\'" + dn + "\'"; - } - find = con.prepareStatement(str); - logWarnings(con.getWarnings()); - - List list = new ArrayList(); - - log.trace("BOL CHUNK DAO - find method: {}", find.toString()); - rs = find.executeQuery(); - logWarnings(find.getWarnings()); - BoLChunkDataTO chunkDataTO = null; - while (rs.next()) { - - chunkDataTO = new BoLChunkDataTO(); - chunkDataTO.setStatus(rs.getInt("sb.statusCode")); - chunkDataTO.setLifeTime(rs.getInt("rq.pinLifetime")); - chunkDataTO.setDeferredStartTime(rs.getInt("rq.deferredStartTime")); - chunkDataTO.setRequestToken(rs.getString("rq.r_token")); - chunkDataTO.setTimeStamp(rs.getTimestamp("rq.timeStamp")); - chunkDataTO.setPrimaryKey(rs.getLong("rb.ID")); - chunkDataTO.setFromSURL(rs.getString("rb.sourceSURL")); - - chunkDataTO.setNormalizedStFN(rs - .getString("rb.normalized_sourceSURL_StFN")); - int uniqueID = rs.getInt("rb.sourceSURL_uniqueID"); - if (!rs.wasNull()) { - chunkDataTO.setSurlUniqueID(new Integer(uniqueID)); - } - - chunkDataTO.setDirOption(rs.getBoolean("d.isSourceADirectory")); - chunkDataTO.setAllLevelRecursive(rs.getBoolean("d.allLevelRecursive")); - chunkDataTO.setNumLevel(rs.getInt("d.numOfLevels")); - - list.add(chunkDataTO); - } - return list; - } catch (SQLException e) { - log.error("BOL CHUNK DAO: {}", e.getMessage(), e); - /* return empty Collection! 
*/ - return new ArrayList(); - } finally { - close(rs); - close(find); - } - } - - private String buildExpainationSet(String explanation) { - - return " sb.explanation='" + explanation + "' "; - } - - private String buildTokenWhereClause(TRequestToken requestToken) { - - return " rq.r_token='" + requestToken.toString() + "' "; - } - - private String buildSurlsWhereClause(int[] surlsUniqueIDs, String[] surls) { - - return " ( rb.sourceSURL_uniqueID IN " - + makeSURLUniqueIDWhere(surlsUniqueIDs) + " AND rb.sourceSURL IN " - + makeSurlString(surls) + " ) "; - } - -} diff --git a/src/main/java/it/grid/storm/catalogs/BoLChunkDataTO.java b/src/main/java/it/grid/storm/catalogs/BoLChunkDataTO.java deleted file mode 100644 index 3503e1c7a..000000000 --- a/src/main/java/it/grid/storm/catalogs/BoLChunkDataTO.java +++ /dev/null @@ -1,269 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.catalogs; - -import it.grid.storm.common.types.TURLPrefix; -import it.grid.storm.namespace.model.Protocol; -import it.grid.storm.srm.types.TStatusCode; - -import java.sql.Timestamp; -import java.util.List; - -/** - * Class that represents a row in the Persistence Layer: this is all raw data - * referring to the BoLChunkData proper, that is, String and primitive types. 
- * - * Each field is initialized with default values as per SRM 2.2 specification: - * protocolList GSIFTP dirOption false status SRM_REQUEST_QUEUED - * - * All other fields are 0 if int, or a white space if String. - * - * @author CNAF - * @version 1.0 - * @date Aug 2009 - */ -public class BoLChunkDataTO { - - /* Database table request_Bol fields BEGIN */ - private long primaryKey = -1; // ID primary key of record in DB - private String fromSURL = " "; - private boolean dirOption; // initialised in constructor - private String normalizedStFN = null; - private Integer surlUniqueID = null; - /* Database table request_Get fields END */ - - private String requestToken = " "; - private int lifetime = 0; - private boolean allLevelRecursive; // initialised in constructor - private int numLevel; // initialised in constructor - private List protocolList = null; // initialised in constructor - private long filesize = 0; - private int status; // initialised in constructor - private String errString = " "; - private int deferredStartTime = -1; - private Timestamp timeStamp = null; - - public BoLChunkDataTO() { - - TURLPrefix protocolPreferences = new TURLPrefix(); - protocolPreferences.addProtocol(Protocol.GSIFTP); - this.protocolList = TransferProtocolListConverter.toDB(protocolPreferences); - this.status = StatusCodeConverter.getInstance().toDB( - TStatusCode.SRM_REQUEST_QUEUED); - this.dirOption = false; - this.allLevelRecursive = false; - this.numLevel = 0; - } - - public boolean getAllLevelRecursive() { - - return allLevelRecursive; - } - - public int getDeferredStartTime() { - - return deferredStartTime; - } - - public boolean getDirOption() { - - return dirOption; - } - - public String getErrString() { - - return errString; - } - - public long getFileSize() { - - return filesize; - } - - public String getFromSURL() { - - return fromSURL; - } - - public int getLifeTime() { - - return lifetime; - } - - public int getNumLevel() { - - return numLevel; - } - - public long 
getPrimaryKey() { - - return primaryKey; - } - - public List getProtocolList() { - - return protocolList; - } - - public String getRequestToken() { - - return requestToken; - } - - public Timestamp getTimeStamp() { - - return timeStamp; - } - - public int getStatus() { - - return status; - } - - public void setAllLevelRecursive(boolean b) { - - allLevelRecursive = b; - } - - public void setDeferredStartTime(int deferredStartTime) { - - this.deferredStartTime = deferredStartTime; - } - - public void setDirOption(boolean b) { - - dirOption = b; - } - - public void setErrString(String s) { - - errString = s; - } - - public void setFileSize(long n) { - - filesize = n; - } - - public void setFromSURL(String s) { - - fromSURL = s; - } - - public void setLifeTime(int n) { - - lifetime = n; - } - - public void setNumLevel(int n) { - - numLevel = n; - } - - public void setPrimaryKey(long n) { - - primaryKey = n; - } - - public void setProtocolList(List l) { - - if ((l != null) && (!l.isEmpty())) { - protocolList = l; - } - } - - public void setRequestToken(String s) { - - requestToken = s; - } - - public void setTimeStamp(Timestamp timeStamp) { - - this.timeStamp = timeStamp; - } - - public void setStatus(int n) { - - status = n; - } - - /** - * @param normalizedStFN - * the normalizedStFN to set - */ - public void setNormalizedStFN(String normalizedStFN) { - - this.normalizedStFN = normalizedStFN; - } - - /** - * @return the normalizedStFN - */ - public String normalizedStFN() { - - return normalizedStFN; - } - - /** - * @param surlUniqueID - * the sURLUniqueID to set - */ - public void setSurlUniqueID(Integer surlUniqueID) { - - this.surlUniqueID = surlUniqueID; - } - - /** - * @return the sURLUniqueID - */ - public Integer sulrUniqueID() { - - return surlUniqueID; - } - - public String toString() { - - StringBuilder sb = new StringBuilder(); - sb.append(primaryKey); - sb.append(" "); - sb.append(requestToken); - sb.append(" "); - sb.append(fromSURL); - sb.append(" "); - 
sb.append(normalizedStFN); - sb.append(" "); - sb.append(surlUniqueID); - sb.append(" "); - sb.append(lifetime); - sb.append(" "); - sb.append(dirOption); - sb.append(" "); - sb.append(allLevelRecursive); - sb.append(" "); - sb.append(numLevel); - sb.append(" "); - sb.append(protocolList); - sb.append(" "); - sb.append(filesize); - sb.append(" "); - sb.append(status); - sb.append(" "); - sb.append(errString); - return sb.toString(); - } -} diff --git a/src/main/java/it/grid/storm/catalogs/BoLData.java b/src/main/java/it/grid/storm/catalogs/BoLData.java deleted file mode 100644 index a96bd1d6f..000000000 --- a/src/main/java/it/grid/storm/catalogs/BoLData.java +++ /dev/null @@ -1,157 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.catalogs; - -import it.grid.storm.common.types.TURLPrefix; -import it.grid.storm.common.types.TimeUnit; -import it.grid.storm.srm.types.TDirOption; -import it.grid.storm.srm.types.TLifeTimeInSeconds; -import it.grid.storm.srm.types.TReturnStatus; -import it.grid.storm.srm.types.TSURL; -import it.grid.storm.srm.types.TSizeInBytes; -import it.grid.storm.srm.types.TStatusCode; -import it.grid.storm.srm.types.TTURL; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * This class represents a BringOnLineChunkData, that is part of a multifile - * BringOnLine srm request. 
It contains data about: the requestToken, the - * fromSURL, the requested lifeTime of pinning, the TDirOption which tells - * whether the requested SURL is a directory and if it must be recursed at all - * levels, as well as the desired number of levels to recurse, the desired - * transferProtocols in order of preference, the fileSize, and the transferURL - * for the supplied SURL. - * - * @author CNAF - * @version 1.0 - * @date Aug 2009 - */ -public class BoLData extends AnonymousFileTransferData { - - private static final Logger log = LoggerFactory.getLogger(BoLData.class); - - /** - * requested lifetime of TURL: it is the pin time! - */ - private TLifeTimeInSeconds lifeTime; - - /** - * specifies if the request regards a directory and related info - */ - private TDirOption dirOption; - - /** - * size of file - */ - private TSizeInBytes fileSize; - - /** - * how many seconds to wait before to make the lifeTime start consuming - */ - private int deferredStartTime = 0; - - public BoLData(TSURL fromSURL, TLifeTimeInSeconds lifeTime, - TDirOption dirOption, TURLPrefix desiredProtocols, TSizeInBytes fileSize, - TReturnStatus status, TTURL transferURL, int deferredStartTime) - throws InvalidFileTransferDataAttributesException, - InvalidBoLDataAttributesException, - InvalidSurlRequestDataAttributesException { - - super(fromSURL, desiredProtocols, status, transferURL); - if (lifeTime == null || dirOption == null || fileSize == null) { - throw new InvalidBoLDataAttributesException(fromSURL, lifeTime, - dirOption, desiredProtocols, fileSize, status, transferURL); - } - this.lifeTime = lifeTime; - this.dirOption = dirOption; - this.fileSize = fileSize; - this.deferredStartTime = deferredStartTime; - } - - /** - * Method that sets the status of this request to SRM_FILE_PINNED; it needs - * the explanation String which describes the situation in greater detail; if - * a null is passed, then an empty String is used as explanation. 
- */ - public void changeStatusSRM_FILE_PINNED(String explanation) { - - setStatus(TStatusCode.SRM_FILE_PINNED, explanation); - } - - public int getDeferredStartTime() { - - return deferredStartTime; - } - - /** - * Method that returns the dirOption specified in the srm request. - */ - public TDirOption getDirOption() { - - return dirOption; - } - - /** - * Method that returns the file size for this chunk of the srm request. - */ - public TSizeInBytes getFileSize() { - - return fileSize; - } - - /** - * Method that returns the requested pin life time for this chunk of the srm - * request. - */ - public TLifeTimeInSeconds getLifeTime() { - - return lifeTime; - } - - public void setDeferredStartTime(int deferredStartTime) { - - this.deferredStartTime = deferredStartTime; - } - - /** - * Method used to set the size of the file corresponding to the requested - * SURL. If the supplied TSizeInByte is null, then nothing gets set! - */ - public void setFileSize(TSizeInBytes size) { - - if (size != null) { - fileSize = size; - } - } - - public void setLifeTime(long lifeTimeInSeconds) { - - TLifeTimeInSeconds lifeTime; - try { - lifeTime = TLifeTimeInSeconds.make(lifeTimeInSeconds, TimeUnit.SECONDS); - } catch (IllegalArgumentException e) { - log.error(e.getMessage(), e); - return; - } - - this.lifeTime = lifeTime; - } - -} diff --git a/src/main/java/it/grid/storm/catalogs/BoLPersistentChunkData.java b/src/main/java/it/grid/storm/catalogs/BoLPersistentChunkData.java deleted file mode 100644 index 651686cba..000000000 --- a/src/main/java/it/grid/storm/catalogs/BoLPersistentChunkData.java +++ /dev/null @@ -1,114 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. 
You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.catalogs; - -import it.grid.storm.common.types.TURLPrefix; -import it.grid.storm.srm.types.TDirOption; -import it.grid.storm.srm.types.TLifeTimeInSeconds; -import it.grid.storm.srm.types.TRequestToken; -import it.grid.storm.srm.types.TReturnStatus; -import it.grid.storm.srm.types.TSURL; -import it.grid.storm.srm.types.TSizeInBytes; -import it.grid.storm.srm.types.TTURL; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * This class represents a BringOnLineChunkData, that is part of a multifile - * BringOnLine srm request. It contains data about: the requestToken, the - * fromSURL, the requested lifeTime of pinning, the TDirOption which tells - * whether the requested SURL is a directory and if it must be recursed at all - * levels, as well as the desired number of levels to recurse, the desired - * transferProtocols in order of preference, the fileSize, and the transferURL - * for the supplied SURL. 
- * - * @author CNAF - * @version 1.0 - * @date Aug 2009 - */ -public class BoLPersistentChunkData extends BoLData implements - PersistentChunkData { - - private static final Logger log = LoggerFactory - .getLogger(BoLPersistentChunkData.class); - - /** - * long representing the primary key for the persistence layer, in the - * status_Put table - */ - private long primaryKey = -1; - - /** - * This is the requestToken of the multifile srm request to which this chunk - * belongs - */ - private final TRequestToken requestToken; - - public BoLPersistentChunkData(TRequestToken requestToken, TSURL fromSURL, - TLifeTimeInSeconds lifeTime, TDirOption dirOption, - TURLPrefix desiredProtocols, TSizeInBytes fileSize, TReturnStatus status, - TTURL transferURL, int deferredStartTime) - throws InvalidBoLPersistentChunkDataAttributesException, - InvalidFileTransferDataAttributesException, - InvalidBoLDataAttributesException, - InvalidSurlRequestDataAttributesException { - - super(fromSURL, lifeTime, dirOption, desiredProtocols, fileSize, status, - transferURL, deferredStartTime); - if (requestToken == null) { - log.debug("BoLPersistentChunkData: requestToken is null!"); - throw new InvalidBoLPersistentChunkDataAttributesException(requestToken, - fromSURL, lifeTime, dirOption, desiredProtocols, fileSize, status, - transferURL); - } - this.requestToken = requestToken; - } - - /** - * Method that returns the requestToken of the srm request to which this chunk - * belongs. - */ - public TRequestToken getRequestToken() { - - return requestToken; - } - - /** - * Method used to get the primary key used in the persistence layer! - */ - @Override - public long getPrimaryKey() { - - return primaryKey; - } - - /** - * Method used to set the primary key to be used in the persistence layer! 
- */ - public void setPrimaryKey(long l) { - - primaryKey = l; - } - - @Override - public long getIdentifier() { - - return getPrimaryKey(); - } -} diff --git a/src/main/java/it/grid/storm/catalogs/ChunkDAOUtils.java b/src/main/java/it/grid/storm/catalogs/ChunkDAOUtils.java index d7775310f..e8be1daf4 100644 --- a/src/main/java/it/grid/storm/catalogs/ChunkDAOUtils.java +++ b/src/main/java/it/grid/storm/catalogs/ChunkDAOUtils.java @@ -1,30 +1,9 @@ package it.grid.storm.catalogs; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.sql.SQLWarning; - public class ChunkDAOUtils { - private static final Logger log = LoggerFactory.getLogger(ChunkDAOUtils.class); - protected ChunkDAOUtils() {} - public static void printWarnings(SQLWarning warning) { - - if (warning != null) { - log.warn("---Warning---"); - - while (warning != null) { - log.warn("Message: {}", warning.getMessage()); - log.warn("SQLState: {}", warning.getSQLState()); - log.warn("Vendor error code: {}", warning.getErrorCode()); - warning = warning.getNextWarning(); - } - } - } - public static String buildInClauseForArray(int size) { StringBuilder b = new StringBuilder(); for (int i=1; i<=size; i++) { diff --git a/src/main/java/it/grid/storm/catalogs/ChunkData.java b/src/main/java/it/grid/storm/catalogs/ChunkData.java deleted file mode 100644 index c79c4f406..000000000 --- a/src/main/java/it/grid/storm/catalogs/ChunkData.java +++ /dev/null @@ -1,11 +0,0 @@ -package it.grid.storm.catalogs; - -public interface ChunkData extends RequestData { - - /** - * Method that returns the primary key in persistence, associated with This - * Chunk. 
- */ - public long getIdentifier(); - -} diff --git a/src/main/java/it/grid/storm/catalogs/CopyChunkCatalog.java b/src/main/java/it/grid/storm/catalogs/CopyChunkCatalog.java deleted file mode 100644 index e09798ce8..000000000 --- a/src/main/java/it/grid/storm/catalogs/CopyChunkCatalog.java +++ /dev/null @@ -1,489 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.catalogs; - -import it.grid.storm.common.types.TimeUnit; -import it.grid.storm.griduser.GridUserInterface; -// import it.grid.storm.namespace.SurlStatusStore; -import it.grid.storm.srm.types.InvalidTRequestTokenAttributesException; -import it.grid.storm.srm.types.InvalidTSURLAttributesException; -import it.grid.storm.srm.types.InvalidTSpaceTokenAttributesException; -import it.grid.storm.srm.types.TFileStorageType; -import it.grid.storm.srm.types.TLifeTimeInSeconds; -import it.grid.storm.srm.types.TOverwriteMode; -import it.grid.storm.srm.types.TRequestToken; -import it.grid.storm.srm.types.TReturnStatus; -import it.grid.storm.srm.types.TSURL; -import it.grid.storm.srm.types.TSpaceToken; -import it.grid.storm.srm.types.TStatusCode; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.List; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Class that represents StoRMs CopyChunkCatalog: it collects CopyChunkData 
and - * provides methods for looking up a CopyChunkData based on TRequestToken, as - * well as for updating an existing one. - * - * @author EGRID - ICTP Trieste - * @date september, 2005 - * @version 2.0 - */ -public class CopyChunkCatalog { - - private static final Logger log = LoggerFactory - .getLogger(CopyChunkCatalog.class); - - /* only instance of CopyChunkCatalog present in StoRM! */ - private static final CopyChunkCatalog cat = new CopyChunkCatalog(); - /* WARNING!!! TO BE MODIFIED WITH FACTORY!!! */ - private CopyChunkDAO dao = CopyChunkDAO.getInstance(); - - private CopyChunkCatalog() { - - } - - /** - * Method that returns the only instance of PtPChunkCatalog available. - */ - public static CopyChunkCatalog getInstance() { - - return cat; - } - - /** - * Method used to update into Persistence a retrieved CopyChunkData. In case - * any error occurs, the operation does not proceed and no Exception is - * thrown. - * - * Beware that the only fields updated into persistence are the StatusCode and - * the errorString. 
- */ - synchronized public void update(CopyPersistentChunkData cd) { - - CopyChunkDataTO to = new CopyChunkDataTO(); - /* primary key needed by DAO Object */ - to.setPrimaryKey(cd.getPrimaryKey()); - to.setLifeTime(FileLifetimeConverter.getInstance().toDB( - cd.getLifetime().value())); - to.setStatus(StatusCodeConverter.getInstance().toDB( - cd.getStatus().getStatusCode())); - to.setErrString(cd.getStatus().getExplanation()); - to.setFileStorageType(FileStorageTypeConverter.getInstance().toDB( - cd.getFileStorageType())); - to.setOverwriteOption(OverwriteModeConverter.getInstance().toDB( - cd.getOverwriteOption())); - to.setNormalizedSourceStFN(cd.getSURL().normalizedStFN()); - to.setSourceSurlUniqueID(new Integer(cd.getSURL().uniqueId())); - to.setNormalizedTargetStFN(cd.getDestinationSURL().normalizedStFN()); - to.setTargetSurlUniqueID(new Integer(cd.getDestinationSURL().uniqueId())); - - dao.update(to); - } - - /** - * Method that returns a Collection of CopyChunkData Objects matching the - * supplied TRequestToken. - * - * If any of the data associated to the TRequestToken is not well formed and - * so does not allow a CopyChunkData Object to be created, then that part of - * the request is dropped and gets logged, and the processing continues with - * the next part. All valid chunks get returned: the others get dropped. - * - * If there are no chunks to process then an empty Collection is returned, and - * a messagge gets logged. 
- */ - synchronized public Collection lookup( - TRequestToken rt) { - - Collection chunkDataTOs = dao.find(rt); - log.debug("COPY CHUNK CATALOG: retrieved data {}", chunkDataTOs); - return buildChunkDataList(chunkDataTOs, rt); - } - - private Collection buildChunkDataList( - Collection chunkDataTOs, TRequestToken rt) { - - ArrayList list = new ArrayList(); - CopyPersistentChunkData chunk; - for (CopyChunkDataTO chunkTO : chunkDataTOs) { - chunk = makeOne(chunkTO, rt); - if (chunk == null) { - continue; - } - list.add(chunk); - if (isComplete(chunkTO)) { - continue; - } - try { - dao.updateIncomplete(completeTO(chunkTO, chunk)); - } catch (InvalidReducedCopyChunkDataAttributesException e) { - log.warn("COPY CHUNK CATALOG! unable to add missing informations on " - + "DB to the request: {}", e.getMessage()); - } - } - log.debug("COPY CHUNK CATALOG: returning {}\n\n", list); - return list; - } - - private Collection buildChunkDataList( - Collection chunkDataTOs) { - - ArrayList list = new ArrayList(); - CopyPersistentChunkData chunk; - for (CopyChunkDataTO chunkTO : chunkDataTOs) { - chunk = makeOne(chunkTO); - if (chunk == null) { - continue; - } - list.add(chunk); - if (isComplete(chunkTO)) { - continue; - } - try { - dao.updateIncomplete(completeTO(chunkTO, chunk)); - } catch (InvalidReducedCopyChunkDataAttributesException e) { - log.warn("COPY CHUNK CATALOG! 
unable to add missing informations on DB " - + "to the request: {}", e.getMessage()); - } - } - log.debug("COPY CHUNK CATALOG: returning {}\n\n", list); - return list; - } - - public Collection lookupCopyChunkData( - TRequestToken requestToken, Collection surls) { - - int[] surlsUniqueIDs = new int[surls.size()]; - String[] surlsArray = new String[surls.size()]; - int index = 0; - for (TSURL tsurl : surls) { - surlsUniqueIDs[index] = tsurl.uniqueId(); - surlsArray[index] = tsurl.rawSurl(); - index++; - } - Collection chunkDataTOs = dao.find(requestToken, - surlsUniqueIDs, surlsArray); - return buildChunkDataList(chunkDataTOs, requestToken); - } - - public Collection lookupCopyChunkData(TSURL surl, - GridUserInterface user) { - - return lookupCopyChunkData(Arrays.asList(new TSURL[] { surl }), user); - } - - public Collection lookupCopyChunkData(TSURL surl) { - - return lookupCopyChunkData(Arrays.asList(new TSURL[] { surl })); - } - - private Collection lookupCopyChunkData( - List surls, GridUserInterface user) { - - int[] surlsUniqueIDs = new int[surls.size()]; - String[] surlsArray = new String[surls.size()]; - int index = 0; - for (TSURL tsurl : surls) { - surlsUniqueIDs[index] = tsurl.uniqueId(); - surlsArray[index] = tsurl.rawSurl(); - index++; - } - Collection chunkDataTOs = dao.find(surlsUniqueIDs, - surlsArray, user.getDn()); - return buildChunkDataList(chunkDataTOs); - } - - public Collection lookupCopyChunkData( - List surls) { - - int[] surlsUniqueIDs = new int[surls.size()]; - String[] surlsArray = new String[surls.size()]; - int index = 0; - for (TSURL tsurl : surls) { - surlsUniqueIDs[index] = tsurl.uniqueId(); - surlsArray[index] = tsurl.rawSurl(); - index++; - } - Collection chunkDataTOs = dao.find(surlsUniqueIDs, - surlsArray); - return buildChunkDataList(chunkDataTOs); - } - - private CopyPersistentChunkData makeOne(CopyChunkDataTO chunkTO) { - - try { - return makeOne(chunkTO, - new TRequestToken(chunkTO.requestToken(), chunkTO.timeStamp())); - } 
catch (InvalidTRequestTokenAttributesException e) { - throw new IllegalStateException( - "Unexpected InvalidTRequestTokenAttributesException in TRequestToken: " - + e); - } - } - - /** - * Generates a CopyChunkData from the received CopyChunkDataTO - * - * @param chunkDataTO - * @param rt - * @return - */ - private CopyPersistentChunkData makeOne(CopyChunkDataTO chunkDataTO, - TRequestToken rt) { - - StringBuilder errorSb = new StringBuilder(); - // fromSURL - TSURL fromSURL = null; - try { - fromSURL = TSURL.makeFromStringValidate(chunkDataTO.fromSURL()); - } catch (InvalidTSURLAttributesException e) { - errorSb.append(e); - } - if (chunkDataTO.normalizedSourceStFN() != null) { - fromSURL.setNormalizedStFN(chunkDataTO.normalizedSourceStFN()); - } - if (chunkDataTO.sourceSurlUniqueID() != null) { - fromSURL.setUniqueID(chunkDataTO.sourceSurlUniqueID().intValue()); - } - // toSURL - TSURL toSURL = null; - try { - toSURL = TSURL.makeFromStringValidate(chunkDataTO.toSURL()); - } catch (InvalidTSURLAttributesException e) { - errorSb.append(e); - } - if (chunkDataTO.normalizedTargetStFN() != null) { - toSURL.setNormalizedStFN(chunkDataTO.normalizedTargetStFN()); - } - if (chunkDataTO.targetSurlUniqueID() != null) { - toSURL.setUniqueID(chunkDataTO.targetSurlUniqueID().intValue()); - } - // lifeTime - TLifeTimeInSeconds lifeTime = null; - try { - lifeTime = TLifeTimeInSeconds.make(FileLifetimeConverter.getInstance() - .toStoRM(chunkDataTO.lifeTime()), TimeUnit.SECONDS); - } catch (IllegalArgumentException e) { - errorSb.append("\n"); - errorSb.append(e); - } - // fileStorageType - TFileStorageType fileStorageType = FileStorageTypeConverter.getInstance() - .toSTORM(chunkDataTO.fileStorageType()); - if (fileStorageType == TFileStorageType.EMPTY) { - log.error("\nTFileStorageType could not be translated from its String " - + "representation! String: {}", chunkDataTO.fileStorageType()); - // fail creation of PtPChunk! - fileStorageType = null; - } - // spaceToken! 
- // - // WARNING! Although this field is in common between StoRM and DPM, a - // converter is still used - // because DPM logic for NULL/EMPTY is not known. StoRM model does not - // allow for null, so it must - // be taken care of! - TSpaceToken spaceToken = null; - TSpaceToken emptyToken = TSpaceToken.makeEmpty(); - // convert empty string representation of DPM into StoRM representation; - String spaceTokenTranslation = SpaceTokenStringConverter.getInstance() - .toStoRM(chunkDataTO.spaceToken()); - if (emptyToken.toString().equals(spaceTokenTranslation)) { - spaceToken = emptyToken; - } else { - try { - spaceToken = TSpaceToken.make(spaceTokenTranslation); - } catch (InvalidTSpaceTokenAttributesException e) { - errorSb.append("\n"); - errorSb.append(e); - } - } - // overwriteOption! - TOverwriteMode globalOverwriteOption = OverwriteModeConverter.getInstance() - .toSTORM(chunkDataTO.overwriteOption()); - if (globalOverwriteOption == TOverwriteMode.EMPTY) { - errorSb.append("\nTOverwriteMode could not be " - + "translated from its String representation! String: " - + chunkDataTO.overwriteOption()); - globalOverwriteOption = null; - } - // status - TReturnStatus status = null; - TStatusCode code = StatusCodeConverter.getInstance().toSTORM( - chunkDataTO.status()); - if (code == TStatusCode.EMPTY) { - errorSb.append("\nRetrieved StatusCode was not recognised: " - + chunkDataTO.status()); - } else { - status = new TReturnStatus(code, chunkDataTO.errString()); - } - // make CopyChunkData - CopyPersistentChunkData aux = null; - try { - aux = new CopyPersistentChunkData(rt, fromSURL, toSURL, lifeTime, - fileStorageType, spaceToken, globalOverwriteOption, status); - aux.setPrimaryKey(chunkDataTO.primaryKey()); - } catch (InvalidSurlRequestDataAttributesException e) { - dao.signalMalformedCopyChunk(chunkDataTO); - log.warn("COPY CHUNK CATALOG! Retrieved malformed Copy" - + " chunk data from persistence. 
Dropping chunk from request: {}", rt); - log.warn(e.getMessage()); - log.warn(errorSb.toString()); - } - // end... - return aux; - } - - /** - * - * Adds to the received CopyChunkDataTO the normalized StFN and the SURL - * unique ID taken from the CopyChunkData - * - * @param chunkTO - * @param chunk - */ - private void completeTO(ReducedCopyChunkDataTO chunkTO, - final ReducedCopyChunkData chunk) { - - chunkTO.setNormalizedSourceStFN(chunk.fromSURL().normalizedStFN()); - chunkTO.setSourceSurlUniqueID(new Integer(chunk.fromSURL().uniqueId())); - chunkTO.setNormalizedTargetStFN(chunk.toSURL().normalizedStFN()); - chunkTO.setTargetSurlUniqueID(new Integer(chunk.toSURL().uniqueId())); - } - - /** - * - * Creates a ReducedCopyChunkDataTO from the received CopyChunkDataTO and - * completes it with the normalized StFN and the SURL unique ID taken from the - * PtGChunkData - * - * @param chunkTO - * @param chunk - * @return - * @throws InvalidReducedCopyChunkDataAttributesException - */ - private ReducedCopyChunkDataTO completeTO(CopyChunkDataTO chunkTO, - final CopyPersistentChunkData chunk) - throws InvalidReducedCopyChunkDataAttributesException { - - ReducedCopyChunkDataTO reducedChunkTO = this.reduce(chunkTO); - this.completeTO(reducedChunkTO, this.reduce(chunk)); - return reducedChunkTO; - } - - /** - * Creates a ReducedCopyChunkData from the data contained in the received - * CopyChunkData - * - * @param chunk - * @return - * @throws InvalidReducedPtGChunkDataAttributesException - */ - private ReducedCopyChunkData reduce(CopyPersistentChunkData chunk) - throws InvalidReducedCopyChunkDataAttributesException { - - ReducedCopyChunkData reducedChunk = new ReducedCopyChunkData( - chunk.getSURL(), chunk.getDestinationSURL(), chunk.getStatus()); - reducedChunk.setPrimaryKey(chunk.getPrimaryKey()); - return reducedChunk; - } - - /** - * Creates a ReducedCopyChunkDataTO from the data contained in the received - * CopyChunkDataTO - * - * @param chunkTO - * @return - */ - 
private ReducedCopyChunkDataTO reduce(CopyChunkDataTO chunkTO) { - - ReducedCopyChunkDataTO reducedChunkTO = new ReducedCopyChunkDataTO(); - reducedChunkTO.setPrimaryKey(chunkTO.primaryKey()); - reducedChunkTO.setFromSURL(chunkTO.fromSURL()); - reducedChunkTO.setNormalizedSourceStFN(chunkTO.normalizedSourceStFN()); - reducedChunkTO.setSourceSurlUniqueID(chunkTO.sourceSurlUniqueID()); - reducedChunkTO.setToSURL(chunkTO.toSURL()); - reducedChunkTO.setNormalizedTargetStFN(chunkTO.normalizedTargetStFN()); - reducedChunkTO.setTargetSurlUniqueID(chunkTO.targetSurlUniqueID()); - reducedChunkTO.setStatus(chunkTO.status()); - reducedChunkTO.setErrString(chunkTO.errString()); - return reducedChunkTO; - } - - /** - * Checks if the received CopyChunkDataTO contains the fields not set by the - * front end but required - * - * @param chunkTO - * @return - */ - private boolean isComplete(CopyChunkDataTO chunkTO) { - - return (chunkTO.normalizedSourceStFN() != null) - && (chunkTO.sourceSurlUniqueID() != null && chunkTO - .normalizedTargetStFN() != null) - && (chunkTO.targetSurlUniqueID() != null); - } - - /** - * Checks if the received ReducedPtGChunkDataTO contains the fields not set by - * the front end but required - * - * @param reducedChunkTO - * @return - */ - @SuppressWarnings("unused") - private boolean isComplete(ReducedCopyChunkDataTO reducedChunkTO) { - - return (reducedChunkTO.normalizedSourceStFN() != null) - && (reducedChunkTO.sourceSurlUniqueID() != null && reducedChunkTO - .normalizedTargetStFN() != null) - && (reducedChunkTO.targetSurlUniqueID() != null); - } - - public void updateFromPreviousStatus(TRequestToken requestToken, - TStatusCode expectedStatusCode, TStatusCode newStatusCode, - String explanation) { - - dao.updateStatusOnMatchingStatus(requestToken, expectedStatusCode, - newStatusCode, explanation); - } - - public void updateFromPreviousStatus(TRequestToken requestToken, - List surlList, TStatusCode expectedStatusCode, - TStatusCode newStatusCode) { - - 
int[] surlsUniqueIDs = new int[surlList.size()]; - String[] surls = new String[surlList.size()]; - int index = 0; - for (TSURL tsurl : surlList) { - surlsUniqueIDs[index] = tsurl.uniqueId(); - surls[index] = tsurl.rawSurl(); - index++; - } - dao.updateStatusOnMatchingStatus(requestToken, surlsUniqueIDs, surls, - expectedStatusCode, newStatusCode); - } - -} diff --git a/src/main/java/it/grid/storm/catalogs/CopyChunkDAO.java b/src/main/java/it/grid/storm/catalogs/CopyChunkDAO.java deleted file mode 100644 index 4e55e5446..000000000 --- a/src/main/java/it/grid/storm/catalogs/CopyChunkDAO.java +++ /dev/null @@ -1,786 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.catalogs; - -import it.grid.storm.config.Configuration; -import it.grid.storm.srm.types.TRequestToken; -import it.grid.storm.srm.types.TStatusCode; - -import java.sql.Connection; -import java.sql.DriverManager; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.SQLWarning; -import java.sql.Statement; -import java.util.ArrayList; -import java.util.Collection; -import java.util.List; -import java.util.Timer; -import java.util.TimerTask; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * DAO class for PtPChunkCatalog. This DAO is specifically designed to connect - * to a MySQL DB. 
The raw data found in those tables is pre-treated in order to - * turn it into the Object Model of StoRM. See Method comments for further info. - * - * BEWARE! DAO Adjusts for extra fields in the DB that are not present in the - * object model. - * - * @author EGRID - ICTP Trieste - * @version 2.0 - * @date September 2005 - */ -public class CopyChunkDAO { - - private static final Logger log = LoggerFactory.getLogger(CopyChunkDAO.class); - - /* String with the name of the class for the DB driver */ - private final String driver = Configuration.getInstance().getDBDriver(); - /* String referring to the URL of the DB */ - private final String url = Configuration.getInstance().getDBURL(); - /* String with the password for the DB */ - private final String password = Configuration.getInstance().getDBPassword(); - /* String with the name for the DB */ - private final String name = Configuration.getInstance().getDBUserName(); - - /* Connection to DB - WARNING!!! It is kept open all the time! */ - private Connection con = null; - /* boolean that tells whether reconnection is needed because of MySQL bug! */ - private boolean reconnect = false; - - /* Singleton instance */ - private final static CopyChunkDAO dao = new CopyChunkDAO(); - - /* timer thread that will run a task to alert when reconnecting is necessary! */ - private Timer clock = null; - /* - * timer task that will update the boolean signaling that a reconnection is - * needed! 
- */ - private TimerTask clockTask = null; - /* milliseconds that must pass before reconnecting to DB */ - private final long period = Configuration.getInstance().getDBReconnectPeriod() * 1000; - /* initial delay in milliseconds before starting timer */ - private final long delay = Configuration.getInstance().getDBReconnectDelay() * 1000; - - private CopyChunkDAO() { - - setUpConnection(); - clock = new Timer(); - clockTask = new TimerTask() { - - @Override - public void run() { - - reconnect = true; - } - }; // clock task - clock.scheduleAtFixedRate(clockTask, delay, period); - } - - /** - * Method that returns the only instance of the CopyChunkDAO. - */ - public static CopyChunkDAO getInstance() { - - return dao; - } - - /** - * Method used to save the changes made to a retrieved CopyChunkDataTO, back - * into the MySQL DB. - * - * Only statusCode and explanation, of status_Copy table get written to the - * DB. Likewise for fileLifetime of request_queue table. - * - * In case of any error, an error messagge gets logged but no exception is - * thrown. - */ - public synchronized void update(CopyChunkDataTO to) { - - if (!checkConnection()) { - log.error("COPY CHUNK DAO: update - unable to get a valid connection!"); - return; - } - PreparedStatement updateFileReq = null; - try { - // ready updateFileReq... - updateFileReq = con - .prepareStatement("UPDATE request_queue rq JOIN (status_Copy sc, request_Copy rc) " - + "ON (rq.ID=rc.request_queueID AND sc.request_CopyID=rc.ID) " - + "SET sc.statusCode=?, sc.explanation=?, rq.fileLifetime=?, rq.config_FileStorageTypeID=?, rq.config_OverwriteID=?, " - + "rc.normalized_sourceSURL_StFN=?, rc.sourceSURL_uniqueID=?, rc.normalized_targetSURL_StFN=?, rc.targetSURL_uniqueID=? 
" - + "WHERE rc.ID=?"); - logWarnings(con.getWarnings()); - - updateFileReq.setInt(1, to.status()); - logWarnings(updateFileReq.getWarnings()); - - updateFileReq.setString(2, to.errString()); - logWarnings(updateFileReq.getWarnings()); - - updateFileReq.setInt(3, to.lifeTime()); - logWarnings(updateFileReq.getWarnings()); - - updateFileReq.setString(4, to.fileStorageType()); - logWarnings(updateFileReq.getWarnings()); - - updateFileReq.setString(5, to.overwriteOption()); - logWarnings(updateFileReq.getWarnings()); - - updateFileReq.setString(6, to.normalizedSourceStFN()); - logWarnings(updateFileReq.getWarnings()); - - updateFileReq.setInt(7, to.sourceSurlUniqueID()); - logWarnings(updateFileReq.getWarnings()); - - updateFileReq.setString(8, to.normalizedTargetStFN()); - logWarnings(updateFileReq.getWarnings()); - - updateFileReq.setInt(9, to.targetSurlUniqueID()); - logWarnings(updateFileReq.getWarnings()); - - updateFileReq.setLong(10, to.primaryKey()); - logWarnings(updateFileReq.getWarnings()); - - // run updateFileReq - updateFileReq.executeUpdate(); - logWarnings(updateFileReq.getWarnings()); - } catch (SQLException e) { - log.error("COPY CHUNK DAO: Unable to complete update! {}", - e.getMessage(), e); - } finally { - close(updateFileReq); - } - } - - /** - * Updates the request_Get represented by the received ReducedPtGChunkDataTO - * by setting its normalized_sourceSURL_StFN and sourceSURL_uniqueID - * - * @param chunkTO - */ - public synchronized void updateIncomplete(ReducedCopyChunkDataTO chunkTO) { - - if (!checkConnection()) { - log - .error("COPY CHUNK DAO: updateIncomplete - unable to get a valid connection!"); - return; - } - String str = "UPDATE request_Copy SET normalized_sourceSURL_StFN=?, sourceSURL_uniqueID=?, normalized_targetSURL_StFN=?, targetSURL_uniqueID=? 
" - + "WHERE ID=?"; - PreparedStatement stmt = null; - try { - stmt = con.prepareStatement(str); - logWarnings(con.getWarnings()); - - stmt.setString(1, chunkTO.normalizedSourceStFN()); - logWarnings(stmt.getWarnings()); - - stmt.setInt(2, chunkTO.sourceSurlUniqueID()); - logWarnings(stmt.getWarnings()); - - stmt.setString(3, chunkTO.normalizedTargetStFN()); - logWarnings(stmt.getWarnings()); - - stmt.setInt(4, chunkTO.targetSurlUniqueID()); - logWarnings(stmt.getWarnings()); - - stmt.setLong(5, chunkTO.primaryKey()); - logWarnings(stmt.getWarnings()); - - log.trace("COPY CHUNK DAO - update incomplete: {}", stmt.toString()); - stmt.executeUpdate(); - logWarnings(stmt.getWarnings()); - } catch (SQLException e) { - log.error("COPY CHUNK DAO: Unable to complete update incomplete! {}", - e.getMessage(), e); - } finally { - close(stmt); - } - } - - /** - * Method that queries the MySQL DB to find all entries matching the supplied - * TRequestToken. The Collection contains the corresponding CopyChunkDataTO - * objects. - * - * A complex query establishes all chunks associated with the request token, - * by properly joining request_queue, request_Copy and status_Copy. The - * considered fields are: - * - * (1) From status_Copy: the ID field which becomes the TOs primary key, and - * statusCode. - * - * (2) From request_Copy: targetSURL and sourceSURL. - * - * (3) From request_queue: fileLifetime, config_FileStorageTypeID, s_token, - * config_OverwriteID. - * - * In case of any error, a log gets written and an empty collection is - * returned. No exception is returned. - * - * NOTE! Chunks in SRM_ABORTED status are NOT returned! 
- */ - public synchronized Collection find( - TRequestToken requestToken) { - - if (!checkConnection()) { - log.error("COPY CHUNK DAO: find - unable to get a valid connection!"); - return new ArrayList(); - } - String strToken = requestToken.toString(); - String str = null; - PreparedStatement find = null; - ResultSet rs = null; - try { - /* get chunks of the request */ - str = "SELECT rq.s_token, rq.config_FileStorageTypeID, rq.config_OverwriteID, rq.fileLifetime, rc.ID, rc.sourceSURL, rc.targetSURL, rc.normalized_sourceSURL_StFN, rc.sourceSURL_uniqueID, rc.normalized_targetSURL_StFN, rc.targetSURL_uniqueID, d.isSourceADirectory, d.allLevelRecursive, d.numOfLevels " - + "FROM request_queue rq JOIN (request_Copy rc, status_Copy sc) " - + "ON (rc.request_queueID=rq.ID AND sc.request_CopyID=rc.ID) " - + "LEFT JOIN request_DirOption d ON rc.request_DirOptionID=d.ID " - + "WHERE rq.r_token=? AND sc.statusCode<>?"; - - find = con.prepareStatement(str); - logWarnings(con.getWarnings()); - - ArrayList list = new ArrayList(); - find.setString(1, strToken); - logWarnings(find.getWarnings()); - - find.setInt(2, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_ABORTED)); - logWarnings(find.getWarnings()); - - log.debug("COPY CHUNK DAO: find method; " + find.toString()); - rs = find.executeQuery(); - logWarnings(find.getWarnings()); - - CopyChunkDataTO chunkDataTO; - while (rs.next()) { - chunkDataTO = new CopyChunkDataTO(); - chunkDataTO.setRequestToken(strToken); - chunkDataTO.setSpaceToken(rs.getString("rq.s_token")); - chunkDataTO.setFileStorageType(rs - .getString("rq.config_FileStorageTypeID")); - chunkDataTO.setOverwriteOption(rs.getString("rq.config_OverwriteID")); - chunkDataTO.setTimeStamp(rs.getTimestamp("rq.timeStamp")); - chunkDataTO.setLifeTime(rs.getInt("rq.fileLifetime")); - chunkDataTO.setPrimaryKey(rs.getLong("rc.ID")); - chunkDataTO.setFromSURL(rs.getString("rc.sourceSURL")); - chunkDataTO.setNormalizedSourceStFN(rs - 
.getString("rc.normalized_sourceSURL_StFN")); - int uniqueID = rs.getInt("rc.sourceSURL_uniqueID"); - if (!rs.wasNull()) { - chunkDataTO.setSourceSurlUniqueID(new Integer(uniqueID)); - } - - chunkDataTO.setToSURL(rs.getString("rc.targetSURL")); - chunkDataTO.setNormalizedTargetStFN(rs - .getString("rc.normalized_sourceSURL_StFN")); - uniqueID = rs.getInt("rc.sourceSURL_uniqueID"); - if (!rs.wasNull()) { - chunkDataTO.setTargetSurlUniqueID(new Integer(uniqueID)); - } - - list.add(chunkDataTO); - } - return list; - } catch (SQLException e) { - log.error("COPY CHUNK DAO: {}", e.getMessage(), e); - /* return empty Collection! */ - return new ArrayList(); - } finally { - close(rs); - close(find); - } - - } - - public synchronized Collection find( - TRequestToken requestToken, int[] surlUniqueIDs, String[] surls) { - - if (!checkConnection()) { - log.error("COPY CHUNK DAO: find - unable to get a valid connection!"); - return new ArrayList(); - } - String strToken = requestToken.toString(); - String str = null; - PreparedStatement find = null; - ResultSet rs = null; - try { - /* get chunks of the request */ - str = "SELECT rq.s_token, rq.config_FileStorageTypeID, rq.config_OverwriteID, rq.fileLifetime, rc.ID, rc.sourceSURL, rc.targetSURL, rc.normalized_sourceSURL_StFN, rc.sourceSURL_uniqueID, rc.normalized_targetSURL_StFN, rc.targetSURL_uniqueID, d.isSourceADirectory, d.allLevelRecursive, d.numOfLevels " - + "FROM request_queue rq JOIN (request_Copy rc, status_Copy sc) " - + "ON (rc.request_queueID=rq.ID AND sc.request_CopyID=rc.ID) " - + "LEFT JOIN request_DirOption d ON rc.request_DirOptionID=d.ID " - + "WHERE rq.r_token=? 
AND ( rc.sourceSURL_uniqueID IN " - + makeSURLUniqueIDWhere(surlUniqueIDs) - + " AND rc.sourceSURL IN " - + makeSurlString(surls) + " ) "; - - find = con.prepareStatement(str); - logWarnings(con.getWarnings()); - - ArrayList list = new ArrayList(); - find.setString(1, strToken); - logWarnings(find.getWarnings()); - - log.debug("COPY CHUNK DAO: find method; {}", find.toString()); - rs = find.executeQuery(); - logWarnings(find.getWarnings()); - - CopyChunkDataTO chunkDataTO; - while (rs.next()) { - chunkDataTO = new CopyChunkDataTO(); - chunkDataTO.setRequestToken(strToken); - chunkDataTO.setSpaceToken(rs.getString("rq.s_token")); - chunkDataTO.setFileStorageType(rs - .getString("rq.config_FileStorageTypeID")); - chunkDataTO.setOverwriteOption(rs.getString("rq.config_OverwriteID")); - chunkDataTO.setTimeStamp(rs.getTimestamp("rq.timeStamp")); - chunkDataTO.setLifeTime(rs.getInt("rq.fileLifetime")); - chunkDataTO.setPrimaryKey(rs.getLong("rc.ID")); - chunkDataTO.setFromSURL(rs.getString("rc.sourceSURL")); - chunkDataTO.setNormalizedSourceStFN(rs - .getString("rc.normalized_sourceSURL_StFN")); - int uniqueID = rs.getInt("rc.sourceSURL_uniqueID"); - if (!rs.wasNull()) { - chunkDataTO.setSourceSurlUniqueID(new Integer(uniqueID)); - } - - chunkDataTO.setToSURL(rs.getString("rc.targetSURL")); - chunkDataTO.setNormalizedTargetStFN(rs - .getString("rc.normalized_sourceSURL_StFN")); - uniqueID = rs.getInt("rc.sourceSURL_uniqueID"); - if (!rs.wasNull()) { - chunkDataTO.setTargetSurlUniqueID(new Integer(uniqueID)); - } - - list.add(chunkDataTO); - } - return list; - } catch (SQLException e) { - log.error("COPY CHUNK DAO: {}", e.getMessage(), e); - /* return empty Collection! */ - return new ArrayList(); - } finally { - close(rs); - close(find); - } - - } - - /** - * Method used in extraordinary situations to signal that data retrieved from - * the DB was malformed and could not be translated into the StoRM object - * model. 
- * - * This method attempts to change the status of the request to SRM_FAILURE and - * record it in the DB. - * - * This operation could potentially fail because the source of the malformed - * problems could be a problematic DB; indeed, initially only log messagges - * where recorded. - * - * Yet it soon became clear that the source of malformed data were the clients - * and/or FE recording info in the DB. In these circumstances the client would - * its request as being in the SRM_IN_PROGRESS state for ever. Hence the - * pressing need to inform it of the encountered problems. - */ - public synchronized void signalMalformedCopyChunk(CopyChunkDataTO auxTO) { - - if (!checkConnection()) { - log - .error("COPY CHUNK DAO: signalMalformedCopyChunk - unable to get a valid connection!"); - return; - } - String signalSQL = "UPDATE status_Copy SET statusCode=" - + StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_FAILURE) - + ", explanation=? WHERE request_CopyID=" + auxTO.primaryKey(); - - PreparedStatement signal = null; - try { - /* update storm_put_filereq */ - signal = con.prepareStatement(signalSQL); - logWarnings(con.getWarnings()); - - /* Prepared statement spares DB-specific String notation! */ - signal.setString(1, "Request is malformed!"); - logWarnings(signal.getWarnings()); - - signal.executeUpdate(); - logWarnings(signal.getWarnings()); - } catch (SQLException e) { - log.error("CopyChunkDAO! Unable to signal in DB that the request was " - + "malformed! Request: {}; Error: {}", auxTO.toString(), - e.getMessage(), e); - } finally { - close(signal); - } - } - - /** - * Auxiliary method used to close a Statement - */ - private void close(Statement stmt) { - - if (stmt != null) { - try { - stmt.close(); - } catch (Exception e) { - log.error("COPY CHUNK DAO! 
Unable to close Statement {} - Error: {}", - stmt.toString(), e.getMessage(), e); - } - } - } - - /** - * Auxiliary method used to close a ResultSet - */ - private void close(ResultSet rset) { - - if (rset != null) { - try { - rset.close(); - } catch (Exception e) { - log.error("COPY CHUNK DAO! Unable to close ResultSet! Error: {}", - e.getMessage(), e); - } - } - } - - /** - * Auxiliary private method that logs all SQL warnings. - */ - private void logWarnings(SQLWarning w) { - - if (w != null) { - log.debug("COPY CHUNK DAO: {}", w.toString()); - while ((w = w.getNextWarning()) != null) { - log.debug("COPY CHUNK DAO: {}", w.toString()); - } - } - } - - /** - * Auxiliary method that sets up the conenction to the DB. - */ - private boolean setUpConnection() { - - boolean response = false; - try { - Class.forName(driver); - con = DriverManager.getConnection(url, name, password); - logWarnings(con.getWarnings()); - response = con.isValid(0); - } catch (SQLException | ClassNotFoundException e) { - log.error("COPY CHUNK DAO! Exception in setUpConnection! {}", e.getMessage(), e); - } - return response; - } - - /** - * Auxiliary method that checks if time for resetting the connection has come, - * and eventually takes it down and up back again. - */ - private synchronized boolean checkConnection() { - - boolean response = true; - if (reconnect) { - log.debug("COPY CHUNK DAO! Reconnecting to DB! "); - takeDownConnection(); - response = setUpConnection(); - if (response) { - reconnect = false; - } - } - return response; - } - - /** - * Auxiliary method that takes down a conenctin to the DB. - */ - private void takeDownConnection() { - - if (con != null) { - try { - con.close(); - } catch (SQLException e) { - log.error("COPY CHUNK DAO! 
Exception in takeDownConnection method: {}", - e.getMessage(), e); - } - } - } - - public synchronized void updateStatusOnMatchingStatus( - TRequestToken requestToken, TStatusCode expectedStatusCode, - TStatusCode newStatusCode, String explanation) { - - if (requestToken == null || requestToken.getValue().trim().isEmpty() - || explanation == null) { - throw new IllegalArgumentException( - "Unable to perform the updateStatusOnMatchingStatus, " - + "invalid arguments: requestToken=" + requestToken + " explanation=" - + explanation); - } - doUpdateStatusOnMatchingStatus(requestToken, null, null, - expectedStatusCode, newStatusCode, explanation, true, false, true); - } - - public synchronized void updateStatusOnMatchingStatus( - TRequestToken requestToken, int[] surlsUniqueIDs, String[] surls, - TStatusCode expectedStatusCode, TStatusCode newStatusCode) - throws IllegalArgumentException { - - if (requestToken == null || requestToken.getValue().trim().isEmpty() - || surlsUniqueIDs == null || surls == null || surlsUniqueIDs.length == 0 - || surls.length == 0 || surlsUniqueIDs.length != surls.length) { - throw new IllegalArgumentException( - "Unable to perform the updateStatusOnMatchingStatus, " - + "invalid arguments: requestToken=" + requestToken - + "surlsUniqueIDs=" + surlsUniqueIDs + " surls=" + surls); - } - doUpdateStatusOnMatchingStatus(requestToken, surlsUniqueIDs, surls, - expectedStatusCode, newStatusCode, null, true, true, false); - } - - public synchronized void doUpdateStatusOnMatchingStatus( - TRequestToken requestToken, int[] surlUniqueIDs, String[] surls, - TStatusCode expectedStatusCode, TStatusCode newStatusCode, - String explanation, boolean withRequestToken, boolean withSurls, - boolean withExplanation) throws IllegalArgumentException { - - if ((withRequestToken && requestToken == null) - || (withExplanation && explanation == null) - || (withSurls && (surlUniqueIDs == null || surls == null))) { - throw new IllegalArgumentException( - "Unable to 
perform the doUpdateStatusOnMatchingStatus, " - + "invalid arguments: withRequestToken=" + withRequestToken - + " requestToken=" + requestToken + " withSurls=" + withSurls - + " surlUniqueIDs=" + surlUniqueIDs + " surls=" + surls - + " withExplaination=" + withExplanation + " explanation=" - + explanation); - } - if (!checkConnection()) { - log - .error("COPY CHUNK DAO: updateStatusOnMatchingStatus - unable to get a valid connection!"); - return; - } - String str = "UPDATE request_queue rq JOIN (status_Copy sc, request_Copy rc) " - + "ON (rq.ID=rc.request_queueID AND sc.request_CopyID=rc.ID) " - + "SET sc.statusCode=? "; - if (withExplanation) { - str += " , " + buildExpainationSet(explanation); - } - str += " WHERE sc.statusCode=? "; - if (withRequestToken) { - str += " AND " + buildTokenWhereClause(requestToken); - } - if (withSurls) { - str += " AND " + buildSurlsWhereClause(surlUniqueIDs, surls); - } - PreparedStatement stmt = null; - try { - stmt = con.prepareStatement(str); - logWarnings(con.getWarnings()); - stmt.setInt(1, StatusCodeConverter.getInstance().toDB(newStatusCode)); - logWarnings(stmt.getWarnings()); - - stmt - .setInt(2, StatusCodeConverter.getInstance().toDB(expectedStatusCode)); - logWarnings(stmt.getWarnings()); - - log.trace("COPY CHUNK DAO - updateStatusOnMatchingStatus: {}", stmt.toString()); - int count = stmt.executeUpdate(); - logWarnings(stmt.getWarnings()); - if (count == 0) { - log.trace("COPY CHUNK DAO! No chunk of COPY request was updated " - + "from {} to {}.", expectedStatusCode, newStatusCode); - } else { - log.debug("COPY CHUNK DAO! {} chunks of COPY requests were updated " - + "from {} to {}.", count, expectedStatusCode, newStatusCode); - } - } catch (SQLException e) { - log.error("COPY CHUNK DAO! Unable to updated from {} to {}! {}", - expectedStatusCode, newStatusCode, e.getMessage(), e); - } finally { - close(stmt); - } - } - - /** - * Method that returns a String containing all Surl's IDs. 
- */ - private String makeSURLUniqueIDWhere(int[] surlUniqueIDs) { - - StringBuilder sb = new StringBuilder("("); - for (int i = 0; i < surlUniqueIDs.length; i++) { - if (i > 0) { - sb.append(","); - } - sb.append(surlUniqueIDs[i]); - } - sb.append(")"); - return sb.toString(); - } - - /** - * Method that returns a String containing all Surls. - */ - private String makeSurlString(String[] surls) { - - StringBuilder sb = new StringBuilder("("); - int n = surls.length; - for (int i = 0; i < n; i++) { - sb.append("'"); - sb.append(surls[i]); - sb.append("'"); - if (i < (n - 1)) { - sb.append(","); - } - } - sb.append(")"); - return sb.toString(); - } - - public synchronized Collection find(int[] surlsUniqueIDs, - String[] surlsArray, String dn) throws IllegalArgumentException { - - if (surlsUniqueIDs == null || surlsUniqueIDs.length == 0 - || surlsArray == null || surlsArray.length == 0 || dn == null) { - throw new IllegalArgumentException("Unable to perform the find, " - + "invalid arguments: surlsUniqueIDs=" + surlsUniqueIDs - + " surlsArray=" + surlsArray + " dn=" + dn); - } - return find(surlsUniqueIDs, surlsArray, dn, true); - } - - public synchronized Collection find(int[] surlsUniqueIDs, - String[] surlsArray) throws IllegalArgumentException { - - if (surlsUniqueIDs == null || surlsUniqueIDs.length == 0 - || surlsArray == null || surlsArray.length == 0) { - throw new IllegalArgumentException("Unable to perform the find, " - + "invalid arguments: surlsUniqueIDs=" + surlsUniqueIDs - + " surlsArray=" + surlsArray); - } - return find(surlsUniqueIDs, surlsArray, null, false); - } - - private synchronized Collection find(int[] surlsUniqueIDs, - String[] surlsArray, String dn, boolean withDn) - throws IllegalArgumentException { - - if ((withDn && dn == null) || surlsUniqueIDs == null - || surlsUniqueIDs.length == 0 || surlsArray == null - || surlsArray.length == 0) { - throw new IllegalArgumentException("Unable to perform the find, " - + "invalid arguments: 
surlsUniqueIDs=" + surlsUniqueIDs - + " surlsArray=" + surlsArray + " withDn=" + withDn + " dn=" + dn); - } - if (!checkConnection()) { - log.error("COPY CHUNK DAO: find - unable to get a valid connection!"); - return new ArrayList(); - } - PreparedStatement find = null; - ResultSet rs = null; - try { - String str = "SELECT rq.r_token, rq.s_token, rq.config_FileStorageTypeID, rq.config_OverwriteID, " - + "rq.fileLifetime, rc.ID, rc.sourceSURL, rc.targetSURL, rc.normalized_sourceSURL_StFN, " - + "rc.sourceSURL_uniqueID, rc.normalized_targetSURL_StFN, rc.targetSURL_uniqueID, d.isSourceADirectory, " - + "d.allLevelRecursive, d.numOfLevels " - + "FROM request_queue rq JOIN (request_Copy rc, status_Copy sc) " - + "ON (rc.request_queueID=rq.ID AND sc.request_CopyID=rc.ID) " - + "LEFT JOIN request_DirOption d ON rc.request_DirOptionID=d.ID " - + "WHERE ( rc.sourceSURL_uniqueID IN " - + makeSURLUniqueIDWhere(surlsUniqueIDs) - + " AND rc.sourceSURL IN " - + makeSurlString(surlsArray) + " )"; - if (withDn) { - str += " AND rq.client_dn=\'" + dn + "\'"; - } - find = con.prepareStatement(str); - logWarnings(con.getWarnings()); - - List list = new ArrayList(); - - log.trace("COPY CHUNK DAO - find method: {}", find.toString()); - rs = find.executeQuery(); - logWarnings(find.getWarnings()); - CopyChunkDataTO chunkDataTO = null; - while (rs.next()) { - chunkDataTO = new CopyChunkDataTO(); - chunkDataTO.setRequestToken(rs.getString("rq.r_token")); - chunkDataTO.setSpaceToken(rs.getString("rq.s_token")); - chunkDataTO.setFileStorageType(rs - .getString("rq.config_FileStorageTypeID")); - chunkDataTO.setOverwriteOption(rs.getString("rq.config_OverwriteID")); - chunkDataTO.setTimeStamp(rs.getTimestamp("rq.timeStamp")); - chunkDataTO.setLifeTime(rs.getInt("rq.fileLifetime")); - chunkDataTO.setPrimaryKey(rs.getLong("rc.ID")); - chunkDataTO.setFromSURL(rs.getString("rc.sourceSURL")); - chunkDataTO.setNormalizedSourceStFN(rs - .getString("rc.normalized_sourceSURL_StFN")); - int uniqueID = 
rs.getInt("rc.sourceSURL_uniqueID"); - if (!rs.wasNull()) { - chunkDataTO.setSourceSurlUniqueID(new Integer(uniqueID)); - } - - chunkDataTO.setToSURL(rs.getString("rc.targetSURL")); - chunkDataTO.setNormalizedTargetStFN(rs - .getString("rc.normalized_sourceSURL_StFN")); - uniqueID = rs.getInt("rc.sourceSURL_uniqueID"); - if (!rs.wasNull()) { - chunkDataTO.setTargetSurlUniqueID(new Integer(uniqueID)); - } - list.add(chunkDataTO); - } - return list; - } catch (SQLException e) { - log.error("COPY CHUNK DAO: {}", e.getMessage(), e); - /* return empty Collection! */ - return new ArrayList(); - } finally { - close(rs); - close(find); - } - } - - private String buildExpainationSet(String explanation) { - - return " sc.explanation='" + explanation + "' "; - } - - private String buildTokenWhereClause(TRequestToken requestToken) { - - return " rq.r_token='" + requestToken.toString() + "' "; - } - - private String buildSurlsWhereClause(int[] surlsUniqueIDs, String[] surls) { - - return " ( rc.sourceSURL_uniqueID IN " - + makeSURLUniqueIDWhere(surlsUniqueIDs) + " AND rc.sourceSURL IN " - + makeSurlString(surls) + " ) "; - } - -} diff --git a/src/main/java/it/grid/storm/catalogs/CopyChunkDataTO.java b/src/main/java/it/grid/storm/catalogs/CopyChunkDataTO.java deleted file mode 100644 index 41e197eb1..000000000 --- a/src/main/java/it/grid/storm/catalogs/CopyChunkDataTO.java +++ /dev/null @@ -1,290 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.catalogs; - -import java.sql.Timestamp; -import it.grid.storm.srm.types.TOverwriteMode; -import it.grid.storm.srm.types.TFileStorageType; -import it.grid.storm.srm.types.TStatusCode; - -/** - * Class that represents a row in the Persistence Layer: this is all raw data - * referring to the CopyChunkData proper, that is, String and primitive types. - * - * Each field is initialized with default values as per SRM 2.2 specification: - * fileStorageType VOLATILE overwriteMode NEVER status SRM_REQUEST_QUEUED - * - * All other fields are 0 if int, or a white space if String. - * - * @author EGRID ICTP - * @version 2.0 - * @date Semptember 2005 - */ -public class CopyChunkDataTO { - - /* Database table request_Get fields BEGIN */ - private long primaryKey = -1; // ID primary key of record in DB - private String fromSURL = " "; - private String toSURL = " "; - private String normalizedSourceStFN = null; - private Integer sourceSurlUniqueID = null; - private String normalizedTargetStFN = null; - private Integer targetSurlUniqueID = null; - /* Database table request_Get fields END */ - - private String requestToken = " "; - private int lifetime = 0; - private String fileStorageType = null; // initialised in constructor - private String spaceToken = " "; - private String overwriteOption = null; // initialised in constructor - private int status; // initialised in constructor - private String errString = " "; - private Timestamp timeStamp = null; - - public CopyChunkDataTO() { - - fileStorageType = FileStorageTypeConverter.getInstance().toDB( - TFileStorageType.VOLATILE); - overwriteOption = OverwriteModeConverter.getInstance().toDB( - TOverwriteMode.NEVER); - status = StatusCodeConverter.getInstance().toDB( - TStatusCode.SRM_REQUEST_QUEUED); - } - - public long primaryKey() { - - return primaryKey; - } - - public void setPrimaryKey(long n) { 
- - primaryKey = n; - } - - public String requestToken() { - - return requestToken; - } - - public void setRequestToken(String s) { - - requestToken = s; - } - - public Timestamp timeStamp() { - - return timeStamp; - } - - public void setTimeStamp(Timestamp timeStamp) { - - this.timeStamp = timeStamp; - } - - public String fromSURL() { - - return fromSURL; - } - - public void setFromSURL(String s) { - - fromSURL = s; - } - - /** - * @return the normalizedStFN - */ - public String normalizedSourceStFN() { - - return normalizedSourceStFN; - } - - /** - * @param normalizedStFN - * the normalizedStFN to set - */ - public void setNormalizedSourceStFN(String normalizedStFN) { - - this.normalizedSourceStFN = normalizedStFN; - } - - /** - * @return the surlUniqueID - */ - public Integer sourceSurlUniqueID() { - - return sourceSurlUniqueID; - } - - /** - * @param surlUniqueID - * the surlUniqueID to set - */ - public void setSourceSurlUniqueID(Integer surlUniqueID) { - - this.sourceSurlUniqueID = surlUniqueID; - } - - /** - * @return the normalizedStFN - */ - public String normalizedTargetStFN() { - - return normalizedTargetStFN; - } - - /** - * @param normalizedStFN - * the normalizedStFN to set - */ - public void setNormalizedTargetStFN(String normalizedStFN) { - - this.normalizedTargetStFN = normalizedStFN; - } - - /** - * @return the surlUniqueID - */ - public Integer targetSurlUniqueID() { - - return targetSurlUniqueID; - } - - /** - * @param surlUniqueID - * the surlUniqueID to set - */ - public void setTargetSurlUniqueID(Integer surlUniqueID) { - - this.targetSurlUniqueID = surlUniqueID; - } - - public String toSURL() { - - return toSURL; - } - - public void setToSURL(String s) { - - toSURL = s; - } - - public int lifeTime() { - - return lifetime; - } - - public void setLifeTime(int n) { - - lifetime = n; - } - - public String fileStorageType() { - - return fileStorageType; - } - - /** - * Method used to set the FileStorageType: if s is null nothing gets set; the - * 
internal default String is the one relative to Volatile FileStorageType. - */ - public void setFileStorageType(String s) { - - if (s != null) - fileStorageType = s; - } - - public String spaceToken() { - - return spaceToken; - } - - public void setSpaceToken(String s) { - - spaceToken = s; - } - - public String overwriteOption() { - - return overwriteOption; - } - - /** - * Method used to set the OverwriteMode: if s is null nothing gets set; the - * internal default String is the one relative to Never OverwriteMode. - */ - public void setOverwriteOption(String s) { - - if (s != null) - overwriteOption = s; - } - - public int status() { - - return status; - } - - public void setStatus(int n) { - - status = n; - } - - public String errString() { - - return errString; - } - - public void setErrString(String s) { - - errString = s; - } - - public String toString() { - - StringBuilder sb = new StringBuilder(); - sb.append(primaryKey); - sb.append(" "); - sb.append(requestToken); - sb.append(" "); - sb.append(fromSURL); - sb.append(" "); - sb.append(normalizedSourceStFN); - sb.append(" "); - sb.append(sourceSurlUniqueID); - sb.append(" "); - sb.append(toSURL); - sb.append(" "); - sb.append(normalizedTargetStFN); - sb.append(" "); - sb.append(targetSurlUniqueID); - sb.append(" "); - sb.append(lifetime); - sb.append(" "); - sb.append(fileStorageType); - sb.append(" "); - sb.append(spaceToken); - sb.append(" "); - sb.append(overwriteOption); - sb.append(" "); - sb.append(status); - sb.append(" "); - sb.append(errString); - sb.append(" "); - return sb.toString(); - } -} diff --git a/src/main/java/it/grid/storm/catalogs/CopyData.java b/src/main/java/it/grid/storm/catalogs/CopyData.java deleted file mode 100644 index 5d1437fcb..000000000 --- a/src/main/java/it/grid/storm/catalogs/CopyData.java +++ /dev/null @@ -1,156 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.catalogs; - -import it.grid.storm.srm.types.TFileStorageType; -import it.grid.storm.srm.types.TLifeTimeInSeconds; -import it.grid.storm.srm.types.TOverwriteMode; -import it.grid.storm.srm.types.TReturnStatus; -import it.grid.storm.srm.types.TSURL; -import it.grid.storm.srm.types.TSpaceToken; -import it.grid.storm.srm.types.TStatusCode; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * This class represents a CopyChunkData, that is part of a multifile Copy srm - * request. It contains data about: the requestToken, the fromSURL, the toSURL, - * the target fileLifeTime, the target fileStorageType and any available target - * spaceToken, the target overwriteOption to be applied in case the file already - * exists, the fileSize of the existing file if any, return status of the file - * together with its error string. - * - * @author EGRID - ICTP Trieste - * @date September, 2005 - * @version 2.0 - */ -public class CopyData extends SurlMultyOperationRequestData { - - private static final Logger log = LoggerFactory.getLogger(CopyData.class); - - /** - * SURL to which the srmCopy will put the file - */ - protected TSURL destinationSURL; - - /** - * requested lifetime - BEWARE!!! It is the fileLifetime at destination in - * case of Volatile files! 
- */ - protected TLifeTimeInSeconds lifetime; - - /** - * TFileStorageType at destination - */ - protected TFileStorageType fileStorageType; - - /** - * SpaceToken to use for toSURL - */ - protected TSpaceToken spaceToken; - - /** - * specifies the behaviour in case of existing files for Put part of the copy - * (could be local or remote!) - */ - protected TOverwriteMode overwriteOption; - - public CopyData(TSURL fromSURL, TSURL destinationSURL, - TLifeTimeInSeconds lifetime, TFileStorageType fileStorageType, - TSpaceToken spaceToken, TOverwriteMode overwriteOption, TReturnStatus status) - throws InvalidCopyDataAttributesException, - InvalidSurlRequestDataAttributesException { - - super(fromSURL, status); - if (destinationSURL == null || lifetime == null || fileStorageType == null - || spaceToken == null || overwriteOption == null) { - throw new InvalidCopyDataAttributesException(fromSURL, destinationSURL, - lifetime, fileStorageType, spaceToken, overwriteOption, status); - } - this.destinationSURL = destinationSURL; - this.lifetime = lifetime; - this.fileStorageType = fileStorageType; - this.spaceToken = spaceToken; - this.overwriteOption = overwriteOption; - } - - /** - * Method that returns the toSURL of the srm request to which this chunk - * belongs. - */ - public TSURL getDestinationSURL() { - - return destinationSURL; - } - - /** - * Method that returns the requested pin life time for this chunk of the srm - * request. - */ - public TLifeTimeInSeconds getLifetime() { - - return lifetime; - } - - /** - * Method that returns the fileStorageType for this chunk of the srm request. - */ - public TFileStorageType getFileStorageType() { - - return fileStorageType; - } - - /** - * Method that returns the space token supplied for this chunk of the srm - * request. - */ - public TSpaceToken getSpaceToken() { - - return spaceToken; - } - - /** - * Method that returns the overwriteOption specified in the srm request. 
- */ - public TOverwriteMode getOverwriteOption() { - - return overwriteOption; - } - - /** - * Method that sets the status of this request to SRM_DUPLICATION_ERROR; it - * needs the explanation String which describes the situation in greater - * detail; if a null is passed, then an empty String is used as explanation. - */ - public void changeStatusSRM_DUPLICATION_ERROR(String explanation) { - - setStatus(TStatusCode.SRM_DUPLICATION_ERROR, explanation); - } - - /** - * Method that sets the status of this request to SRM_FATAL_INTERNAL_ERROR; it - * needs the explanation String which describes the situation in greater - * detail; if a null is passed, then an empty String is used as explanation. - */ - public void changeStatusSRM_FATAL_INTERNAL_ERROR(String explanation) { - - setStatus(TStatusCode.SRM_FATAL_INTERNAL_ERROR, explanation); - } - -} diff --git a/src/main/java/it/grid/storm/catalogs/CopyGlobalFlagConverter.java b/src/main/java/it/grid/storm/catalogs/CopyGlobalFlagConverter.java deleted file mode 100644 index 6f08504f5..000000000 --- a/src/main/java/it/grid/storm/catalogs/CopyGlobalFlagConverter.java +++ /dev/null @@ -1,111 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ - -package it.grid.storm.catalogs; - -import java.util.Map; -import java.util.HashMap; -import java.util.Iterator; -import it.grid.storm.srm.types.TOverwriteMode; - -/** - * Package private auxiliary class used to convert between DPM and StoRM - * representation of Copy TOverwriteMode+RemoveSourceFiles global information - * for the whole request, and Flags in storm_req. - * - * @author: EGRID - ICTP Trieste - * @version: 1.0 - * @date: September 2005 - */ -class CopyGlobalFlagConverter { - - private Map DPMtoSTORM = new HashMap(); - private Map STORMtoDPM = new HashMap(); - - private static CopyGlobalFlagConverter c = new CopyGlobalFlagConverter(); - - /** - * Private constructor that fills in the conversion table; in particular, DPM - * uses int values to represent the pair of values: - * - * 0 NEVER + DO NOT RemoveSourceFiles 1 ALWAYS + DO NOT RemoveSourceFiles 2 - * WHENFILESAREDIFFERENT + DO NOT RemoveSourceFiles 4 NEVER + - * RemoveSourceFiles 5 ALWAYS + RemoveSourceFiles 6 WHENFILESAREDIFFERENT + - * RemoveSourceFiles - */ - private CopyGlobalFlagConverter() { - - DPMtoSTORM.put(new Integer(0), new Object[] { TOverwriteMode.NEVER, - new Boolean(false) }); - DPMtoSTORM.put(new Integer(1), new Object[] { TOverwriteMode.ALWAYS, - new Boolean(false) }); - DPMtoSTORM.put(new Integer(2), new Object[] { - TOverwriteMode.WHENFILESAREDIFFERENT, new Boolean(false) }); - DPMtoSTORM.put(new Integer(4), new Object[] { TOverwriteMode.NEVER, - new Boolean(true) }); - DPMtoSTORM.put(new Integer(5), new Object[] { TOverwriteMode.ALWAYS, - new Boolean(true) }); - DPMtoSTORM.put(new Integer(6), new Object[] { - TOverwriteMode.WHENFILESAREDIFFERENT, new Boolean(true) }); - Object aux; - for (Iterator i = DPMtoSTORM.keySet().iterator(); i.hasNext();) { - aux = i.next(); - STORMtoDPM.put(DPMtoSTORM.get(aux), aux); - } - } - - /** - * Method that returns the only instance of OverwriteModeConverter. 
- */ - public static CopyGlobalFlagConverter getInstance() { - - return c; - } - - /** - * Method that returns the int used by DPM to represent the given - * TOverwriteMode and removeSourceFiles boolean. -1 is returned if no match is - * found. - */ - public int toDPM(TOverwriteMode om, boolean removeSourceFiles) { - - Integer aux = (Integer) STORMtoDPM.get(new Object[] { om, - new Boolean(removeSourceFiles) }); - if (aux == null) - return -1; - return aux.intValue(); - } - - /** - * Method that returns an Object[] containing the TOverwriteMode and the - * boolean used by StoRM to represent the supplied int representation of DPM. - * An empty Object[] is returned if no StoRM type is found. - */ - public Object[] toSTORM(int n) { - - Object[] aux = (Object[]) DPMtoSTORM.get(new Integer(n)); - if (aux == null) - return new Object[] {}; - return aux; - } - - public String toString() { - - return "OverWriteModeConverter.\nDPMtoSTORM map:" + DPMtoSTORM - + "\nSTORMtoDPM map:" + STORMtoDPM; - } -} diff --git a/src/main/java/it/grid/storm/catalogs/CopyPersistentChunkData.java b/src/main/java/it/grid/storm/catalogs/CopyPersistentChunkData.java deleted file mode 100644 index c9c1185aa..000000000 --- a/src/main/java/it/grid/storm/catalogs/CopyPersistentChunkData.java +++ /dev/null @@ -1,110 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ - -package it.grid.storm.catalogs; - -import it.grid.storm.srm.types.TFileStorageType; -import it.grid.storm.srm.types.TLifeTimeInSeconds; -import it.grid.storm.srm.types.TOverwriteMode; -import it.grid.storm.srm.types.TRequestToken; -import it.grid.storm.srm.types.TReturnStatus; -import it.grid.storm.srm.types.TSURL; -import it.grid.storm.srm.types.TSpaceToken; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * This class represents a CopyChunkData, that is part of a multifile Copy srm - * request. It contains data about: the requestToken, the fromSURL, the toSURL, - * the target fileLifeTime, the target fileStorageType and any available target - * spaceToken, the target overwriteOption to be applied in case the file already - * exists, the fileSize of the existing file if any, return status of the file - * together with its error string. - * - * @author EGRID - ICTP Trieste - * @date September, 2005 - * @version 2.0 - */ -public class CopyPersistentChunkData extends CopyData implements - PersistentChunkData { - - private static final Logger log = LoggerFactory - .getLogger(CopyPersistentChunkData.class); - - /** - * long representing the primary key for the persistence layer! 
- */ - private long primaryKey = -1; - - /** - * This is the requestToken of the multifile srm request to which this chunk - * belongs - */ - private TRequestToken requestToken; - - public CopyPersistentChunkData(TRequestToken requestToken, TSURL fromSURL, - TSURL destinationSURL, TLifeTimeInSeconds lifetime, - TFileStorageType fileStorageType, TSpaceToken spaceToken, - TOverwriteMode overwriteOption, TReturnStatus status) - throws InvalidCopyPersistentChunkDataAttributesException, - InvalidCopyDataAttributesException, - InvalidSurlRequestDataAttributesException { - - super(fromSURL, destinationSURL, lifetime, fileStorageType, spaceToken, - overwriteOption, status); - if (requestToken == null) { - log.debug("CopyPersistentChunkData: requestToken is null!"); - throw new InvalidCopyPersistentChunkDataAttributesException(requestToken, - fromSURL, destinationSURL, lifetime, fileStorageType, spaceToken, - overwriteOption, status); - } - this.requestToken = requestToken; - } - - /** - * Method used to get the primary key used in the persistence layer! - */ - public long getPrimaryKey() { - - return primaryKey; - } - - /** - * Method used to set the primary key to be used in the persistence layer! - */ - public void setPrimaryKey(long l) { - - primaryKey = l; - } - - /** - * Method that returns the requestToken of the srm request to which this chunk - * belongs. - */ - public TRequestToken getRequestToken() { - - return requestToken; - } - - @Override - public long getIdentifier() { - - return getPrimaryKey(); - } - -} diff --git a/src/main/java/it/grid/storm/catalogs/CopySpecificFlagConverter.java b/src/main/java/it/grid/storm/catalogs/CopySpecificFlagConverter.java deleted file mode 100644 index b83a7daa7..000000000 --- a/src/main/java/it/grid/storm/catalogs/CopySpecificFlagConverter.java +++ /dev/null @@ -1,112 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.catalogs; - -import java.util.Map; -import java.util.HashMap; -import java.util.Iterator; -import it.grid.storm.srm.types.TOverwriteMode; - -/** - * Package private auxiliary class used to convert between DPM and StoRM - * representation of Copy TOverwriteMode+TDirOption request specific - * information, and Flags in storm_copy_filereq. - * - * @author: EGRID - ICTP Trieste - * @version: 1.0 - * @date: September 2005 - */ -class CopySpecificFlagConverter { - - private Map DPMtoSTORM = new HashMap(); - private Map STORMtoDPM = new HashMap(); - - private static CopySpecificFlagConverter c = new CopySpecificFlagConverter(); - - /** - * Private constructor that fills in the conversion table; in particular, DPM - * uses int values to represent the pair of values: - * - * 0 NEVER + source NOT directory 1 ALWAYS + source NOT directory 2 - * WHENFILESAREDIFFERENT + source NOT directory 4 NEVER + source is directory - * 5 ALWAYS + source is directory 6 WHENFILESAREDIFFERENT + source is - * directory - */ - private CopySpecificFlagConverter() { - - DPMtoSTORM.put(new Integer(0), new Object[] { TOverwriteMode.NEVER, - new Boolean(false) }); - DPMtoSTORM.put(new Integer(1), new Object[] { TOverwriteMode.ALWAYS, - new Boolean(false) }); - DPMtoSTORM.put(new Integer(2), new Object[] { - TOverwriteMode.WHENFILESAREDIFFERENT, new Boolean(false) }); - DPMtoSTORM.put(new Integer(4), new Object[] { 
TOverwriteMode.NEVER, - new Boolean(true) }); - DPMtoSTORM.put(new Integer(5), new Object[] { TOverwriteMode.ALWAYS, - new Boolean(true) }); - DPMtoSTORM.put(new Integer(6), new Object[] { - TOverwriteMode.WHENFILESAREDIFFERENT, new Boolean(true) }); - Object aux; - for (Iterator i = DPMtoSTORM.keySet().iterator(); i.hasNext();) { - aux = i.next(); - STORMtoDPM.put(DPMtoSTORM.get(aux), aux); - } - } - - /** - * Method that returns the only instance of CopySpecificFlagConverter. - */ - public static CopySpecificFlagConverter getInstance() { - - return c; - } - - /** - * Method that returns the int used by DPM to represent the given - * TOverwriteMode and isSourceADirectory boolean. -1 is returned if no match - * is found. - */ - public int toDPM(TOverwriteMode om, boolean isSourceADirectory) { - - Integer aux = (Integer) STORMtoDPM.get(new Object[] { om, - new Boolean(isSourceADirectory) }); - if (aux == null) - return -1; - return aux.intValue(); - } - - /** - * Method that returns an Object[] containing the TOverwriteMode and the - * Boolean used by StoRM to represent the supplied int representation of DPM. - * An empty Object[] is returned if no StoRM type is found. - */ - public Object[] toSTORM(int n) { - - Object[] aux = (Object[]) DPMtoSTORM.get(new Integer(n)); - if (aux == null) - return new Object[] {}; - return aux; - } - - public String toString() { - - return "OverWriteModeConverter.\nDPMtoSTORM map:" + DPMtoSTORM - + "\nSTORMtoDPM map:" + STORMtoDPM; - } - -} diff --git a/src/main/java/it/grid/storm/catalogs/DirOptionConverter.java b/src/main/java/it/grid/storm/catalogs/DirOptionConverter.java deleted file mode 100644 index 55391d4d7..000000000 --- a/src/main/java/it/grid/storm/catalogs/DirOptionConverter.java +++ /dev/null @@ -1,71 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.catalogs; - -/** - * Package private class that translates between DPM flag for TDirOption and - * StoRM TDirOption proper. - * - * In particular DPM uses the int 1 to denote a recursive call, yet it fails to - * distinguish between a chosen recursion level; in other words there is no way - * that DPM specifies the number of levels to recurse: so either you recurse - * till the end or nothing. - * - * @author EGRID - ICTP Trieste - * @version 1.0 - * @date August, 2005 - */ -class DirOptionConverter { - - static private DirOptionConverter converter = null; - - private DirOptionConverter() { - - } - - static public DirOptionConverter getInstance() { - - if (converter == null) - converter = new DirOptionConverter(); - return converter; - } - - /** - * Method that translates the int used by DPM as flag for TDirOption, into a - * boolean for isDirOption. - * - * 1 causes true to be returned; any other value returns 0. - */ - public boolean toSTORM(int n) { - - return (n == 1); - } - - /** - * Method used to translate the boolean isDirOption into an int used by DPM to - * express the same thing. - * - * true gets translated into 1; false into 0. 
- */ - public int toDPM(boolean isDirOption) { - - if (isDirOption) - return 1; - return 0; - } -} diff --git a/src/main/java/it/grid/storm/catalogs/FileLifetimeConverter.java b/src/main/java/it/grid/storm/catalogs/FileLifetimeConverter.java deleted file mode 100644 index d84d199cf..000000000 --- a/src/main/java/it/grid/storm/catalogs/FileLifetimeConverter.java +++ /dev/null @@ -1,75 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.catalogs; - -import it.grid.storm.srm.types.TLifeTimeInSeconds; -import it.grid.storm.config.Configuration; - -/** - * Class that handles DB representation of a pinLifetime as expressed by a - * TLifetimeInSeconds objects; in particular it takes care of protocol - * specification: - * - * 0/null/negative are translated as default StoRM configurable values. StoRMs - * Empty TLifeTimeInSeconds is translated as 0. 
- * - * @author EGRID ICTP - * @version 1.0 - * @date March 2007 - */ -public class FileLifetimeConverter { - - private static FileLifetimeConverter stc = new FileLifetimeConverter(); // only - // instance - - private FileLifetimeConverter() { - - } - - /** - * Method that returns the only instance of SizeInBytesIntConverter - */ - public static FileLifetimeConverter getInstance() { - - return stc; - } - - /** - * Method that translates the Empty TLifeTimeInSeconds into the empty - * representation of DB which is 0. Any other value is left as is. - */ - public int toDB(long l) { - - if (l == TLifeTimeInSeconds.makeEmpty().value()) - return 0; - return new Long(l).intValue(); - } - - /** - * Method that returns the long corresponding to the int value in the DB, - * except if it is 0, NULL or negative; a configurable default value is - * returned instead, corresponding to the getFileLifetimeDefault() - * Configuration class method. - */ - public long toStoRM(int s) { - - if (s <= 0) - return Configuration.getInstance().getFileLifetimeDefault(); - return new Integer(s).longValue(); - } -} diff --git a/src/main/java/it/grid/storm/catalogs/FileStorageTypeConverter.java b/src/main/java/it/grid/storm/catalogs/FileStorageTypeConverter.java deleted file mode 100644 index 2e27fdb1a..000000000 --- a/src/main/java/it/grid/storm/catalogs/FileStorageTypeConverter.java +++ /dev/null @@ -1,104 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.catalogs; - -import java.util.Map; -import java.util.HashMap; -import java.util.Iterator; -import it.grid.storm.srm.types.TFileStorageType; -import it.grid.storm.config.Configuration; - -/** - * Package private auxiliary class used to convert between DB raw data and StoRM - * object model representation of TFileStorageType. - * - * @author: EGRID ICTP - * @version: 2.0 - * @date: June 2005 - */ -class FileStorageTypeConverter { - - private Map DBtoSTORM = new HashMap(); - private Map STORMtoDB = new HashMap(); - - private static FileStorageTypeConverter c = new FileStorageTypeConverter(); - - /** - * Private constructor that fills in the conversion tables; - * - * V - VOLATILE P - PERMANENT D - DURABLE - */ - private FileStorageTypeConverter() { - - DBtoSTORM.put("V", TFileStorageType.VOLATILE); - DBtoSTORM.put("P", TFileStorageType.PERMANENT); - DBtoSTORM.put("D", TFileStorageType.DURABLE); - String aux; - for (Iterator i = DBtoSTORM.keySet().iterator(); i.hasNext();) { - aux = i.next(); - STORMtoDB.put(DBtoSTORM.get(aux), aux); - } - } - - /** - * Method that returns the only instance of FileStorageTypeConverter. - */ - public static FileStorageTypeConverter getInstance() { - - return c; - } - - /** - * Method that returns the String used in the DB to represent the given - * TFileStorageType. The empty String "" is returned if no match is found. - */ - public String toDB(TFileStorageType fst) { - - String aux = (String) STORMtoDB.get(fst); - if (aux == null) - return ""; - return aux; - } - - /** - * Method that returns the TFileStorageType used by StoRM to represent the - * supplied String representation in the DB. A configured default - * TFileStorageType is returned in case no corresponding StoRM type is found. - * TFileStorageType.EMPTY is returned if there are configuration errors. 
- */ - public TFileStorageType toSTORM(String s) { - - TFileStorageType aux = DBtoSTORM.get(s); - if (aux == null) - // This case is that the String s is different from V,P or D. - aux = DBtoSTORM.get(Configuration.getInstance() - .getDefaultFileStorageType()); - if (aux == null) - // This case should never happen, but in case we prefer ponder PERMANENT. - return TFileStorageType.EMPTY; - else - return aux; - } - - public String toString() { - - return "FileStorageTypeConverter.\nDBtoSTORM map:" + DBtoSTORM - + "\nSTORMtoDB map:" + STORMtoDB; - } - -} diff --git a/src/main/java/it/grid/storm/catalogs/FileTransferData.java b/src/main/java/it/grid/storm/catalogs/FileTransferData.java deleted file mode 100644 index 505b7cba0..000000000 --- a/src/main/java/it/grid/storm/catalogs/FileTransferData.java +++ /dev/null @@ -1,25 +0,0 @@ -package it.grid.storm.catalogs; - -import it.grid.storm.common.types.TURLPrefix; -import it.grid.storm.srm.types.TTURL; - -public interface FileTransferData extends SynchMultyOperationRequestData { - - /** - * Method that returns a TURLPrefix containing the transfer protocols desired - * for this chunk of the srm request. - */ - public TURLPrefix getTransferProtocols(); - - /** - * Method that returns the TURL for this chunk of the srm request. - */ - public TTURL getTransferURL(); - - /** - * Method used to set the transferURL associated to the SURL of this chunk. If - * TTURL is null, then nothing gets set! - */ - public void setTransferURL(final TTURL turl); - -} diff --git a/src/main/java/it/grid/storm/catalogs/IdentityPtGData.java b/src/main/java/it/grid/storm/catalogs/IdentityPtGData.java deleted file mode 100644 index 7a06f1db9..000000000 --- a/src/main/java/it/grid/storm/catalogs/IdentityPtGData.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.catalogs; - -import it.grid.storm.common.types.TURLPrefix; -import it.grid.storm.griduser.GridUserInterface; -import it.grid.storm.srm.types.TDirOption; -import it.grid.storm.srm.types.TLifeTimeInSeconds; -import it.grid.storm.srm.types.TReturnStatus; -import it.grid.storm.srm.types.TSURL; -import it.grid.storm.srm.types.TSizeInBytes; -import it.grid.storm.srm.types.TTURL; -import it.grid.storm.synchcall.data.IdentityInputData; - -public class IdentityPtGData extends AnonymousPtGData implements - IdentityInputData { - - private final GridUserInterface auth; - - /** - * @param requestToken - * @param fromSURL - * @param lifeTime - * @param dirOption - * @param desiredProtocols - * @param fileSize - * @param status - * @param transferURL - * @throws InvalidPtGDataAttributesException - */ - public IdentityPtGData(GridUserInterface auth, TSURL SURL, - TLifeTimeInSeconds lifeTime, TDirOption dirOption, - TURLPrefix desiredProtocols, TSizeInBytes fileSize, TReturnStatus status, - TTURL transferURL) throws InvalidPtGDataAttributesException, - InvalidFileTransferDataAttributesException, - InvalidSurlRequestDataAttributesException, IllegalArgumentException { - - super(SURL, lifeTime, dirOption, desiredProtocols, fileSize, status, - transferURL); - if (auth == null) { - throw new IllegalArgumentException( - "Unable to create the object, invalid arguments: auth=" + auth); - } - this.auth = 
auth; - } - - @Override - public GridUserInterface getUser() { - - return auth; - } - - @Override - public String getPrincipal() { - - return this.auth.getDn(); - } -} diff --git a/src/main/java/it/grid/storm/catalogs/IdentityPtPData.java b/src/main/java/it/grid/storm/catalogs/IdentityPtPData.java deleted file mode 100644 index af35bc9bb..000000000 --- a/src/main/java/it/grid/storm/catalogs/IdentityPtPData.java +++ /dev/null @@ -1,68 +0,0 @@ -/** - * - */ -package it.grid.storm.catalogs; - -import it.grid.storm.common.types.TURLPrefix; -import it.grid.storm.griduser.GridUserInterface; -import it.grid.storm.srm.types.TFileStorageType; -import it.grid.storm.srm.types.TLifeTimeInSeconds; -import it.grid.storm.srm.types.TOverwriteMode; -import it.grid.storm.srm.types.TReturnStatus; -import it.grid.storm.srm.types.TSURL; -import it.grid.storm.srm.types.TSizeInBytes; -import it.grid.storm.srm.types.TSpaceToken; -import it.grid.storm.srm.types.TTURL; -import it.grid.storm.synchcall.data.IdentityInputData; - -/** - * @author Michele Dibenedetto - * - */ -public class IdentityPtPData extends AnonymousPtPData implements - IdentityInputData { - - private final GridUserInterface auth; - - /** - * @param requestToken - * @param fromSURL - * @param lifeTime - * @param dirOption - * @param desiredProtocols - * @param fileSize - * @param status - * @param transferURL - * @throws InvalidPtGDataAttributesException - */ - public IdentityPtPData(GridUserInterface auth, TSURL SURL, - TLifeTimeInSeconds pinLifetime, TLifeTimeInSeconds fileLifetime, - TFileStorageType fileStorageType, TSpaceToken spaceToken, - TSizeInBytes expectedFileSize, TURLPrefix transferProtocols, - TOverwriteMode overwriteOption, TReturnStatus status, TTURL transferURL) - throws InvalidPtPDataAttributesException, - InvalidFileTransferDataAttributesException, - InvalidSurlRequestDataAttributesException, IllegalArgumentException { - - super(SURL, pinLifetime, fileLifetime, fileStorageType, spaceToken, - 
expectedFileSize, transferProtocols, overwriteOption, status, transferURL); - if (auth == null) { - throw new IllegalArgumentException( - "Unable to create the object, invalid arguments: auth=" + auth); - } - this.auth = auth; - } - - @Override - public GridUserInterface getUser() { - - return auth; - } - - @Override - public String getPrincipal() { - - return this.auth.getDn(); - } - -} diff --git a/src/main/java/it/grid/storm/catalogs/InvalidBoLChunkDataAttributesException.java b/src/main/java/it/grid/storm/catalogs/InvalidBoLChunkDataAttributesException.java deleted file mode 100644 index 5e782876e..000000000 --- a/src/main/java/it/grid/storm/catalogs/InvalidBoLChunkDataAttributesException.java +++ /dev/null @@ -1,94 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ - -package it.grid.storm.catalogs; - -import it.grid.storm.srm.types.TRequestToken; -import it.grid.storm.srm.types.TSURL; -import it.grid.storm.srm.types.TLifeTimeInSeconds; -import it.grid.storm.srm.types.TDirOption; -import it.grid.storm.common.types.TURLPrefix; -import it.grid.storm.srm.types.TSizeInBytes; -import it.grid.storm.srm.types.TReturnStatus; -import it.grid.storm.srm.types.TTURL; - -/** - * This class represents an exceptin thrown when the attributes supplied to the - * constructor of BoLChunkData are invalid, that is if any of the following is - * _null_: requestToken, fromSURL, lifeTime, numOfLevels, transferProtocols, - * fileSize, status, transferURL. - * - * @author CNAF - * @date Aug 2009 - * @version 1.0 - */ -public class InvalidBoLChunkDataAttributesException extends Exception { - - private static final long serialVersionUID = 5657310881067434280L; - - // booleans that indicate whether the corresponding variable is null - private boolean nullRequestToken; - private boolean nullFromSURL; - private boolean nullLifeTime; - private boolean nullDirOption; - private boolean nullTransferProtocols; - private boolean nullFileSize; - private boolean nullStatus; - private boolean nullTransferURL; - - /** - * Constructor that requires the attributes that caused the exception to be - * thrown. 
- */ - public InvalidBoLChunkDataAttributesException(TRequestToken requestToken, - TSURL fromSURL, TLifeTimeInSeconds lifeTime, TDirOption dirOption, - TURLPrefix transferProtocols, TSizeInBytes fileSize, TReturnStatus status, - TTURL transferURL) { - - nullRequestToken = requestToken == null; - nullFromSURL = fromSURL == null; - nullLifeTime = lifeTime == null; - nullDirOption = dirOption == null; - nullTransferProtocols = transferProtocols == null; - nullFileSize = fileSize == null; - nullStatus = status == null; - nullTransferURL = transferURL == null; - } - - public String toString() { - - StringBuilder sb = new StringBuilder(); - sb.append("Invalid BoLChunkData attributes: null-requestToken="); - sb.append(nullRequestToken); - sb.append("; nul-fromSURL="); - sb.append(nullFromSURL); - sb.append("; null-lifeTime="); - sb.append(nullLifeTime); - sb.append("; null-dirOption="); - sb.append(nullDirOption); - sb.append("; null-transferProtocols="); - sb.append(nullTransferProtocols); - sb.append("; null-fileSize="); - sb.append(nullFileSize); - sb.append("; null-status="); - sb.append(nullStatus); - sb.append("; null-transferURL="); - sb.append(nullTransferURL); - sb.append("."); - return sb.toString(); - } -} diff --git a/src/main/java/it/grid/storm/catalogs/InvalidBoLDataAttributesException.java b/src/main/java/it/grid/storm/catalogs/InvalidBoLDataAttributesException.java deleted file mode 100644 index 41a9a9afc..000000000 --- a/src/main/java/it/grid/storm/catalogs/InvalidBoLDataAttributesException.java +++ /dev/null @@ -1,94 +0,0 @@ -package it.grid.storm.catalogs; - -import it.grid.storm.common.types.TURLPrefix; -import it.grid.storm.srm.types.TDirOption; -import it.grid.storm.srm.types.TLifeTimeInSeconds; -import it.grid.storm.srm.types.TReturnStatus; -import it.grid.storm.srm.types.TSURL; -import it.grid.storm.srm.types.TSizeInBytes; -import it.grid.storm.srm.types.TTURL; - -/** - * @author Michele Dibenedetto - * - */ -public class 
InvalidBoLDataAttributesException extends - InvalidFileTransferDataAttributesException { - - private static final long serialVersionUID = 8113403994527678088L; - // booleans that indicate whether the corresponding variable is null - protected boolean nullLifeTime; - protected boolean nullDirOption; - protected boolean nullFileSize; - - public InvalidBoLDataAttributesException(TSURL fromSURL, - TLifeTimeInSeconds lifeTime, TDirOption dirOption, - TURLPrefix transferProtocols, TSizeInBytes fileSize, TReturnStatus status, - TTURL transferURL) { - - super(fromSURL, transferProtocols, status, transferURL); - init(lifeTime, dirOption, fileSize); - } - - public InvalidBoLDataAttributesException(TSURL fromSURL, - TLifeTimeInSeconds lifeTime, TDirOption dirOption, - TURLPrefix transferProtocols, TSizeInBytes fileSize, TReturnStatus status, - TTURL transferURL, String message) { - - super(fromSURL, transferProtocols, status, transferURL, message); - init(lifeTime, dirOption, fileSize); - } - - public InvalidBoLDataAttributesException(TSURL fromSURL, - TLifeTimeInSeconds lifeTime, TDirOption dirOption, - TURLPrefix transferProtocols, TSizeInBytes fileSize, TReturnStatus status, - TTURL transferURL, Throwable cause) { - - super(fromSURL, transferProtocols, status, transferURL, cause); - init(lifeTime, dirOption, fileSize); - } - - public InvalidBoLDataAttributesException(TSURL fromSURL, - TLifeTimeInSeconds lifeTime, TDirOption dirOption, - TURLPrefix transferProtocols, TSizeInBytes fileSize, TReturnStatus status, - TTURL transferURL, String message, Throwable cause) { - - super(fromSURL, transferProtocols, status, transferURL, message, cause); - init(lifeTime, dirOption, fileSize); - } - - private void init(TLifeTimeInSeconds lifeTime, TDirOption dirOption, - TSizeInBytes fileSize) { - - nullLifeTime = lifeTime == null; - nullDirOption = dirOption == null; - nullFileSize = fileSize == null; - } - - /* - * (non-Javadoc) - * - * @see java.lang.Object#toString() - */ - @Override 
- public String toString() { - - StringBuilder builder = new StringBuilder(); - builder.append("InvalidBoLDataAttributesException [nullLifeTime="); - builder.append(nullLifeTime); - builder.append(", nullDirOption="); - builder.append(nullDirOption); - builder.append(", nullFileSize="); - builder.append(nullFileSize); - builder.append(", nullSURL="); - builder.append(nullSURL); - builder.append(", nullTransferProtocols="); - builder.append(nullTransferProtocols); - builder.append(", nullStatus="); - builder.append(nullStatus); - builder.append(", nullTransferURL="); - builder.append(nullTransferURL); - builder.append("]"); - return builder.toString(); - } -} diff --git a/src/main/java/it/grid/storm/catalogs/InvalidBoLPersistentChunkDataAttributesException.java b/src/main/java/it/grid/storm/catalogs/InvalidBoLPersistentChunkDataAttributesException.java deleted file mode 100644 index a47e5433c..000000000 --- a/src/main/java/it/grid/storm/catalogs/InvalidBoLPersistentChunkDataAttributesException.java +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by - * applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS - * OF ANY KIND, either express or implied. See the License for the specific - * language governing permissions and limitations under the License. 
- */ - -package it.grid.storm.catalogs; - -import it.grid.storm.srm.types.TDirOption; -import it.grid.storm.srm.types.TRequestToken; -import it.grid.storm.srm.types.TSURL; -import it.grid.storm.srm.types.TLifeTimeInSeconds; -import it.grid.storm.common.types.TURLPrefix; -import it.grid.storm.srm.types.TSizeInBytes; -import it.grid.storm.srm.types.TReturnStatus; -import it.grid.storm.srm.types.TTURL; - -/** - * This class represents an exceptin thrown when the attributes supplied to the - * constructor of PtPChunkData are invalid, that is if any of the following is - * _null_: requestToken, toSURL, lifetime, fileStorageType, spaceToken, - * knownSizeOfThisFile, TURLPrefix transferProtocols, overwriteOption, fileSize, - * status, transferURL. - * - * @author EGRID - ICTP Trieste - * @date June, 2005 - * @version 2.0 - */ -public class InvalidBoLPersistentChunkDataAttributesException extends - InvalidBoLDataAttributesException { - - private static final long serialVersionUID = -5117535717125685975L; - /** - * booleans that indicate whether the corresponding variable is null - */ - boolean nullRequestToken; - - /** - * Constructor that requires the attributes that caused the exception to be - * thrown. 
- */ - public InvalidBoLPersistentChunkDataAttributesException( - TRequestToken requestToken, TSURL fromSURL, TLifeTimeInSeconds lifeTime, - TDirOption dirOption, TURLPrefix desiredProtocols, TSizeInBytes fileSize, - TReturnStatus status, TTURL transferURL) { - - super(fromSURL, lifeTime, dirOption, desiredProtocols, fileSize, status, - transferURL); - nullRequestToken = requestToken == null; - } - - /* - * (non-Javadoc) - * - * @see java.lang.Object#toString() - */ - @Override - public String toString() { - - StringBuilder builder = new StringBuilder(); - builder - .append("InvalidBoLPersistentChunkDataAttributesException [nullRequestToken="); - builder.append(nullRequestToken); - builder.append(", nullLifeTime="); - builder.append(nullLifeTime); - builder.append(", nullDirOption="); - builder.append(nullDirOption); - builder.append(", nullFileSize="); - builder.append(nullFileSize); - builder.append(", nullSURL="); - builder.append(nullSURL); - builder.append(", nullTransferProtocols="); - builder.append(nullTransferProtocols); - builder.append(", nullStatus="); - builder.append(nullStatus); - builder.append(", nullTransferURL="); - builder.append(nullTransferURL); - builder.append("]"); - return builder.toString(); - } - -} diff --git a/src/main/java/it/grid/storm/catalogs/InvalidCopyChunkDataAttributesException.java b/src/main/java/it/grid/storm/catalogs/InvalidCopyChunkDataAttributesException.java deleted file mode 100644 index 01363fd8e..000000000 --- a/src/main/java/it/grid/storm/catalogs/InvalidCopyChunkDataAttributesException.java +++ /dev/null @@ -1,93 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. 
You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.catalogs; - -import it.grid.storm.srm.types.TRequestToken; -import it.grid.storm.srm.types.TSURL; -import it.grid.storm.srm.types.TLifeTimeInSeconds; -import it.grid.storm.srm.types.TFileStorageType; -import it.grid.storm.srm.types.TSpaceToken; -import it.grid.storm.srm.types.TOverwriteMode; -import it.grid.storm.srm.types.TReturnStatus; - -/** - * This class represents an exceptin thrown when the attributes supplied to the - * constructor of CopyChunkData are invalid, that is if any of the following is - * _null_: requestToken, fromsURL, toSURL, lifetime, fileStorageType, - * spaceToken, overwriteOption, status. - * - * @author EGRID - ICTP Trieste - * @date September, 2005 - * @version 2.0 - */ -public class InvalidCopyChunkDataAttributesException extends Exception { - - private static final long serialVersionUID = 6786154038995023512L; - - // booleans that indicate whether the corresponding variable is null - private boolean nullRequestToken; - private boolean nullFromSURL; - private boolean nullToSURL; - private boolean nullLifetime; - private boolean nullFileStorageType; - private boolean nullSpaceToken; - private boolean nullOverwriteOption; - private boolean nullStatus; - - /** - * Constructor that requires the attributes that caused the exception to be - * thrown. 
- */ - public InvalidCopyChunkDataAttributesException(TRequestToken requestToken, - TSURL fromSURL, TSURL toSURL, TLifeTimeInSeconds lifetime, - TFileStorageType fileStorageType, TSpaceToken spaceToken, - TOverwriteMode overwriteOption, TReturnStatus status) { - - nullRequestToken = requestToken == null; - nullFromSURL = fromSURL == null; - nullToSURL = toSURL == null; - nullLifetime = lifetime == null; - nullFileStorageType = fileStorageType == null; - nullSpaceToken = spaceToken == null; - nullOverwriteOption = overwriteOption == null; - nullStatus = status == null; - } - - public String toString() { - - StringBuilder sb = new StringBuilder(); - sb.append("Invalid CopyChunkData attributes: null-requestToken="); - sb.append(nullRequestToken); - sb.append("; null-fromSURL="); - sb.append(nullFromSURL); - sb.append("; null-toSURL="); - sb.append(nullToSURL); - sb.append("; null-lifetime="); - sb.append(nullLifetime); - sb.append("; null-filestorageType="); - sb.append(nullFileStorageType); - sb.append("; null-spaceToken="); - sb.append(nullSpaceToken); - sb.append("; null-overwriteOption="); - sb.append(nullOverwriteOption); - sb.append("; null-status="); - sb.append(nullStatus); - sb.append("."); - return sb.toString(); - } -} diff --git a/src/main/java/it/grid/storm/catalogs/InvalidCopyDataAttributesException.java b/src/main/java/it/grid/storm/catalogs/InvalidCopyDataAttributesException.java deleted file mode 100644 index 8af415056..000000000 --- a/src/main/java/it/grid/storm/catalogs/InvalidCopyDataAttributesException.java +++ /dev/null @@ -1,117 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. 
You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.catalogs; - -import it.grid.storm.srm.types.TFileStorageType; -import it.grid.storm.srm.types.TLifeTimeInSeconds; -import it.grid.storm.srm.types.TOverwriteMode; -import it.grid.storm.srm.types.TReturnStatus; -import it.grid.storm.srm.types.TSURL; -import it.grid.storm.srm.types.TSpaceToken; - -/** - * @author Michele Dibenedetto - * - */ -public class InvalidCopyDataAttributesException extends - InvalidSurlRequestDataAttributesException { - - private static final long serialVersionUID = -1217486426437414490L; - protected boolean nullDestinationSURL; - protected boolean nullLifetime; - protected boolean nullFileStorageType; - protected boolean nullSpaceToken; - protected boolean nullOverwriteOption; - - public InvalidCopyDataAttributesException(TSURL SURL, TSURL destinationSURL, - TLifeTimeInSeconds lifetime, TFileStorageType fileStorageType, - TSpaceToken spaceToken, TOverwriteMode overwriteOption, TReturnStatus status) { - - super(SURL, status); - init(destinationSURL, lifetime, fileStorageType, spaceToken, - overwriteOption); - } - - public InvalidCopyDataAttributesException(TSURL SURL, TSURL destinationSURL, - TLifeTimeInSeconds lifetime, TFileStorageType fileStorageType, - TSpaceToken spaceToken, TOverwriteMode overwriteOption, - TReturnStatus status, String message) { - - super(SURL, status, message); - init(destinationSURL, lifetime, fileStorageType, spaceToken, - overwriteOption); - } - - public InvalidCopyDataAttributesException(TSURL SURL, TSURL destinationSURL, - TLifeTimeInSeconds lifetime, TFileStorageType 
fileStorageType, - TSpaceToken spaceToken, TOverwriteMode overwriteOption, - TReturnStatus status, Throwable cause) { - - super(SURL, status, cause); - init(destinationSURL, lifetime, fileStorageType, spaceToken, - overwriteOption); - } - - public InvalidCopyDataAttributesException(TSURL SURL, TSURL destinationSURL, - TLifeTimeInSeconds lifetime, TFileStorageType fileStorageType, - TSpaceToken spaceToken, TOverwriteMode overwriteOption, - TReturnStatus status, String message, Throwable cause) { - - super(SURL, status, message, cause); - init(destinationSURL, lifetime, fileStorageType, spaceToken, - overwriteOption); - } - - private void init(TSURL destinationSURL, TLifeTimeInSeconds lifetime, - TFileStorageType fileStorageType, TSpaceToken spaceToken, - TOverwriteMode overwriteOption) { - - nullDestinationSURL = destinationSURL == null; - nullLifetime = lifetime == null; - nullFileStorageType = fileStorageType == null; - nullSpaceToken = spaceToken == null; - nullOverwriteOption = overwriteOption == null; - } - - /* - * (non-Javadoc) - * - * @see java.lang.Object#toString() - */ - @Override - public String toString() { - - StringBuilder builder = new StringBuilder(); - builder.append("InvalidCopyDataAttributesException [nullDestinationSURL="); - builder.append(nullDestinationSURL); - builder.append(", nullLifetime="); - builder.append(nullLifetime); - builder.append(", nullFileStorageType="); - builder.append(nullFileStorageType); - builder.append(", nullSpaceToken="); - builder.append(nullSpaceToken); - builder.append(", nullOverwriteOption="); - builder.append(nullOverwriteOption); - builder.append(", nullSURL="); - builder.append(nullSURL); - builder.append(", nullStatus="); - builder.append(nullStatus); - builder.append("]"); - return builder.toString(); - } -} diff --git a/src/main/java/it/grid/storm/catalogs/InvalidCopyPersistentChunkDataAttributesException.java b/src/main/java/it/grid/storm/catalogs/InvalidCopyPersistentChunkDataAttributesException.java 
deleted file mode 100644 index 77cdb8dcd..000000000 --- a/src/main/java/it/grid/storm/catalogs/InvalidCopyPersistentChunkDataAttributesException.java +++ /dev/null @@ -1,112 +0,0 @@ -/* - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by - * applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS - * OF ANY KIND, either express or implied. See the License for the specific - * language governing permissions and limitations under the License. - */ - -package it.grid.storm.catalogs; - -import it.grid.storm.srm.types.TFileStorageType; -import it.grid.storm.srm.types.TLifeTimeInSeconds; -import it.grid.storm.srm.types.TOverwriteMode; -import it.grid.storm.srm.types.TRequestToken; -import it.grid.storm.srm.types.TReturnStatus; -import it.grid.storm.srm.types.TSURL; -import it.grid.storm.srm.types.TSpaceToken; - -/** - * @author Michele Dibenedetto - * - */ -public class InvalidCopyPersistentChunkDataAttributesException extends - InvalidCopyDataAttributesException { - - /** - * - */ - private static final long serialVersionUID = 1266996505954208061L; - private boolean nullRequestToken; - - public InvalidCopyPersistentChunkDataAttributesException( - TRequestToken requestToken, TSURL SURL, TSURL destinationSURL, - TLifeTimeInSeconds lifetime, TFileStorageType fileStorageType, - TSpaceToken spaceToken, TOverwriteMode overwriteOption, TReturnStatus status) { - - super(SURL, destinationSURL, lifetime, fileStorageType, spaceToken, - overwriteOption, status); - init(requestToken); - } - - public InvalidCopyPersistentChunkDataAttributesException( - TRequestToken requestToken, TSURL SURL, TSURL destinationSURL, - 
TLifeTimeInSeconds lifetime, TFileStorageType fileStorageType, - TSpaceToken spaceToken, TOverwriteMode overwriteOption, - TReturnStatus status, String message) { - - super(SURL, destinationSURL, lifetime, fileStorageType, spaceToken, - overwriteOption, status, message); - init(requestToken); - } - - public InvalidCopyPersistentChunkDataAttributesException( - TRequestToken requestToken, TSURL SURL, TSURL destinationSURL, - TLifeTimeInSeconds lifetime, TFileStorageType fileStorageType, - TSpaceToken spaceToken, TOverwriteMode overwriteOption, - TReturnStatus status, Throwable cause) { - - super(SURL, destinationSURL, lifetime, fileStorageType, spaceToken, - overwriteOption, status, cause); - init(requestToken); - } - - public InvalidCopyPersistentChunkDataAttributesException( - TRequestToken requestToken, TSURL SURL, TSURL destinationSURL, - TLifeTimeInSeconds lifetime, TFileStorageType fileStorageType, - TSpaceToken spaceToken, TOverwriteMode overwriteOption, - TReturnStatus status, String message, Throwable cause) { - - super(SURL, destinationSURL, lifetime, fileStorageType, spaceToken, - overwriteOption, status, message, cause); - init(requestToken); - } - - private void init(TRequestToken requestToken) { - - nullRequestToken = requestToken == null; - } - - /* - * (non-Javadoc) - * - * @see java.lang.Object#toString() - */ - @Override - public String toString() { - - StringBuilder builder = new StringBuilder(); - builder - .append("InvalidCopyPersistentChunkDataAttributesException [nullRequestToken="); - builder.append(nullRequestToken); - builder.append(", nullDestinationSURL="); - builder.append(nullDestinationSURL); - builder.append(", nullLifetime="); - builder.append(nullLifetime); - builder.append(", nullFileStorageType="); - builder.append(nullFileStorageType); - builder.append(", nullSpaceToken="); - builder.append(nullSpaceToken); - builder.append(", nullOverwriteOption="); - builder.append(nullOverwriteOption); - builder.append(", nullSURL="); - 
builder.append(nullSURL); - builder.append(", nullStatus="); - builder.append(nullStatus); - builder.append("]"); - return builder.toString(); - } -} diff --git a/src/main/java/it/grid/storm/catalogs/InvalidFileTransferDataAttributesException.java b/src/main/java/it/grid/storm/catalogs/InvalidFileTransferDataAttributesException.java deleted file mode 100644 index fc28c0743..000000000 --- a/src/main/java/it/grid/storm/catalogs/InvalidFileTransferDataAttributesException.java +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by - * applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS - * OF ANY KIND, either express or implied. See the License for the specific - * language governing permissions and limitations under the License. 
- */ - -package it.grid.storm.catalogs; - -import it.grid.storm.common.types.TURLPrefix; -import it.grid.storm.srm.types.TReturnStatus; -import it.grid.storm.srm.types.TSURL; -import it.grid.storm.srm.types.TTURL; - -/** - * @author Michele Dibenedetto - */ -public class InvalidFileTransferDataAttributesException extends - InvalidSurlRequestDataAttributesException { - - private static final long serialVersionUID = 4416318501544415810L; - protected boolean nullTransferProtocols; - protected boolean nullTransferURL; - - public InvalidFileTransferDataAttributesException(TSURL SURL, - TURLPrefix transferProtocols, TReturnStatus status, TTURL transferURL) { - - super(SURL, status); - init(transferProtocols, transferURL); - } - - public InvalidFileTransferDataAttributesException(TSURL SURL, - TURLPrefix transferProtocols, TReturnStatus status, TTURL transferURL, - String message) { - - super(SURL, status, message); - init(transferProtocols, transferURL); - } - - public InvalidFileTransferDataAttributesException(TSURL SURL, - TURLPrefix transferProtocols, TReturnStatus status, TTURL transferURL, - Throwable cause) { - - super(SURL, status, cause); - init(transferProtocols, transferURL); - } - - public InvalidFileTransferDataAttributesException(TSURL SURL, - TURLPrefix transferProtocols, TReturnStatus status, TTURL transferURL, - String message, Throwable cause) { - - super(SURL, status, message, cause); - init(transferProtocols, transferURL); - } - - private void init(TURLPrefix transferProtocols, TTURL transferURL) { - - nullTransferProtocols = transferProtocols == null; - nullTransferURL = transferURL == null; - } - - /* - * (non-Javadoc) - * - * @see java.lang.Object#toString() - */ - @Override - public String toString() { - - StringBuilder builder = new StringBuilder(); - builder - .append("InvalidFileTransferDataAttributesException [nullTransferProtocols="); - builder.append(nullTransferProtocols); - builder.append(", nullTransferURL="); - 
builder.append(nullTransferURL); - builder.append(", nullSURL="); - builder.append(nullSURL); - builder.append(", nullStatus="); - builder.append(nullStatus); - builder.append("]"); - return builder.toString(); - } -} diff --git a/src/main/java/it/grid/storm/catalogs/InvalidPtGDataAttributesException.java b/src/main/java/it/grid/storm/catalogs/InvalidPtGDataAttributesException.java deleted file mode 100644 index 42ed5c4eb..000000000 --- a/src/main/java/it/grid/storm/catalogs/InvalidPtGDataAttributesException.java +++ /dev/null @@ -1,88 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.catalogs; - -import it.grid.storm.srm.types.TSURL; -import it.grid.storm.srm.types.TLifeTimeInSeconds; -import it.grid.storm.srm.types.TDirOption; -import it.grid.storm.common.types.TURLPrefix; -import it.grid.storm.srm.types.TSizeInBytes; -import it.grid.storm.srm.types.TReturnStatus; -import it.grid.storm.srm.types.TTURL; - -/** - * This class represents an exceptin thrown when the attributes supplied to the - * constructor of PtGChunkData are invalid, that is if any of the following is - * _null_: requestToken, fromSURL, lifeTime, numOfLevels, transferProtocols, - * fileSize, status, transferURL. 
- * - * @author EGRID - ICTP Trieste - * @date March 23rd, 2005 - * @version 3.0 - */ -public class InvalidPtGDataAttributesException extends - InvalidFileTransferDataAttributesException { - - private static final long serialVersionUID = -3484929474636108262L; - // booleans that indicate whether the corresponding variable is null - protected boolean nullLifeTime; - protected boolean nullDirOption; - protected boolean nullFileSize; - - /** - * Constructor that requires the attributes that caused the exception to be - * thrown. - */ - public InvalidPtGDataAttributesException(TSURL fromSURL, - TLifeTimeInSeconds lifeTime, TDirOption dirOption, - TURLPrefix transferProtocols, TSizeInBytes fileSize, TReturnStatus status, - TTURL transferURL) { - - super(fromSURL, transferProtocols, status, transferURL); - nullLifeTime = lifeTime == null; - nullDirOption = dirOption == null; - nullFileSize = fileSize == null; - } - - /* - * (non-Javadoc) - * - * @see java.lang.Object#toString() - */ - @Override - public String toString() { - - StringBuilder builder = new StringBuilder(); - builder.append("InvalidPtGChunkDataAttributesException [nullLifeTime="); - builder.append(nullLifeTime); - builder.append(", nullDirOption="); - builder.append(nullDirOption); - builder.append(", nullFileSize="); - builder.append(nullFileSize); - builder.append(", nullSURL="); - builder.append(nullSURL); - builder.append(", nullTransferProtocols="); - builder.append(nullTransferProtocols); - builder.append(", nullStatus="); - builder.append(nullStatus); - builder.append(", nullTransferURL="); - builder.append(nullTransferURL); - builder.append("]"); - return builder.toString(); - } -} diff --git a/src/main/java/it/grid/storm/catalogs/InvalidPtGPersistentChunkDataAttributesException.java b/src/main/java/it/grid/storm/catalogs/InvalidPtGPersistentChunkDataAttributesException.java deleted file mode 100644 index 9ab9dcadb..000000000 --- 
a/src/main/java/it/grid/storm/catalogs/InvalidPtGPersistentChunkDataAttributesException.java +++ /dev/null @@ -1,86 +0,0 @@ -/* - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by - * applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS - * OF ANY KIND, either express or implied. See the License for the specific - * language governing permissions and limitations under the License. - */ - -package it.grid.storm.catalogs; - -import it.grid.storm.srm.types.TDirOption; -import it.grid.storm.srm.types.TRequestToken; -import it.grid.storm.srm.types.TSURL; -import it.grid.storm.srm.types.TLifeTimeInSeconds; -import it.grid.storm.common.types.TURLPrefix; -import it.grid.storm.srm.types.TSizeInBytes; -import it.grid.storm.srm.types.TReturnStatus; -import it.grid.storm.srm.types.TTURL; - -/** - * This class represents an exceptin thrown when the attributes supplied to the - * constructor of PtPChunkData are invalid, that is if any of the following is - * _null_: requestToken, toSURL, lifetime, fileStorageType, spaceToken, - * knownSizeOfThisFile, TURLPrefix transferProtocols, overwriteOption, fileSize, - * status, transferURL. - * - * @author EGRID - ICTP Trieste - * @date June, 2005 - * @version 2.0 - */ -public class InvalidPtGPersistentChunkDataAttributesException extends - InvalidPtGDataAttributesException { - - private static final long serialVersionUID = -5117535717125685975L; - /** - * booleans that indicate whether the corresponding variable is null - */ - boolean nullRequestToken; - - /** - * Constructor that requires the attributes that caused the exception to be - * thrown. 
- */ - public InvalidPtGPersistentChunkDataAttributesException( - TRequestToken requestToken, TSURL fromSURL, TLifeTimeInSeconds lifeTime, - TDirOption dirOption, TURLPrefix transferProtocols, TSizeInBytes fileSize, - TReturnStatus status, TTURL transferURL) { - - super(fromSURL, lifeTime, dirOption, transferProtocols, fileSize, status, - transferURL); - nullRequestToken = requestToken == null; - } - - /* - * (non-Javadoc) - * - * @see java.lang.Object#toString() - */ - @Override - public String toString() { - - StringBuilder builder = new StringBuilder(); - builder - .append("InvalidPtGPersistentChunkDataAttributesException [nullRequestToken="); - builder.append(nullRequestToken); - builder.append(", nullLifeTime="); - builder.append(nullLifeTime); - builder.append(", nullDirOption="); - builder.append(nullDirOption); - builder.append(", nullFileSize="); - builder.append(nullFileSize); - builder.append(", nullSURL="); - builder.append(nullSURL); - builder.append(", nullTransferProtocols="); - builder.append(nullTransferProtocols); - builder.append(", nullStatus="); - builder.append(nullStatus); - builder.append(", nullTransferURL="); - builder.append(nullTransferURL); - builder.append("]"); - return builder.toString(); - } -} diff --git a/src/main/java/it/grid/storm/catalogs/InvalidPtPDataAttributesException.java b/src/main/java/it/grid/storm/catalogs/InvalidPtPDataAttributesException.java deleted file mode 100644 index 55d445e35..000000000 --- a/src/main/java/it/grid/storm/catalogs/InvalidPtPDataAttributesException.java +++ /dev/null @@ -1,132 +0,0 @@ -/* - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. 
You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by - * applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS - * OF ANY KIND, either express or implied. See the License for the specific - * language governing permissions and limitations under the License. - */ - -package it.grid.storm.catalogs; - -import it.grid.storm.common.types.TURLPrefix; -import it.grid.storm.srm.types.TFileStorageType; -import it.grid.storm.srm.types.TLifeTimeInSeconds; -import it.grid.storm.srm.types.TOverwriteMode; -import it.grid.storm.srm.types.TReturnStatus; -import it.grid.storm.srm.types.TSURL; -import it.grid.storm.srm.types.TSizeInBytes; -import it.grid.storm.srm.types.TSpaceToken; -import it.grid.storm.srm.types.TTURL; - -/** - * @author Michele Dibenedetto - */ -public class InvalidPtPDataAttributesException extends - InvalidFileTransferDataAttributesException { - - /** - * - */ - private static final long serialVersionUID = 1051060981188652979L; - protected boolean nullSpaceToken; - protected boolean nullPinLifetime; - protected boolean nullFileLifetime; - protected boolean nullFileStorageType; - protected boolean nullKnownSizeOfThisFile; - protected boolean nullOverwriteOption; - - public InvalidPtPDataAttributesException(TSURL toSURL, - TLifeTimeInSeconds fileLifetime, TLifeTimeInSeconds pinLifetime, - TFileStorageType fileStorageType, TSpaceToken spaceToken, - TSizeInBytes knownSizeOfThisFile, TURLPrefix transferProtocols, - TOverwriteMode overwriteOption, TReturnStatus status, TTURL transferURL) { - - super(toSURL, transferProtocols, status, transferURL); - init(spaceToken, fileLifetime, pinLifetime, fileStorageType, - knownSizeOfThisFile, overwriteOption); - } - - public InvalidPtPDataAttributesException(TSURL toSURL, - TLifeTimeInSeconds fileLifetime, TLifeTimeInSeconds pinLifetime, - TFileStorageType fileStorageType, 
TSpaceToken spaceToken, - TSizeInBytes knownSizeOfThisFile, TURLPrefix transferProtocols, - TOverwriteMode overwriteOption, TReturnStatus status, TTURL transferURL, - String message) { - - super(toSURL, transferProtocols, status, transferURL, message); - init(spaceToken, fileLifetime, pinLifetime, fileStorageType, - knownSizeOfThisFile, overwriteOption); - } - - public InvalidPtPDataAttributesException(TSURL toSURL, - TLifeTimeInSeconds fileLifetime, TLifeTimeInSeconds pinLifetime, - TFileStorageType fileStorageType, TSpaceToken spaceToken, - TSizeInBytes knownSizeOfThisFile, TURLPrefix transferProtocols, - TOverwriteMode overwriteOption, TReturnStatus status, TTURL transferURL, - Throwable cause) { - - super(toSURL, transferProtocols, status, transferURL, cause); - init(spaceToken, fileLifetime, pinLifetime, fileStorageType, - knownSizeOfThisFile, overwriteOption); - } - - public InvalidPtPDataAttributesException(TSURL toSURL, - TLifeTimeInSeconds fileLifetime, TLifeTimeInSeconds pinLifetime, - TFileStorageType fileStorageType, TSpaceToken spaceToken, - TSizeInBytes knownSizeOfThisFile, TURLPrefix transferProtocols, - TOverwriteMode overwriteOption, TReturnStatus status, TTURL transferURL, - String message, Throwable cause) { - - super(toSURL, transferProtocols, status, transferURL, message, cause); - init(spaceToken, fileLifetime, pinLifetime, fileStorageType, - knownSizeOfThisFile, overwriteOption); - } - - private void init(TSpaceToken spaceToken, TLifeTimeInSeconds fileLifetime, - TLifeTimeInSeconds pinLifetime, TFileStorageType fileStorageType, - TSizeInBytes knownSizeOfThisFile, TOverwriteMode overwriteOption) { - - nullSpaceToken = spaceToken == null; - nullPinLifetime = pinLifetime == null; - nullFileLifetime = fileLifetime == null; - nullFileStorageType = fileStorageType == null; - nullKnownSizeOfThisFile = knownSizeOfThisFile == null; - nullOverwriteOption = overwriteOption == null; - } - - /* - * (non-Javadoc) - * - * @see java.lang.Object#toString() - 
*/ - @Override - public String toString() { - - StringBuilder builder = new StringBuilder(); - builder.append("InvalidPtPDataAttributesException [nullSpaceToken="); - builder.append(nullSpaceToken); - builder.append(", nullPinLifetime="); - builder.append(nullPinLifetime); - builder.append(", nullFileLifetime="); - builder.append(nullFileLifetime); - builder.append(", nullFileStorageType="); - builder.append(nullFileStorageType); - builder.append(", nullKnownSizeOfThisFile="); - builder.append(nullKnownSizeOfThisFile); - builder.append(", nullOverwriteOption="); - builder.append(nullOverwriteOption); - builder.append(", nullSURL="); - builder.append(nullSURL); - builder.append(", nullTransferProtocols="); - builder.append(nullTransferProtocols); - builder.append(", nullStatus="); - builder.append(nullStatus); - builder.append(", nullTransferURL="); - builder.append(nullTransferURL); - builder.append("]"); - return builder.toString(); - } -} diff --git a/src/main/java/it/grid/storm/catalogs/InvalidPtPPersistentChunkDataAttributesException.java b/src/main/java/it/grid/storm/catalogs/InvalidPtPPersistentChunkDataAttributesException.java deleted file mode 100644 index cb191a997..000000000 --- a/src/main/java/it/grid/storm/catalogs/InvalidPtPPersistentChunkDataAttributesException.java +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by - * applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS - * OF ANY KIND, either express or implied. See the License for the specific - * language governing permissions and limitations under the License. 
- */ - -package it.grid.storm.catalogs; - -import it.grid.storm.srm.types.TRequestToken; -import it.grid.storm.srm.types.TSURL; -import it.grid.storm.srm.types.TLifeTimeInSeconds; -import it.grid.storm.srm.types.TFileStorageType; -import it.grid.storm.srm.types.TSpaceToken; -import it.grid.storm.srm.types.TOverwriteMode; -import it.grid.storm.common.types.TURLPrefix; -import it.grid.storm.srm.types.TSizeInBytes; -import it.grid.storm.srm.types.TReturnStatus; -import it.grid.storm.srm.types.TTURL; - -/** - * This class represents an exceptin thrown when the attributes supplied to the - * constructor of PtPChunkData are invalid, that is if any of the following is - * _null_: requestToken, toSURL, lifetime, fileStorageType, spaceToken, - * knownSizeOfThisFile, TURLPrefix transferProtocols, overwriteOption, fileSize, - * status, transferURL. - * - * @author EGRID - ICTP Trieste - * @date June, 2005 - * @version 2.0 - */ -public class InvalidPtPPersistentChunkDataAttributesException extends - InvalidPtPDataAttributesException { - - private static final long serialVersionUID = -5117535717125685975L; - /** - * booleans that indicate whether the corresponding variable is null - */ - boolean nullRequestToken; - - /** - * Constructor that requires the attributes that caused the exception to be - * thrown. 
- */ - public InvalidPtPPersistentChunkDataAttributesException( - TRequestToken requestToken, TSURL toSURL, TLifeTimeInSeconds fileLifetime, - TLifeTimeInSeconds pinLifetime, TFileStorageType fileStorageType, - TSpaceToken spaceToken, TSizeInBytes knownSizeOfThisFile, - TURLPrefix transferProtocols, TOverwriteMode overwriteOption, - TReturnStatus status, TTURL transferURL) { - - super(toSURL, fileLifetime, pinLifetime, fileStorageType, spaceToken, - knownSizeOfThisFile, transferProtocols, overwriteOption, status, - transferURL); - nullRequestToken = requestToken == null; - } - - /* - * (non-Javadoc) - * - * @see java.lang.Object#toString() - */ - @Override - public String toString() { - - StringBuilder builder = new StringBuilder(); - builder - .append("InvalidPtPPersistentChunkDataAttributesException [nullRequestToken="); - builder.append(nullRequestToken); - builder.append(", nullSpaceToken="); - builder.append(nullSpaceToken); - builder.append(", nullPinLifetime="); - builder.append(nullPinLifetime); - builder.append(", nullFileLifetime="); - builder.append(nullFileLifetime); - builder.append(", nullFileStorageType="); - builder.append(nullFileStorageType); - builder.append(", nullKnownSizeOfThisFile="); - builder.append(nullKnownSizeOfThisFile); - builder.append(", nullOverwriteOption="); - builder.append(nullOverwriteOption); - builder.append(", nullSURL="); - builder.append(nullSURL); - builder.append(", nullTransferProtocols="); - builder.append(nullTransferProtocols); - builder.append(", nullStatus="); - builder.append(nullStatus); - builder.append(", nullTransferURL="); - builder.append(nullTransferURL); - builder.append("]"); - return builder.toString(); - } -} diff --git a/src/main/java/it/grid/storm/catalogs/InvalidReducedCopyChunkDataAttributesException.java b/src/main/java/it/grid/storm/catalogs/InvalidReducedCopyChunkDataAttributesException.java deleted file mode 100644 index bd1e35c03..000000000 --- 
a/src/main/java/it/grid/storm/catalogs/InvalidReducedCopyChunkDataAttributesException.java +++ /dev/null @@ -1,64 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.catalogs; - -import it.grid.storm.srm.types.TSURL; -import it.grid.storm.srm.types.TReturnStatus; - -/** - * This class represents an exception thrown when the attributes supplied to the - * constructor of ReducedCopyChunkData are invalid, that is if any of the - * following is _null_: fromsURL, toSURL, status. - * - * @author Michele Dibenedetto - */ -@SuppressWarnings("serial") -public class InvalidReducedCopyChunkDataAttributesException extends Exception { - - // booleans that indicate whether the corresponding variable is null - private boolean nullFromSURL; - private boolean nullToSURL; - private boolean nullStatus; - - /** - * Constructor that requires the attributes that caused the exception to be - * thrown. 
- */ - public InvalidReducedCopyChunkDataAttributesException(TSURL fromSURL, - TSURL toSURL, TReturnStatus status) { - - nullFromSURL = fromSURL == null; - nullToSURL = toSURL == null; - nullStatus = status == null; - } - - @Override - public String toString() { - - StringBuilder sb = new StringBuilder(); - sb.append("Invalid CopyChunkData attributes: null-requestToken="); - sb.append("; null-fromSURL="); - sb.append(nullFromSURL); - sb.append("; null-toSURL="); - sb.append(nullToSURL); - sb.append("; null-status="); - sb.append(nullStatus); - sb.append("."); - return sb.toString(); - } -} diff --git a/src/main/java/it/grid/storm/catalogs/InvalidReducedPtPChunkDataAttributesException.java b/src/main/java/it/grid/storm/catalogs/InvalidReducedPtPChunkDataAttributesException.java deleted file mode 100644 index 9b2847b73..000000000 --- a/src/main/java/it/grid/storm/catalogs/InvalidReducedPtPChunkDataAttributesException.java +++ /dev/null @@ -1,72 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ - -package it.grid.storm.catalogs; - -import it.grid.storm.srm.types.TSURL; -import it.grid.storm.srm.types.TReturnStatus; -import it.grid.storm.srm.types.TFileStorageType; -import it.grid.storm.srm.types.TLifeTimeInSeconds; - -/** - * This class represents an exceptin thrown when the attributes supplied to the - * constructor of ReducedPtPChunkData are invalid, that is if any is _null_. - * - * @author EGRID - ICTP Trieste - * @date January, 2007 - * @version 1.0 - */ -public class InvalidReducedPtPChunkDataAttributesException extends Exception { - - private static final long serialVersionUID = 4945626188325362854L; - - // booleans that indicate whether the corresponding variable is null - private boolean nullToSURL; - private boolean nullStatus; - private boolean nullFileStorageType; - private boolean nullFileLifetime; - - /** - * Constructor that requires the attributes that caused the exception to be - * thrown. - */ - public InvalidReducedPtPChunkDataAttributesException(TSURL toSURL, - TReturnStatus status, TFileStorageType fileStorageType, - TLifeTimeInSeconds fileLifetime) { - - nullFileStorageType = fileStorageType == null; - nullToSURL = toSURL == null; - nullStatus = status == null; - nullFileLifetime = fileLifetime == null; - } - - @Override - public String toString() { - - StringBuilder sb = new StringBuilder(); - sb.append("Invalid PtPChunkData attributes: null-toSURL="); - sb.append(nullToSURL); - sb.append("; null-status="); - sb.append(nullStatus); - sb.append("; null-fileStorageType="); - sb.append(nullFileStorageType); - sb.append("; null-fileLifetime="); - sb.append(nullFileLifetime); - sb.append("."); - return sb.toString(); - } -} diff --git a/src/main/java/it/grid/storm/catalogs/InvalidRequestSummaryDataAttributesException.java b/src/main/java/it/grid/storm/catalogs/InvalidRequestSummaryDataAttributesException.java deleted file mode 100644 index 6021de690..000000000 --- 
a/src/main/java/it/grid/storm/catalogs/InvalidRequestSummaryDataAttributesException.java +++ /dev/null @@ -1,67 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.catalogs; - -import it.grid.storm.srm.types.TRequestType; -import it.grid.storm.srm.types.TRequestToken; -import it.grid.storm.griduser.GridUserInterface; - -/** - * This class represents an Exception thrown when a RequestSummaryData object is - * created with any invalid attributes: null TRequestType, null TRequestToken, - * null VomsGridUser. - * - * @author EGRID - ICTP Trieste - * @date March 18th, 2005 - * @version 3.0 - */ -public class InvalidRequestSummaryDataAttributesException extends Exception { - - private static final long serialVersionUID = -7729349713696058669L; - - // booleans true if the corresponding variablesare null or negative - private boolean nullRequestType = true; - private boolean nullRequestToken = true; - private boolean nullVomsGridUser = true; - - /** - * Constructor that requires the attributes that caused the exception to be - * thrown. 
- */ - public InvalidRequestSummaryDataAttributesException(TRequestType requestType, - TRequestToken requestToken, GridUserInterface gu) { - - nullRequestType = (requestType == null); - nullRequestToken = (requestToken == null); - nullVomsGridUser = (gu == null); - } - - @Override - public String toString() { - - StringBuilder sb = new StringBuilder(); - sb.append("Invalid RequestSummaryData attributes exception: "); - sb.append("nullRequestType="); - sb.append(nullRequestType); - sb.append("; nullRequestToken="); - sb.append(nullRequestToken); - sb.append("; nullVomsGridUser="); - sb.append(nullVomsGridUser); - return sb.toString(); - } -} diff --git a/src/main/java/it/grid/storm/catalogs/InvalidRetrievedDataException.java b/src/main/java/it/grid/storm/catalogs/InvalidRetrievedDataException.java deleted file mode 100644 index 5a252b65f..000000000 --- a/src/main/java/it/grid/storm/catalogs/InvalidRetrievedDataException.java +++ /dev/null @@ -1,66 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.catalogs; - -/** - * Class that represents an Exception thrown by the ReservedSpaceCatalog when it - * is asked to retrieve info from the persistence but the raw data is invalid - * and does not allow a well-formed domain obejcts to be created. 
- * - * @author: EGRID ICTP - * @version: 1.0 - * @date: June 2005 - */ -public class InvalidRetrievedDataException extends Exception { - - private static final long serialVersionUID = -3645913441787012438L; - - private String requestToken; - private String requestType; - private int totalFilesInThisRequest; - private int numOfQueuedRequests; - private int numOfProgressing; - private int numFinished; - private boolean isSuspended; - - /** - * Constructor that requires the attributes that caused the exception to be - * thrown. - */ - public InvalidRetrievedDataException(String requestToken, String requestType, - int totalFilesInThisRequest, int numOfQueuedRequests, - int numOfProgressingRequests, int numFinished, boolean isSuspended) { - - this.requestToken = requestToken; - this.requestType = requestType; - this.totalFilesInThisRequest = totalFilesInThisRequest; - this.numOfQueuedRequests = numOfQueuedRequests; - this.numOfProgressing = numOfProgressingRequests; - this.numFinished = numFinished; - this.isSuspended = isSuspended; - } - - public String toString() { - - return "InvalidRetrievedDataException: token=" + requestToken + " type=" - + requestType + " total-files=" + totalFilesInThisRequest + " queued=" - + numOfQueuedRequests + " progressing=" + numOfProgressing + " finished=" - + numFinished + " isSusp=" + isSuspended; - } - -} diff --git a/src/main/java/it/grid/storm/catalogs/InvalidSurlRequestDataAttributesException.java b/src/main/java/it/grid/storm/catalogs/InvalidSurlRequestDataAttributesException.java deleted file mode 100644 index 363d6d895..000000000 --- a/src/main/java/it/grid/storm/catalogs/InvalidSurlRequestDataAttributesException.java +++ /dev/null @@ -1,84 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. 
You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.catalogs; - -import it.grid.storm.srm.types.TReturnStatus; -import it.grid.storm.srm.types.TSURL; - -/** - * @author Michele Dibenedetto - * - */ -public class InvalidSurlRequestDataAttributesException extends Exception { - - private static final long serialVersionUID = -8636768167720753989L; - protected boolean nullSURL; - protected boolean nullStatus; - - public InvalidSurlRequestDataAttributesException(TSURL SURL, - TReturnStatus status) { - - super(); - init(SURL, status); - } - - public InvalidSurlRequestDataAttributesException(TSURL SURL, - TReturnStatus status, String message) { - - super(message); - init(SURL, status); - } - - public InvalidSurlRequestDataAttributesException(TSURL SURL, - TReturnStatus status, Throwable cause) { - - super(cause); - init(SURL, status); - } - - public InvalidSurlRequestDataAttributesException(TSURL SURL, - TReturnStatus status, String message, Throwable cause) { - - super(message, cause); - init(SURL, status); - } - - private void init(TSURL SURL, TReturnStatus status) { - - nullSURL = SURL == null; - nullStatus = status == null; - } - - /* - * (non-Javadoc) - * - * @see java.lang.Object#toString() - */ - @Override - public String toString() { - - StringBuilder builder = new StringBuilder(); - builder.append("InvalidSurlRequestDataAttributesException [nullSURL="); - builder.append(nullSURL); - builder.append(", nullStatus="); - builder.append(nullStatus); - builder.append("]"); - return builder.toString(); - } - -} diff --git a/src/main/java/it/grid/storm/catalogs/JiTData.java 
b/src/main/java/it/grid/storm/catalogs/JiTData.java deleted file mode 100644 index 4c3e4eaee..000000000 --- a/src/main/java/it/grid/storm/catalogs/JiTData.java +++ /dev/null @@ -1,72 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.catalogs; - -/** - * Class that represents data associated to JiT entries. It contains a String - * representing the file, an int representing the ACL, an int representing the - * user UID, an int representing the user GID. - * - * @author EGRID - ICTP Trieste - * @version 1.0 - * @date November 2006 - */ -public class JiTData { - - private String file = ""; - private int uid = -1; - private int gid = -1; - private int acl = -1; - - /** - * Constructor requiring the complete name of the file as String, the acl as - * int, the uid and primary gid of the LocalUser bith as int. 
- */ - public JiTData(String file, int acl, int uid, int gid) { - - this.file = file; - this.acl = acl; - this.uid = uid; - this.gid = gid; - } - - public String pfn() { - - return file; - } - - public int acl() { - - return acl; - } - - public int uid() { - - return uid; - } - - public int gid() { - - return gid; - } - - public String toString() { - - return "file=" + file + " acl=" + acl + " uid=" + uid + " gid=" + gid; - } -} diff --git a/src/main/java/it/grid/storm/catalogs/MultipleDataEntriesException.java b/src/main/java/it/grid/storm/catalogs/MultipleDataEntriesException.java deleted file mode 100644 index 134b13ff2..000000000 --- a/src/main/java/it/grid/storm/catalogs/MultipleDataEntriesException.java +++ /dev/null @@ -1,50 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.catalogs; - -import it.grid.storm.srm.types.TRequestToken; - -/** - * Class that represents an Exception thrown by the ReservedSpaceCatalog when it - * finds more than one row of data for the specified request. - * - * @author: EGRID ICTP - * @version: 1.0 - * @date: June 2005 - */ -public class MultipleDataEntriesException extends Exception { - - private static final long serialVersionUID = 427636739469695868L; - - private TRequestToken requestToken; - - /** - * Constructor tha trequires the attributes that caused the exception to be - * thrown. 
- */ - public MultipleDataEntriesException(TRequestToken requestToken) { - - this.requestToken = requestToken; - } - - public String toString() { - - return "MultipleDataEntriesException: requestToken=" + requestToken; - } - -} diff --git a/src/main/java/it/grid/storm/catalogs/NoDataFoundException.java b/src/main/java/it/grid/storm/catalogs/NoDataFoundException.java deleted file mode 100644 index bc44544a9..000000000 --- a/src/main/java/it/grid/storm/catalogs/NoDataFoundException.java +++ /dev/null @@ -1,50 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.catalogs; - -import it.grid.storm.srm.types.TRequestToken; - -/** - * Class that represents an Exception thrown by the ReservedSpaceCatalog when it - * finds no data for the specified request. - * - * @author: EGRID ICTP - * @version: 1.0 - * @date: June 2005 - */ -public class NoDataFoundException extends Exception { - - private static final long serialVersionUID = -718255813130266566L; - - private TRequestToken requestToken; - - /** - * Constructor tha trequires the attributes that caused the exception to be - * thrown. 
- */ - public NoDataFoundException(TRequestToken requestToken) { - - this.requestToken = requestToken; - } - - public String toString() { - - return "NoDataFoundException: requestToken=" + requestToken; - } - -} diff --git a/src/main/java/it/grid/storm/catalogs/OverwriteModeConverter.java b/src/main/java/it/grid/storm/catalogs/OverwriteModeConverter.java deleted file mode 100644 index ddcf6eda6..000000000 --- a/src/main/java/it/grid/storm/catalogs/OverwriteModeConverter.java +++ /dev/null @@ -1,103 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.catalogs; - -import java.util.Map; -import java.util.HashMap; -import java.util.Iterator; -import it.grid.storm.srm.types.TOverwriteMode; -import it.grid.storm.config.Configuration; - -/** - * Package private auxiliary class used to convert between DB and StoRM object - * model representation of TOverwriteMode. 
- * - * @author: EGRID ICTP - * @version: 2.0 - * @date: June 2005 - */ -public class OverwriteModeConverter { - - private Map DBtoSTORM = new HashMap(); - private Map STORMtoDB = new HashMap(); - - private static OverwriteModeConverter c = new OverwriteModeConverter(); - - /** - * Private constructor that fills in the conversion table; in particular, DB - * uses String values to represent TOverwriteMode: - * - * N NEVER A ALWAYS D WHENFILESAREDIFFERENT - */ - private OverwriteModeConverter() { - - DBtoSTORM.put("N", TOverwriteMode.NEVER); - DBtoSTORM.put("A", TOverwriteMode.ALWAYS); - DBtoSTORM.put("D", TOverwriteMode.WHENFILESAREDIFFERENT); - Object aux; - for (Iterator i = DBtoSTORM.keySet().iterator(); i.hasNext();) { - aux = i.next(); - STORMtoDB.put(DBtoSTORM.get(aux), aux); - } - } - - /** - * Method that returns the only instance of OverwriteModeConverter. - */ - public static OverwriteModeConverter getInstance() { - - return c; - } - - /** - * Method that returns the int used by DPM to represent the given - * TOverwriteMode. "" is returned if no match is found. - */ - public String toDB(TOverwriteMode om) { - - String aux = (String) STORMtoDB.get(om); - if (aux == null) - return ""; - return aux; - } - - /** - * Method that returns the TOverwriteMode used by StoRM to represent the - * supplied String representation of DPM. A configured default TOverwriteMode - * is returned in case no corresponding StoRM type is found. - * TOverwriteMode.EMPTY is returned if there are configuration errors. 
- */ - public TOverwriteMode toSTORM(String s) { - - TOverwriteMode aux = (TOverwriteMode) DBtoSTORM.get(s); - if (aux == null) - aux = (TOverwriteMode) DBtoSTORM.get(Configuration.getInstance() - .getDefaultOverwriteMode()); - if (aux == null) - return TOverwriteMode.EMPTY; - else - return aux; - } - - public String toString() { - - return "OverWriteModeConverter.\nDBtoSTORM map:" + DBtoSTORM - + "\nSTORMtoDB map:" + STORMtoDB; - } - -} diff --git a/src/main/java/it/grid/storm/catalogs/PersistentChunkData.java b/src/main/java/it/grid/storm/catalogs/PersistentChunkData.java deleted file mode 100644 index 59f59d81e..000000000 --- a/src/main/java/it/grid/storm/catalogs/PersistentChunkData.java +++ /dev/null @@ -1,11 +0,0 @@ -package it.grid.storm.catalogs; - -public interface PersistentChunkData extends ChunkData { - - /** - * Method that returns the primary key in persistence, associated with This - * Chunk. - */ - public long getPrimaryKey(); - -} diff --git a/src/main/java/it/grid/storm/catalogs/PinLifetimeConverter.java b/src/main/java/it/grid/storm/catalogs/PinLifetimeConverter.java deleted file mode 100644 index 1904e57a2..000000000 --- a/src/main/java/it/grid/storm/catalogs/PinLifetimeConverter.java +++ /dev/null @@ -1,89 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ - -package it.grid.storm.catalogs; - -import it.grid.storm.srm.types.TLifeTimeInSeconds; -import it.grid.storm.config.Configuration; - -/** - * Class that handles DB representation of a TLifetimeInSeconds, in particular - * it takes care of protocol specification: - * - * 0/null/negative are translated as default StoRM configurable values. StoRMs - * Empty TLifeTimeInSeconds is translated as 0. - * - * @author EGRID ICTP - * @version 1.0 - * @date March 2007 - */ -public class PinLifetimeConverter { - - private static PinLifetimeConverter stc = new PinLifetimeConverter(); // only - // instance - - private PinLifetimeConverter() { - - } - - /** - * Method that returns the only instance of SizeInBytesIntConverter - */ - public static PinLifetimeConverter getInstance() { - - return stc; - } - - /** - * Method that translates the Empty TLifeTimeInSeconds into the empty - * representation of DB which is 0. Any other value is left as is. - */ - public int toDB(long l) { - - if (l == TLifeTimeInSeconds.makeEmpty().value()) - return 0; - return new Long(l).intValue(); - } - - /** - * Method that returns the long corresponding to the int value in the DB, - * except if it is 0, NULL or negative; a configurable default value is - * returned instead, corresponding to the getPinLifetimeMinimum() - * Configuration class method. 
- */ - public long toStoRM(int s) { - - if (s == 0) { - return Configuration.getInstance().getPinLifetimeDefault(); - } else if (s < 0) { - // The default is used also as a Minimum - return Configuration.getInstance().getPinLifetimeDefault(); - } - return new Integer(s).longValue(); - } - - public long toStoRM(long s) { - - if (s == 0) { - return Configuration.getInstance().getPinLifetimeDefault(); - } else if (s < 0) { - // The default is used also as a Minimum - return Configuration.getInstance().getPinLifetimeDefault(); - } - return s; - } -} diff --git a/src/main/java/it/grid/storm/catalogs/PtGChunkCatalog.java b/src/main/java/it/grid/storm/catalogs/PtGChunkCatalog.java index 307ed1a19..91d35564b 100644 --- a/src/main/java/it/grid/storm/catalogs/PtGChunkCatalog.java +++ b/src/main/java/it/grid/storm/catalogs/PtGChunkCatalog.java @@ -17,6 +17,12 @@ package it.grid.storm.catalogs; +import java.util.ArrayList; +import java.util.Collection; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import it.grid.storm.common.types.SizeUnit; import it.grid.storm.common.types.TURLPrefix; import it.grid.storm.common.types.TimeUnit; @@ -24,11 +30,21 @@ import it.grid.storm.griduser.AbstractGridUser; import it.grid.storm.griduser.GridUserInterface; import it.grid.storm.griduser.GridUserManager; +import it.grid.storm.persistence.converter.PinLifetimeConverter; +import it.grid.storm.persistence.converter.StatusCodeConverter; +import it.grid.storm.persistence.converter.TURLConverter; +import it.grid.storm.persistence.converter.TransferProtocolListConverter; +import it.grid.storm.persistence.dao.PtGChunkDAO; +import it.grid.storm.persistence.exceptions.InvalidReducedPtGChunkDataAttributesException; +import it.grid.storm.persistence.exceptions.InvalidSurlRequestDataAttributesException; +import it.grid.storm.persistence.impl.mysql.PtGChunkDAOMySql; +import it.grid.storm.persistence.model.PtGChunkDataTO; +import it.grid.storm.persistence.model.PtGPersistentChunkData; 
+import it.grid.storm.persistence.model.ReducedPtGChunkData; +import it.grid.storm.persistence.model.ReducedPtGChunkDataTO; import it.grid.storm.srm.types.InvalidTDirOptionAttributesException; -import it.grid.storm.srm.types.InvalidTRequestTokenAttributesException; import it.grid.storm.srm.types.InvalidTSURLAttributesException; import it.grid.storm.srm.types.InvalidTSizeAttributesException; -import it.grid.storm.srm.types.InvalidTTURLAttributesException; import it.grid.storm.srm.types.TDirOption; import it.grid.storm.srm.types.TLifeTimeInSeconds; import it.grid.storm.srm.types.TRequestToken; @@ -38,820 +54,349 @@ import it.grid.storm.srm.types.TStatusCode; import it.grid.storm.srm.types.TTURL; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.List; -import java.util.Timer; -import java.util.TimerTask; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - /** - * Class that represents StoRMs PtGChunkCatalog: it collects PtGChunkData and - * provides methods for looking up a PtGChunkData based on TRequestToken, as - * well as for adding a new entry and removing an existing one. + * Class that represents StoRMs PtGChunkCatalog: it collects PtGChunkData and provides methods for + * looking up a PtGChunkData based on TRequestToken, as well as for adding a new entry and removing + * an existing one. * * @author EGRID - ICTP Trieste * @date April 26th, 2005 * @version 4.0 */ -@SuppressWarnings("unused") public class PtGChunkCatalog { - private static final Logger log = LoggerFactory - .getLogger(PtGChunkCatalog.class); - - /* Only instance of PtGChunkCatalog present in StoRM! */ - private static final PtGChunkCatalog cat = new PtGChunkCatalog(); - private final PtGChunkDAO dao = PtGChunkDAO.getInstance(); - - /* - * Timer object in charge of transiting expired requests from SRM_FILE_PINNED - * to SRM_RELEASED! - */ - private final Timer transiter = new Timer(); - /* Delay time before starting cleaning thread! 
*/ - private final long delay = Configuration.getInstance() - .getTransitInitialDelay() * 1000; - /* Period of execution of cleaning! */ - private final long period = Configuration.getInstance() - .getTransitTimeInterval() * 1000; - - /** - * Private constructor that starts the internal timer needed to periodically - * check and transit requests whose pinLifetime has expired and are in - * SRM_FILE_PINNED, to SRM_RELEASED. - */ - private PtGChunkCatalog() { - - TimerTask transitTask = new TimerTask() { - - @Override - public void run() { - - transitExpiredSRM_FILE_PINNED(); - } - }; - transiter.scheduleAtFixedRate(transitTask, delay, period); - } - - /** - * Method that returns the only instance of PtGChunkCatalog available. - */ - public static PtGChunkCatalog getInstance() { - - return cat; - } - - /** - * Method used to update into Persistence a retrieved PtGChunkData. In case - * any error occurs, the operation does not proceed but no Exception is - * thrown. Error messages get logged. - * - * Only fileSize, StatusCode, errString and transferURL are updated. Likewise - * for the request pinLifetime. 
- */ - synchronized public void update(PtGPersistentChunkData chunkData) { - - PtGChunkDataTO to = new PtGChunkDataTO(); - /* Primary key needed by DAO Object */ - to.setPrimaryKey(chunkData.getPrimaryKey()); - to.setFileSize(chunkData.getFileSize().value()); - to.setStatus(StatusCodeConverter.getInstance().toDB( - chunkData.getStatus().getStatusCode())); - to.setErrString(chunkData.getStatus().getExplanation()); - to.setTurl(TURLConverter.getInstance().toDB( - chunkData.getTransferURL().toString())); - to.setLifeTime(PinLifetimeConverter.getInstance().toDB( - chunkData.getPinLifeTime().value())); - to.setNormalizedStFN(chunkData.getSURL().normalizedStFN()); - to.setSurlUniqueID(new Integer(chunkData.getSURL().uniqueId())); - to.setClientDN(chunkData.getUser().getDn()); - if (chunkData.getUser() instanceof AbstractGridUser) { - if (((AbstractGridUser) chunkData.getUser()).hasVoms()) { - to.setVomsAttributes(((AbstractGridUser) chunkData.getUser()) - .getFQANsAsString()); - } - - } - dao.update(to); - } - - /** - * Refresh method. THIS IS A WORK IN PROGRESS!!!! This method have to synch - * the ChunkData information with the database status intended as the status - * code and the TURL - * - * @param auxTO - * @param PtGChunkData - * inputChunk - * @return PtGChunkData outputChunk - */ - synchronized public PtGPersistentChunkData refreshStatus( - PtGPersistentChunkData inputChunk) { - - PtGChunkDataTO chunkDataTO = dao.refresh(inputChunk.getPrimaryKey()); - - log.debug("PtG CHUNK CATALOG: retrieved data " + chunkDataTO); - if (chunkDataTO == null) { - log.warn("PtG CHUNK CATALOG! Empty TO found in persistence for specified " - + "request: {}", inputChunk.getPrimaryKey()); - return inputChunk; - } - - /* - * In this first version the only field updated is the Status. 
Once - * updated, the new status is rewritten into the input ChunkData - */ - - // status - TReturnStatus status = null; - TStatusCode code = StatusCodeConverter.getInstance().toSTORM(chunkDataTO.status()); - if (code != TStatusCode.EMPTY) { - status = new TReturnStatus(code, chunkDataTO.errString()); - } - inputChunk.setStatus(status); - TTURL turl = null; - try { - turl = TTURL.makeFromString(chunkDataTO.turl()); - } catch (InvalidTTURLAttributesException e) { - log.info("PtGChunkCatalog (FALSE-ERROR-in-abort-refresh-status?):" - + " built a TURL with protocol NULL (retrieved from the DB..)"); - } - inputChunk.setTransferURL(turl); - return inputChunk; - } - - /** - * Method that returns a Collection of PtGChunkData Objects matching the - * supplied TRequestToken. - * - * If any of the data associated to the TRequestToken is not well formed and - * so does not allow a PtGChunkData Object to be created, then that part of - * the request is dropped and gets logged, and the processing continues with - * the next part. All valid chunks get returned: the others get dropped. - * - * If there are no chunks to process then an empty Collection is returned, and - * a messagge gets logged. - */ - synchronized public Collection lookup(TRequestToken rt) { - - Collection chunkTOs = dao.find(rt); - log.debug("PtG CHUNK CATALOG: retrieved data " + chunkTOs); - ArrayList list = new ArrayList(); - if (chunkTOs.isEmpty()) { - log.warn("PtG CHUNK CATALOG! No chunks found in persistence for " - + "specified request: {}", rt); - return list; - } - PtGPersistentChunkData chunk; - for (PtGChunkDataTO chunkTO : chunkTOs) { - chunk = makeOne(chunkTO, rt); - if (chunk == null) { - continue; - } - list.add(chunk); - if (isComplete(chunkTO)) { - continue; - } - try { - dao.updateIncomplete(this.completeTO(chunkTO, chunk)); - } catch (InvalidReducedPtGChunkDataAttributesException e) { - log.warn("PtG CHUNK CATALOG! 
unable to add missing informations on DB " - + "to the request: {}", e.getMessage()); - } - } - log.debug("PtG CHUNK CATALOG: returning " + list); - return list; - } - - /** - * Generates a PtGChunkData from the received PtGChunkDataTO - * - * @param chunkDataTO - * @param rt - * @return - */ - private PtGPersistentChunkData makeOne(PtGChunkDataTO chunkDataTO, - TRequestToken rt) { - - StringBuilder errorSb = new StringBuilder(); - TSURL fromSURL = null; - try { - fromSURL = TSURL.makeFromStringValidate(chunkDataTO.fromSURL()); - } catch (InvalidTSURLAttributesException e) { - errorSb.append(e); - } - if (chunkDataTO.normalizedStFN() != null) { - fromSURL.setNormalizedStFN(chunkDataTO.normalizedStFN()); - } - if (chunkDataTO.surlUniqueID() != null) { - fromSURL.setUniqueID(chunkDataTO.surlUniqueID().intValue()); - } - // lifeTime - TLifeTimeInSeconds lifeTime = null; - try { - long pinLifeTime = PinLifetimeConverter.getInstance().toStoRM( - chunkDataTO.lifeTime()); - // Check for max value allowed - long max = Configuration.getInstance().getPinLifetimeMaximum(); - if (pinLifeTime > max) { - log.warn("PinLifeTime is greater than the max value allowed." - + " Drop the value to the max = {} seconds", max); - pinLifeTime = max; - } - lifeTime = TLifeTimeInSeconds.make((pinLifeTime), TimeUnit.SECONDS); - } catch (IllegalArgumentException e) { - errorSb.append("\n"); - errorSb.append(e); - } - // dirOption - TDirOption dirOption = null; - try { - dirOption = new TDirOption(chunkDataTO.dirOption(), - chunkDataTO.allLevelRecursive(), chunkDataTO.numLevel()); - } catch (InvalidTDirOptionAttributesException e) { - errorSb.append("\n"); - errorSb.append(e); - } - // transferProtocols - TURLPrefix transferProtocols = TransferProtocolListConverter - .toSTORM(chunkDataTO.protocolList()); - if (transferProtocols.size() == 0) { - errorSb.append("\nEmpty list of TransferProtocols or could " - + "not translate TransferProtocols!"); - /* fail construction of PtGChunkData! 
*/ - transferProtocols = null; - } - // fileSize - TSizeInBytes fileSize = null; - try { - fileSize = TSizeInBytes.make(chunkDataTO.fileSize(), SizeUnit.BYTES); - } catch (InvalidTSizeAttributesException e) { - errorSb.append("\n"); - errorSb.append(e); - } - // status - TReturnStatus status = null; - TStatusCode code = StatusCodeConverter.getInstance().toSTORM( - chunkDataTO.status()); - if (code == TStatusCode.EMPTY) { - errorSb.append("\nRetrieved StatusCode was not recognised: " - + chunkDataTO.status()); - } else { - status = new TReturnStatus(code, chunkDataTO.errString()); - } - GridUserInterface gridUser = null; - try { - if (chunkDataTO.vomsAttributes() != null - && !chunkDataTO.vomsAttributes().trim().equals("")) { - gridUser = GridUserManager.makeVOMSGridUser(chunkDataTO.clientDN(), - chunkDataTO.vomsAttributesArray()); - } else { - gridUser = GridUserManager.makeGridUser(chunkDataTO.clientDN()); - } - - } catch (IllegalArgumentException e) { - log.error("Unexpected error on voms grid user creation." - + " IllegalArgumentException: {}", e.getMessage(), e); - } - // transferURL - /* - * whatever is read is just meaningless because PtG will fill it in!!! So - * create an Empty TTURL by default! Vital to avoid problems with unknown - * DPM NULL/EMPTY logic policy! - */ - TTURL transferURL = TTURL.makeEmpty(); - // make PtGChunkData - PtGPersistentChunkData aux = null; - try { - aux = new PtGPersistentChunkData(gridUser, rt, fromSURL, lifeTime, - dirOption, transferProtocols, fileSize, status, transferURL); - aux.setPrimaryKey(chunkDataTO.primaryKey()); - } catch (InvalidSurlRequestDataAttributesException e) { - dao.signalMalformedPtGChunk(chunkDataTO); - log.warn("PtG CHUNK CATALOG! Retrieved malformed PtG chunk data from " - + "persistence. Dropping chunk from request {}", rt); - log.warn(e.getMessage(), e); - log.warn(errorSb.toString()); - } - // end... 
- return aux; - } - - /** - * - * Adds to the received PtGChunkDataTO the normalized StFN and the SURL unique - * ID taken from the PtGChunkData - * - * @param chunkTO - * @param chunk - */ - private void completeTO(ReducedPtGChunkDataTO chunkTO, - final ReducedPtGChunkData chunk) { - - chunkTO.setNormalizedStFN(chunk.fromSURL().normalizedStFN()); - chunkTO.setSurlUniqueID(new Integer(chunk.fromSURL().uniqueId())); - } - - /** - * - * Creates a ReducedPtGChunkDataTO from the received PtGChunkDataTO and - * completes it with the normalized StFN and the SURL unique ID taken from the - * PtGChunkData - * - * @param chunkTO - * @param chunk - * @return - * @throws InvalidReducedPtGChunkDataAttributesException - */ - private ReducedPtGChunkDataTO completeTO(PtGChunkDataTO chunkTO, - final PtGPersistentChunkData chunk) - throws InvalidReducedPtGChunkDataAttributesException { - - ReducedPtGChunkDataTO reducedChunkTO = this.reduce(chunkTO); - this.completeTO(reducedChunkTO, this.reduce(chunk)); - return reducedChunkTO; - } - - /** - * Creates a ReducedPtGChunkData from the data contained in the received - * PtGChunkData - * - * @param chunk - * @return - * @throws InvalidReducedPtGChunkDataAttributesException - */ - private ReducedPtGChunkData reduce(PtGPersistentChunkData chunk) - throws InvalidReducedPtGChunkDataAttributesException { - - ReducedPtGChunkData reducedChunk = new ReducedPtGChunkData(chunk.getSURL(), - chunk.getStatus()); - reducedChunk.setPrimaryKey(chunk.getPrimaryKey()); - return reducedChunk; - } - - /** - * Creates a ReducedPtGChunkDataTO from the data contained in the received - * PtGChunkDataTO - * - * @param chunkTO - * @return - */ - private ReducedPtGChunkDataTO reduce(PtGChunkDataTO chunkTO) { - - ReducedPtGChunkDataTO reducedChunkTO = new ReducedPtGChunkDataTO(); - reducedChunkTO.setPrimaryKey(chunkTO.primaryKey()); - reducedChunkTO.setFromSURL(chunkTO.fromSURL()); - reducedChunkTO.setNormalizedStFN(chunkTO.normalizedStFN()); - 
reducedChunkTO.setSurlUniqueID(chunkTO.surlUniqueID()); - reducedChunkTO.setStatus(chunkTO.status()); - reducedChunkTO.setErrString(chunkTO.errString()); - return reducedChunkTO; - } - - /** - * Checks if the received PtGChunkDataTO contains the fields not set by the - * front end but required - * - * @param chunkTO - * @return - */ - private boolean isComplete(PtGChunkDataTO chunkTO) { - - return (chunkTO.normalizedStFN() != null) - && (chunkTO.surlUniqueID() != null); - } - - /** - * Checks if the received ReducedPtGChunkDataTO contains the fields not set by - * the front end but required - * - * @param reducedChunkTO - * @return - */ - private boolean isComplete(ReducedPtGChunkDataTO reducedChunkTO) { - - return (reducedChunkTO.normalizedStFN() != null) - && (reducedChunkTO.surlUniqueID() != null); - } - - /** - * Method that returns a Collection of ReducedPtGChunkData Objects associated - * to the supplied TRequestToken. - * - * If any of the data retrieved for a given chunk is not well formed and so - * does not allow a ReducedPtGChunkData Object to be created, then that chunk - * is dropped and gets logged, while processing continues with the next one. - * All valid chunks get returned: the others get dropped. - * - * If there are no chunks associated to the given TRequestToken, then an empty - * Collection is returned and a message gets logged. - */ - synchronized public Collection lookupReducedPtGChunkData( - TRequestToken rt) { - - Collection reducedChunkDataTOs = dao.findReduced(rt - .getValue()); - log.debug("PtG CHUNK CATALOG: retrieved data {}", reducedChunkDataTOs); - ArrayList list = new ArrayList(); - if (reducedChunkDataTOs.isEmpty()) { - log.debug("PtG CHUNK CATALOG! 
No chunks found in persistence for {}", rt); - } else { - ReducedPtGChunkData reducedChunkData = null; - for (ReducedPtGChunkDataTO reducedChunkDataTO : reducedChunkDataTOs) { - reducedChunkData = makeOneReduced(reducedChunkDataTO); - if (reducedChunkData != null) { - list.add(reducedChunkData); - if (!this.isComplete(reducedChunkDataTO)) { - this.completeTO(reducedChunkDataTO, reducedChunkData); - dao.updateIncomplete(reducedChunkDataTO); - } - } - } - log.debug("PtG CHUNK CATALOG: returning {}", list); - } - return list; - } - - public Collection lookupReducedPtGChunkData( - TRequestToken requestToken, Collection surls) { - - int[] surlsUniqueIDs = new int[surls.size()]; - String[] surlsArray = new String[surls.size()]; - int index = 0; - for (TSURL tsurl : surls) { - surlsUniqueIDs[index] = tsurl.uniqueId(); - surlsArray[index] = tsurl.rawSurl(); - index++; - } - Collection chunkDataTOCollection = dao.findReduced( - requestToken, surlsUniqueIDs, surlsArray); - log.debug("PtG CHUNK CATALOG: retrieved data {}", chunkDataTOCollection); - return buildReducedChunkDataList(chunkDataTOCollection); - } - - public Collection lookupPtGChunkData(TSURL surl, - GridUserInterface user) { - - return lookupPtGChunkData(Arrays.asList(new TSURL[] { surl }), user); - } - - public Collection lookupPtGChunkData(TSURL surl) { - - return lookupPtGChunkData(Arrays.asList(new TSURL[] { surl })); - } - - public Collection lookupPtGChunkData( - List surls, GridUserInterface user) { - - int[] surlsUniqueIDs = new int[surls.size()]; - String[] surlsArray = new String[surls.size()]; - int index = 0; - for (TSURL tsurl : surls) { - surlsUniqueIDs[index] = tsurl.uniqueId(); - surlsArray[index] = tsurl.rawSurl(); - index++; - } - Collection chunkDataTOCollection = dao.find(surlsUniqueIDs, - surlsArray, user.getDn()); - log.debug("PtG CHUNK CATALOG: retrieved data {}", chunkDataTOCollection); - return buildChunkDataList(chunkDataTOCollection); - } - - public Collection lookupPtGChunkData(List 
surls) { - - int[] surlsUniqueIDs = new int[surls.size()]; - String[] surlsArray = new String[surls.size()]; - int index = 0; - for (TSURL tsurl : surls) { - surlsUniqueIDs[index] = tsurl.uniqueId(); - surlsArray[index] = tsurl.rawSurl(); - index++; - } - Collection chunkDataTOCollection = dao.find(surlsUniqueIDs, - surlsArray); - log.debug("PtG CHUNK CATALOG: retrieved data {}", chunkDataTOCollection); - return buildChunkDataList(chunkDataTOCollection); - } - - private Collection buildChunkDataList( - Collection chunkDataTOCollection) { - - ArrayList list = new ArrayList(); - PtGPersistentChunkData chunk; - for (PtGChunkDataTO chunkTO : chunkDataTOCollection) { - chunk = makeOne(chunkTO); - if (chunk == null) { - continue; - } - list.add(chunk); - if (isComplete(chunkTO)) { - continue; - } - try { - dao.updateIncomplete(this.completeTO(chunkTO, chunk)); - } catch (InvalidReducedPtGChunkDataAttributesException e) { - log.warn("PtG CHUNK CATALOG! unable to add missing informations on " - + "DB to the request: ", e.getMessage()); - } - } - return list; - } - - private PtGPersistentChunkData makeOne(PtGChunkDataTO chunkTO) { - - try { - return makeOne(chunkTO, - new TRequestToken(chunkTO.requestToken(), chunkTO.timeStamp())); - } catch (InvalidTRequestTokenAttributesException e) { - throw new IllegalStateException( - "Unexpected InvalidTRequestTokenAttributesException in TRequestToken: " - + e); - } - } - - /** - * Method that returns a Collection of ReducedPtGChunkData Objects matching - * the supplied GridUser and Collection of TSURLs. If any of the data - * retrieved for a given chunk is not well formed and so does not allow a - * ReducedPtGChunkData Object to be created, then that chunk is dropped and - * gets logged, while processing continues with the next one. All valid chunks - * get returned: the others get dropped. 
If there are no chunks associated to - * the given GridUser and Collection of TSURLs, then an empty Collection is - * returned and a message gets logged. - */ - synchronized public Collection lookupReducedPtGChunkData( - GridUserInterface gu, Collection tsurlCollection) { - - int[] surlsUniqueIDs = new int[tsurlCollection.size()]; - String[] surls = new String[tsurlCollection.size()]; - int index = 0; - for (TSURL tsurl : tsurlCollection) { - surlsUniqueIDs[index] = tsurl.uniqueId(); - surls[index] = tsurl.rawSurl(); - index++; - } - Collection chunkDataTOCollection = dao.findReduced( - gu.getDn(), surlsUniqueIDs, surls); - log.debug("PtG CHUNK CATALOG: retrieved data {}", chunkDataTOCollection); - return buildReducedChunkDataList(chunkDataTOCollection); - } - - private Collection buildReducedChunkDataList( - Collection chunkDataTOCollection) { - - ArrayList list = new ArrayList(); - ReducedPtGChunkData reducedChunkData; - for (ReducedPtGChunkDataTO reducedChunkDataTO : chunkDataTOCollection) { - reducedChunkData = makeOneReduced(reducedChunkDataTO); - if (reducedChunkData != null) { - list.add(reducedChunkData); - if (!isComplete(reducedChunkDataTO)) { - completeTO(reducedChunkDataTO, reducedChunkData); - dao.updateIncomplete(reducedChunkDataTO); - } - } - } - log.debug("PtG CHUNK CATALOG: returning {}",list); - return list; - } - - /** - * - * - * @param reducedChunkDataTO - * @return - */ - private ReducedPtGChunkData makeOneReduced( - ReducedPtGChunkDataTO reducedChunkDataTO) { - - StringBuilder errorSb = new StringBuilder(); - // fromSURL - TSURL fromSURL = null; - try { - fromSURL = TSURL.makeFromStringValidate(reducedChunkDataTO.fromSURL()); - } catch (InvalidTSURLAttributesException e) { - errorSb.append(e); - } - if (reducedChunkDataTO.normalizedStFN() != null) { - fromSURL.setNormalizedStFN(reducedChunkDataTO.normalizedStFN()); - } - if (reducedChunkDataTO.surlUniqueID() != null) { - fromSURL.setUniqueID(reducedChunkDataTO.surlUniqueID().intValue()); - } 
- // status - TReturnStatus status = null; - TStatusCode code = StatusCodeConverter.getInstance().toSTORM( - reducedChunkDataTO.status()); - if (code == TStatusCode.EMPTY) { - errorSb.append("\nRetrieved StatusCode was not recognised: " - + reducedChunkDataTO.status()); - } else { - status = new TReturnStatus(code, reducedChunkDataTO.errString()); - } - // make ReducedPtGChunkData - ReducedPtGChunkData aux = null; - try { - aux = new ReducedPtGChunkData(fromSURL, status); - aux.setPrimaryKey(reducedChunkDataTO.primaryKey()); - } catch (InvalidReducedPtGChunkDataAttributesException e) { - log.warn("PtG CHUNK CATALOG! Retrieved malformed Reduced PtG chunk " - + "data from persistence: dropping reduced chunk..."); - log.warn(e.getMessage(), e); - log.warn(errorSb.toString()); - } - // end... - return aux; - } - - /** - * Method used to add into Persistence a new entry. The supplied PtGChunkData - * gets the primary key changed to the value assigned in Persistence. - * - * This method is intended to be used by a recursive PtG request: the parent - * request supplies a directory which must be expanded, so all new children - * requests resulting from the files in the directory are added into - * persistence. - * - * So this method does _not_ add a new SRM prepare_to_get request into the DB! - * - * The only children data written into the DB are: sourceSURL, TDirOption, - * statusCode and explanation. - * - * In case of any error the operation does not proceed, but no Exception is - * thrown! Proper messages get logged by underlaying DAO. - */ - synchronized public void addChild(PtGPersistentChunkData chunkData) { - - PtGChunkDataTO to = new PtGChunkDataTO(); - /* needed for now to find ID of request! Must be changed soon! 
*/ - to.setRequestToken(chunkData.getRequestToken().toString()); - to.setFromSURL(chunkData.getSURL().toString()); - to.setNormalizedStFN(chunkData.getSURL().normalizedStFN()); - to.setSurlUniqueID(new Integer(chunkData.getSURL().uniqueId())); - - to.setAllLevelRecursive(chunkData.getDirOption().isAllLevelRecursive()); - to.setDirOption(chunkData.getDirOption().isDirectory()); - to.setNumLevel(chunkData.getDirOption().getNumLevel()); - to.setStatus(StatusCodeConverter.getInstance().toDB( - chunkData.getStatus().getStatusCode())); - to.setErrString(chunkData.getStatus().getExplanation()); - to.setClientDN(chunkData.getUser().getDn()); - if (chunkData.getUser() instanceof AbstractGridUser) { - if (((AbstractGridUser) chunkData.getUser()).hasVoms()) { - to.setVomsAttributes(((AbstractGridUser) chunkData.getUser()) - .getFQANsAsString()); - } - - } - /* add the entry and update the Primary Key field! */ - dao.addChild(to); - /* set the assigned PrimaryKey! */ - chunkData.setPrimaryKey(to.primaryKey()); - } - - /** - * Method used to add into Persistence a new entry. The supplied PtGChunkData - * gets the primary key changed to the value assigned in the Persistence. The - * method requires the GridUser to whom associate the added request. - * - * This method is intended to be used by an srmCopy request in push mode which - * implies a local srmPtG. The only fields from PtGChunkData that are - * considered are: the requestToken, the sourceSURL, the pinLifetime, the - * dirOption, the protocolList, the status and error string. - * - * So this method _adds_ a new SRM prepare_to_get request into the DB! - * - * In case of any error the operation does not proceed, but no Exception is - * thrown! The underlaying DAO logs proper error messagges. 
- */ - synchronized public void add(PtGPersistentChunkData chunkData, - GridUserInterface gu) { - - PtGChunkDataTO to = new PtGChunkDataTO(); - to.setRequestToken(chunkData.getRequestToken().toString()); - to.setFromSURL(chunkData.getSURL().toString()); - to.setNormalizedStFN(chunkData.getSURL().normalizedStFN()); - to.setSurlUniqueID(new Integer(chunkData.getSURL().uniqueId())); - - to.setLifeTime(new Long(chunkData.getPinLifeTime().value()).intValue()); - to.setAllLevelRecursive(chunkData.getDirOption().isAllLevelRecursive()); - to.setDirOption(chunkData.getDirOption().isDirectory()); - to.setNumLevel(chunkData.getDirOption().getNumLevel()); - to.setProtocolList(TransferProtocolListConverter.toDB(chunkData - .getTransferProtocols())); - to.setStatus(StatusCodeConverter.getInstance().toDB( - chunkData.getStatus().getStatusCode())); - to.setErrString(chunkData.getStatus().getExplanation()); - - to.setClientDN(chunkData.getUser().getDn()); - if (chunkData.getUser() instanceof AbstractGridUser) { - if (((AbstractGridUser) chunkData.getUser()).hasVoms()) { - to.setVomsAttributes(((AbstractGridUser) chunkData.getUser()) - .getFQANsAsString()); - } - - } - - dao.addNew(to, gu.getDn()); // add the entry and update the Primary Key - // field! - chunkData.setPrimaryKey(to.primaryKey()); // set the assigned PrimaryKey! - } - - /** - * Method used to establish if in Persistence there is a PtGChunkData working - * on the supplied SURL, and whose state is SRM_FILE_PINNED, in which case - * true is returned. In case none are found or there is any problem, false is - * returned. This method is intended to be used by srmMv. - */ - synchronized public boolean isSRM_FILE_PINNED(TSURL surl) { - - return (dao.numberInSRM_FILE_PINNED(surl.uniqueId()) > 0); - - } - - /** - * Method used to transit the specified Collection of ReducedPtGChunkData from - * SRM_FILE_PINNED to SRM_RELEASED. Chunks in any other starting state are not - * transited. 
In case of any error nothing is done, but proper error messages - * get logged by the underlaying DAO. - */ - synchronized public void transitSRM_FILE_PINNEDtoSRM_RELEASED( - Collection chunks, TRequestToken token) { - - if (chunks == null || chunks.isEmpty()) { - return; - } - long[] primaryKeys = new long[chunks.size()]; - int index = 0; - for (ReducedPtGChunkData chunkData : chunks) { - if (chunkData != null) { - primaryKeys[index] = chunkData.primaryKey(); - index++; - } - - } - dao.transitSRM_FILE_PINNEDtoSRM_RELEASED(primaryKeys, token); - for (ReducedPtGChunkData chunkData : chunks) { - if (chunkData != null) { - primaryKeys[index] = chunkData.primaryKey(); - index++; - } - } - } - - /** - * Method used to force transition to SRM_RELEASED from SRM_FILE_PINNED, of - * all PtG Requests whose pinLifetime has expired and the state still has not - * been changed (a user forgot to run srmReleaseFiles)! - */ - synchronized public void transitExpiredSRM_FILE_PINNED() { - - List expiredSurls = dao.transitExpiredSRM_FILE_PINNED(); - } - - public void updateStatus(TRequestToken requestToken, TSURL surl, - TStatusCode statusCode, String explanation) { - - dao.updateStatus(requestToken, new int[] { surl.uniqueId() }, - new String[] { surl.rawSurl() }, statusCode, explanation); - } - - public void updateFromPreviousStatus(TSURL surl, - TStatusCode expectedStatusCode, TStatusCode newStatusCode, - String explanation) { - - dao.updateStatusOnMatchingStatus(new int[] { surl.uniqueId() }, - new String[] { surl.rawSurl() }, expectedStatusCode, newStatusCode, - explanation); - - } - - public void updateFromPreviousStatus(TRequestToken requestToken, - TStatusCode expectedStatusCode, TStatusCode newStatusCode, - String explanation) { - - dao.updateStatusOnMatchingStatus(requestToken, expectedStatusCode, - newStatusCode, explanation); - } - - public void updateFromPreviousStatus(TRequestToken requestToken, - List surlList, TStatusCode expectedStatusCode, - TStatusCode 
newStatusCode) { - - int[] surlsUniqueIDs = new int[surlList.size()]; - String[] surls = new String[surlList.size()]; - int index = 0; - for (TSURL tsurl : surlList) { - surlsUniqueIDs[index] = tsurl.uniqueId(); - surls[index] = tsurl.rawSurl(); - index++; - } - dao.updateStatusOnMatchingStatus(requestToken, surlsUniqueIDs, surls, - expectedStatusCode, newStatusCode); - } + private static final Logger log = LoggerFactory.getLogger(PtGChunkCatalog.class); + + private static PtGChunkCatalog instance; + + public static synchronized PtGChunkCatalog getInstance() { + if (instance == null) { + instance = new PtGChunkCatalog(); + } + return instance; + } + + private final PtGChunkDAO dao; + + /** + * Private constructor that starts the internal timer needed to periodically check and transit + * requests whose pinLifetime has expired and are in SRM_FILE_PINNED, to SRM_RELEASED. + */ + private PtGChunkCatalog() { + + dao = PtGChunkDAOMySql.getInstance(); + } + + /** + * Method used to update into Persistence a retrieved PtGChunkData. In case any error occurs, the + * operation does not proceed but no Exception is thrown. Error messages get logged. + * + * Only fileSize, StatusCode, errString and transferURL are updated. Likewise for the request + * pinLifetime. 
+ */ + synchronized public void update(PtGPersistentChunkData chunkData) { + + PtGChunkDataTO to = new PtGChunkDataTO(); + /* Primary key needed by DAO Object */ + to.setPrimaryKey(chunkData.getPrimaryKey()); + to.setFileSize(chunkData.getFileSize().value()); + to.setStatus(StatusCodeConverter.getInstance().toDB(chunkData.getStatus().getStatusCode())); + to.setErrString(chunkData.getStatus().getExplanation()); + to.setTurl(TURLConverter.getInstance().toDB(chunkData.getTransferURL().toString())); + to.setLifeTime(PinLifetimeConverter.getInstance().toDB(chunkData.getPinLifeTime().value())); + to.setNormalizedStFN(chunkData.getSURL().normalizedStFN()); + to.setSurlUniqueID(new Integer(chunkData.getSURL().uniqueId())); + to.setClientDN(chunkData.getUser().getDn()); + if (chunkData.getUser() instanceof AbstractGridUser) { + if (((AbstractGridUser) chunkData.getUser()).hasVoms()) { + to.setVomsAttributes(((AbstractGridUser) chunkData.getUser()).getFQANsAsString()); + } + + } + dao.update(to); + } + + /** + * Method that returns a Collection of PtGChunkData Objects matching the supplied TRequestToken. + * + * If any of the data associated to the TRequestToken is not well formed and so does not allow a + * PtGChunkData Object to be created, then that part of the request is dropped and gets logged, + * and the processing continues with the next part. All valid chunks get returned: the others get + * dropped. + * + * If there are no chunks to process then an empty Collection is returned, and a messagge gets + * logged. + */ + synchronized public Collection lookup(TRequestToken rt) { + + Collection chunkTOs = dao.find(rt); + log.debug("PtG CHUNK CATALOG: retrieved data " + chunkTOs); + ArrayList list = new ArrayList(); + if (chunkTOs.isEmpty()) { + log.warn("PtG CHUNK CATALOG! 
No chunks found in persistence for " + "specified request: {}", + rt); + return list; + } + PtGPersistentChunkData chunk; + for (PtGChunkDataTO chunkTO : chunkTOs) { + chunk = makeOne(chunkTO, rt); + if (chunk == null) { + continue; + } + list.add(chunk); + if (isComplete(chunkTO)) { + continue; + } + try { + dao.updateIncomplete(this.completeTO(chunkTO, chunk)); + } catch (InvalidReducedPtGChunkDataAttributesException e) { + log.warn( + "PtG CHUNK CATALOG! unable to add missing informations on DB " + "to the request: {}", + e.getMessage()); + } + } + log.debug("PtG CHUNK CATALOG: returning " + list); + return list; + } + + /** + * Generates a PtGChunkData from the received PtGChunkDataTO + * + * @param chunkDataTO + * @param rt + * @return + */ + private PtGPersistentChunkData makeOne(PtGChunkDataTO chunkDataTO, TRequestToken rt) { + + StringBuilder errorSb = new StringBuilder(); + TSURL fromSURL = null; + try { + fromSURL = TSURL.makeFromStringValidate(chunkDataTO.fromSURL()); + } catch (InvalidTSURLAttributesException e) { + errorSb.append(e); + } + if (chunkDataTO.normalizedStFN() != null) { + fromSURL.setNormalizedStFN(chunkDataTO.normalizedStFN()); + } + if (chunkDataTO.surlUniqueID() != null) { + fromSURL.setUniqueID(chunkDataTO.surlUniqueID().intValue()); + } + // lifeTime + TLifeTimeInSeconds lifeTime = null; + try { + long pinLifeTime = PinLifetimeConverter.getInstance().toStoRM(chunkDataTO.lifeTime()); + // Check for max value allowed + long max = Configuration.getInstance().getPinLifetimeMaximum(); + if (pinLifeTime > max) { + log.warn("PinLifeTime is greater than the max value allowed." 
+ + " Drop the value to the max = {} seconds", max); + pinLifeTime = max; + } + lifeTime = TLifeTimeInSeconds.make((pinLifeTime), TimeUnit.SECONDS); + } catch (IllegalArgumentException e) { + errorSb.append("\n"); + errorSb.append(e); + } + // dirOption + TDirOption dirOption = null; + try { + dirOption = new TDirOption(chunkDataTO.dirOption(), chunkDataTO.allLevelRecursive(), + chunkDataTO.numLevel()); + } catch (InvalidTDirOptionAttributesException e) { + errorSb.append("\n"); + errorSb.append(e); + } + // transferProtocols + TURLPrefix transferProtocols = + TransferProtocolListConverter.toSTORM(chunkDataTO.protocolList()); + if (transferProtocols.size() == 0) { + errorSb + .append("\nEmpty list of TransferProtocols or could " + "not translate TransferProtocols!"); + /* fail construction of PtGChunkData! */ + transferProtocols = null; + } + // fileSize + TSizeInBytes fileSize = null; + try { + fileSize = TSizeInBytes.make(chunkDataTO.fileSize(), SizeUnit.BYTES); + } catch (InvalidTSizeAttributesException e) { + errorSb.append("\n"); + errorSb.append(e); + } + // status + TReturnStatus status = null; + TStatusCode code = StatusCodeConverter.getInstance().toSTORM(chunkDataTO.status()); + if (code == TStatusCode.EMPTY) { + errorSb.append("\nRetrieved StatusCode was not recognised: " + chunkDataTO.status()); + } else { + status = new TReturnStatus(code, chunkDataTO.errString()); + } + GridUserInterface gridUser = null; + try { + if (chunkDataTO.vomsAttributes() != null && !chunkDataTO.vomsAttributes().trim().equals("")) { + gridUser = GridUserManager.makeVOMSGridUser(chunkDataTO.clientDN(), + chunkDataTO.vomsAttributesArray()); + } else { + gridUser = GridUserManager.makeGridUser(chunkDataTO.clientDN()); + } + + } catch (IllegalArgumentException e) { + log.error("Unexpected error on voms grid user creation." + " IllegalArgumentException: {}", + e.getMessage(), e); + } + // transferURL + /* + * whatever is read is just meaningless because PtG will fill it in!!! 
So create an Empty TTURL + * by default! Vital to avoid problems with unknown DPM NULL/EMPTY logic policy! + */ + TTURL transferURL = TTURL.makeEmpty(); + // make PtGChunkData + PtGPersistentChunkData aux = null; + try { + aux = new PtGPersistentChunkData(gridUser, rt, fromSURL, lifeTime, dirOption, + transferProtocols, fileSize, status, transferURL); + aux.setPrimaryKey(chunkDataTO.primaryKey()); + } catch (InvalidSurlRequestDataAttributesException e) { + dao.fail(chunkDataTO); + log.warn("PtG CHUNK CATALOG! Retrieved malformed PtG chunk data from " + + "persistence. Dropping chunk from request {}", rt); + log.warn(e.getMessage(), e); + log.warn(errorSb.toString()); + } + // end... + return aux; + } + + /** + * + * Adds to the received PtGChunkDataTO the normalized StFN and the SURL unique ID taken from the + * PtGChunkData + * + * @param chunkTO + * @param chunk + */ + private void completeTO(ReducedPtGChunkDataTO chunkTO, final ReducedPtGChunkData chunk) { + + chunkTO.setNormalizedStFN(chunk.fromSURL().normalizedStFN()); + chunkTO.setSurlUniqueID(new Integer(chunk.fromSURL().uniqueId())); + } + + /** + * + * Creates a ReducedPtGChunkDataTO from the received PtGChunkDataTO and completes it with the + * normalized StFN and the SURL unique ID taken from the PtGChunkData + * + * @param chunkTO + * @param chunk + * @return + * @throws InvalidReducedPtGChunkDataAttributesException + */ + private ReducedPtGChunkDataTO completeTO(PtGChunkDataTO chunkTO, + final PtGPersistentChunkData chunk) throws InvalidReducedPtGChunkDataAttributesException { + + ReducedPtGChunkDataTO reducedChunkTO = this.reduce(chunkTO); + this.completeTO(reducedChunkTO, this.reduce(chunk)); + return reducedChunkTO; + } + + /** + * Creates a ReducedPtGChunkData from the data contained in the received PtGChunkData + * + * @param chunk + * @return + * @throws InvalidReducedPtGChunkDataAttributesException + */ + private ReducedPtGChunkData reduce(PtGPersistentChunkData chunk) + throws 
InvalidReducedPtGChunkDataAttributesException { + + ReducedPtGChunkData reducedChunk = new ReducedPtGChunkData(chunk.getSURL(), chunk.getStatus()); + reducedChunk.setPrimaryKey(chunk.getPrimaryKey()); + return reducedChunk; + } + + /** + * Creates a ReducedPtGChunkDataTO from the data contained in the received PtGChunkDataTO + * + * @param chunkTO + * @return + */ + private ReducedPtGChunkDataTO reduce(PtGChunkDataTO chunkTO) { + + ReducedPtGChunkDataTO reducedChunkTO = new ReducedPtGChunkDataTO(); + reducedChunkTO.setPrimaryKey(chunkTO.primaryKey()); + reducedChunkTO.setFromSURL(chunkTO.fromSURL()); + reducedChunkTO.setNormalizedStFN(chunkTO.normalizedStFN()); + reducedChunkTO.setSurlUniqueID(chunkTO.surlUniqueID()); + reducedChunkTO.setStatus(chunkTO.status()); + reducedChunkTO.setErrString(chunkTO.errString()); + return reducedChunkTO; + } + + /** + * Checks if the received PtGChunkDataTO contains the fields not set by the front end but required + * + * @param chunkTO + * @return + */ + private boolean isComplete(PtGChunkDataTO chunkTO) { + + return (chunkTO.normalizedStFN() != null) && (chunkTO.surlUniqueID() != null); + } + + /** + * Method used to add into Persistence a new entry. The supplied PtGChunkData gets the primary key + * changed to the value assigned in Persistence. + * + * This method is intended to be used by a recursive PtG request: the parent request supplies a + * directory which must be expanded, so all new children requests resulting from the files in the + * directory are added into persistence. + * + * So this method does _not_ add a new SRM prepare_to_get request into the DB! + * + * The only children data written into the DB are: sourceSURL, TDirOption, statusCode and + * explanation. + * + * In case of any error the operation does not proceed, but no Exception is thrown! Proper + * messages get logged by underlying DAO. 
+ */ + synchronized public void addChild(PtGPersistentChunkData chunkData) { + + PtGChunkDataTO to = new PtGChunkDataTO(); + /* needed for now to find ID of request! Must be changed soon! */ + to.setRequestToken(chunkData.getRequestToken().toString()); + to.setFromSURL(chunkData.getSURL().toString()); + to.setNormalizedStFN(chunkData.getSURL().normalizedStFN()); + to.setSurlUniqueID(new Integer(chunkData.getSURL().uniqueId())); + + to.setAllLevelRecursive(chunkData.getDirOption().isAllLevelRecursive()); + to.setDirOption(chunkData.getDirOption().isDirectory()); + to.setNumLevel(chunkData.getDirOption().getNumLevel()); + to.setStatus(StatusCodeConverter.getInstance().toDB(chunkData.getStatus().getStatusCode())); + to.setErrString(chunkData.getStatus().getExplanation()); + to.setClientDN(chunkData.getUser().getDn()); + if (chunkData.getUser() instanceof AbstractGridUser) { + if (((AbstractGridUser) chunkData.getUser()).hasVoms()) { + to.setVomsAttributes(((AbstractGridUser) chunkData.getUser()).getFQANsAsString()); + } + + } + /* add the entry and update the Primary Key field! */ + dao.addChild(to); + /* set the assigned PrimaryKey! 
*/ + chunkData.setPrimaryKey(to.primaryKey()); + } + + public void updateStatus(TRequestToken requestToken, TSURL surl, TStatusCode statusCode, + String explanation) { + + dao.updateStatus(requestToken, new int[] {surl.uniqueId()}, new String[] {surl.rawSurl()}, + statusCode, explanation); + } + + public void updateFromPreviousStatus(TRequestToken requestToken, TStatusCode expectedStatusCode, + TStatusCode newStatusCode, String explanation) { + + dao.updateStatusOnMatchingStatus(requestToken, expectedStatusCode, newStatusCode, explanation); + } } diff --git a/src/main/java/it/grid/storm/catalogs/PtGChunkDAO.java b/src/main/java/it/grid/storm/catalogs/PtGChunkDAO.java deleted file mode 100644 index 393c1f62e..000000000 --- a/src/main/java/it/grid/storm/catalogs/PtGChunkDAO.java +++ /dev/null @@ -1,1778 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ - -package it.grid.storm.catalogs; - -import static it.grid.storm.catalogs.ChunkDAOUtils.printWarnings; - -import it.grid.storm.config.Configuration; -import it.grid.storm.ea.StormEA; -import it.grid.storm.namespace.NamespaceDirector; -import it.grid.storm.namespace.NamespaceException; -import it.grid.storm.namespace.StoRI; -import it.grid.storm.namespace.naming.SURL; -import it.grid.storm.srm.types.InvalidTSURLAttributesException; -import it.grid.storm.srm.types.TRequestToken; -import it.grid.storm.srm.types.TRequestType; -import it.grid.storm.srm.types.TSURL; -import it.grid.storm.srm.types.TStatusCode; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.sql.Connection; -import java.sql.DriverManager; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.Statement; -import java.sql.Timestamp; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Date; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Iterator; -import java.util.List; -import java.util.Map.Entry; -import java.util.Timer; -import java.util.TimerTask; - -/** - * DAO class for PtGChunkCatalog. This DAO is specifically designed to connect - * to a MySQL DB. The raw data found in those tables is pre-treated in order to - * turn it into the Object Model of StoRM. See Method comments for further info. - * - * BEWARE! DAO Adjusts for extra fields in the DB that are not present in the - * object model. 
- * - * @author EGRID ICTP - * @version 3.0 - * @date June 2005 - */ -public class PtGChunkDAO { - - private static final Logger log = LoggerFactory.getLogger(PtGChunkDAO.class); - - /** String with the name of the class for the DB driver */ - private final String driver = Configuration.getInstance().getDBDriver(); - /** String referring to the URL of the DB */ - private final String url = Configuration.getInstance().getDBURL(); - /** String with the password for the DB */ - private final String password = Configuration.getInstance().getDBPassword(); - /** String with the name for the DB */ - private final String name = Configuration.getInstance().getDBUserName(); - - /** Connection to DB - WARNING!!! It is kept open all the time! */ - private Connection con = null; - /** boolean that tells whether reconnection is needed because of MySQL bug! */ - private boolean reconnect = false; - - /** Singleton instance */ - private final static PtGChunkDAO dao = new PtGChunkDAO(); - - /** timer thread that will run a task to alert when reconnecting is necessary! */ - private Timer clock = null; - /** - * timer task that will update the boolean signaling that a reconnection is - * needed! - */ - private TimerTask clockTask = null; - /** milliseconds that must pass before reconnecting to DB */ - private final long period = Configuration.getInstance() - .getDBReconnectPeriod() * 1000; - /** initial delay in milliseconds before starting timer */ - private final long delay = Configuration.getInstance().getDBReconnectDelay() * 1000; - - private PtGChunkDAO() { - - setUpConnection(); - - clock = new Timer(); - clockTask = new TimerTask() { - - @Override - public void run() { - - reconnect = true; - } - }; // clock task - clock.scheduleAtFixedRate(clockTask, delay, period); - } - - /** - * Method that returns the only instance of the PtGChunkDAO. 
- */ - public static PtGChunkDAO getInstance() { - - return dao; - } - - /** - * Method used to add a new record to the DB: the supplied PtGChunkDataTO gets - * its primaryKey changed to the one assigned by the DB. - * - * The supplied PtGChunkData is used to fill in only the DB table where file - * specific info gets recorded: it does _not_ add a new request! So if - * spurious data is supplied, it will just stay there because of a lack of a - * parent request! - */ - public synchronized void addChild(PtGChunkDataTO to) { - - if (!checkConnection()) { - log.error("PTG CHUNK DAO: addChild - unable to get a valid connection!"); - return; - } - String str = null; - PreparedStatement id = null; // statement to find out the ID associated to - // the request token - ResultSet rsid = null; // result set containing the ID of the request. - try { - - // WARNING!!!! We are forced to run a query to get the ID of the request, - // which should NOT be so - // because the corresponding request object should have been changed with - // the extra field! However, it is not possible - // at the moment to perform such chage because of strict deadline and the - // change could wreak havoc - // the code. So we are forced to make this query!!! - - // begin transaction - con.setAutoCommit(false); - printWarnings(con.getWarnings()); - - // find ID of request corresponding to given RequestToken - str = "SELECT rq.ID FROM request_queue rq WHERE rq.r_token=?"; - - id = con.prepareStatement(str); - printWarnings(con.getWarnings()); - - id.setString(1, to.requestToken()); - printWarnings(id.getWarnings()); - - log.debug("PTG CHUNK DAO: addChild; {}", id.toString()); - rsid = id.executeQuery(); - printWarnings(id.getWarnings()); - - /* ID of request in request_process! */ - int request_id = extractID(rsid); - int id_s = fillPtGTables(to, request_id); - - /* end transaction! 
*/ - con.commit(); - printWarnings(con.getWarnings()); - con.setAutoCommit(true); - printWarnings(con.getWarnings()); - - // update primary key reading the generated key - to.setPrimaryKey(id_s); - } catch (SQLException e) { - log.error("PTG CHUNK DAO: unable to complete addChild! " - + "PtGChunkDataTO: {}; error: {}", to, e.getMessage(), e); - rollback(con); - } catch (Exception e) { - log.error("PTG CHUNK DAO: unable to complete addChild! " - + "PtGChunkDataTO: {}; error: {}", to, e.getMessage(), e); - rollback(con); - } finally { - close(rsid); - close(id); - } - } - - /** - * Method used to add a new record to the DB: the supplied PtGChunkDataTO gets - * its primaryKey changed to the one assigned by the DB. The client_dn must - * also be supplied as a String. - * - * The supplied PtGChunkData is used to fill in all the DB tables where file - * specific info gets recorded: it _adds_ a new request! - */ - public synchronized void addNew(PtGChunkDataTO to, String client_dn) { - - if (!checkConnection()) { - log.error("PTG CHUNK DAO: addNew - unable to get a valid connection!"); - return; - } - String str = null; - /* Result set containing the ID of the inserted new request */ - ResultSet rs_new = null; - /* Insert new request into process_request */ - PreparedStatement addNew = null; - /* Insert protocols for request. */ - PreparedStatement addProtocols = null; - try { - // begin transaction - con.setAutoCommit(false); - printWarnings(con.getWarnings()); - - // add to request_queue... - str = "INSERT INTO request_queue (config_RequestTypeID,client_dn,pinLifetime,status,errstring,r_token,nbreqfiles,timeStamp) VALUES (?,?,?,?,?,?,?,?)"; - addNew = con.prepareStatement(str, Statement.RETURN_GENERATED_KEYS); - printWarnings(con.getWarnings()); - /* Request type set to prepare to get! 
*/ - addNew.setString(1, - RequestTypeConverter.getInstance().toDB(TRequestType.PREPARE_TO_GET)); - printWarnings(addNew.getWarnings()); - - addNew.setString(2, client_dn); - printWarnings(addNew.getWarnings()); - - addNew.setInt(3, to.lifeTime()); - printWarnings(addNew.getWarnings()); - - addNew.setInt( - 4, - StatusCodeConverter.getInstance().toDB( - TStatusCode.SRM_REQUEST_INPROGRESS)); - printWarnings(addNew.getWarnings()); - - addNew.setString(5, "New PtG Request resulting from srmCopy invocation."); - printWarnings(addNew.getWarnings()); - - addNew.setString(6, to.requestToken()); - printWarnings(addNew.getWarnings()); - - addNew.setInt(7, 1); // number of requested files set to 1! - printWarnings(addNew.getWarnings()); - - addNew.setTimestamp(8, new Timestamp(new Date().getTime())); - printWarnings(addNew.getWarnings()); - - log.trace("PTG CHUNK DAO: addNew; {}", addNew.toString()); - addNew.execute(); - printWarnings(addNew.getWarnings()); - - rs_new = addNew.getGeneratedKeys(); - int id_new = extractID(rs_new); - - // add protocols... - str = "INSERT INTO request_TransferProtocols (request_queueID,config_ProtocolsID) VALUES (?,?)"; - addProtocols = con.prepareStatement(str); - printWarnings(con.getWarnings()); - for (Iterator i = to.protocolList().iterator(); i.hasNext();) { - addProtocols.setInt(1, id_new); - printWarnings(addProtocols.getWarnings()); - - addProtocols.setString(2, i.next()); - printWarnings(addProtocols.getWarnings()); - - log.trace("PTG CHUNK DAO: addNew; {}", addProtocols.toString()); - addProtocols.execute(); - printWarnings(addProtocols.getWarnings()); - } - - // addChild... - int id_s = fillPtGTables(to, id_new); - - // end transaction! - con.commit(); - printWarnings(con.getWarnings()); - con.setAutoCommit(true); - printWarnings(con.getWarnings()); - - // update primary key reading the generated key - to.setPrimaryKey(id_s); - } catch (SQLException e) { - log.error("PTG CHUNK DAO: Rolling back! Unable to complete addNew! 
" - + "PtGChunkDataTO: {}; error: {}", to, e.getMessage(), e); - rollback(con); - } catch (Exception e) { - log.error("PTG CHUNK DAO: unable to complete addNew! " - + "PtGChunkDataTO: {}; error: {}", to, e.getMessage(), e); - rollback(con); - } finally { - close(rs_new); - close(addNew); - close(addProtocols); - } - } - - /** - * To be used inside a transaction - * - * @param to - * @param requestQueueID - * @return - * @throws SQLException - * @throws Exception - */ - private synchronized int fillPtGTables(PtGChunkDataTO to, int requestQueueID) - throws SQLException, Exception { - - String str = null; - /* Result set containing the ID of the inserted */ - ResultSet rs_do = null; - /* Result set containing the ID of the inserted */ - ResultSet rs_g = null; - /* Result set containing the ID of the inserted */ - ResultSet rs_s = null; - /* insert TDirOption for request */ - PreparedStatement addDirOption = null; - /* insert request_Get for request */ - PreparedStatement addGet = null; - PreparedStatement addChild = null; - - try { - // first fill in TDirOption - str = "INSERT INTO request_DirOption (isSourceADirectory,allLevelRecursive,numOfLevels) VALUES (?,?,?)"; - addDirOption = con.prepareStatement(str, Statement.RETURN_GENERATED_KEYS); - printWarnings(con.getWarnings()); - addDirOption.setBoolean(1, to.dirOption()); - printWarnings(addDirOption.getWarnings()); - - addDirOption.setBoolean(2, to.allLevelRecursive()); - printWarnings(addDirOption.getWarnings()); - - addDirOption.setInt(3, to.numLevel()); - printWarnings(addDirOption.getWarnings()); - - log.trace("PTG CHUNK DAO: addNew; {}", addDirOption.toString()); - addDirOption.execute(); - printWarnings(addDirOption.getWarnings()); - - rs_do = addDirOption.getGeneratedKeys(); - int id_do = extractID(rs_do); - - // second fill in request_Get... sourceSURL and TDirOption! 
- str = "INSERT INTO request_Get (request_DirOptionID,request_queueID,sourceSURL,normalized_sourceSURL_StFN,sourceSURL_uniqueID) VALUES (?,?,?,?,?)"; - addGet = con.prepareStatement(str, Statement.RETURN_GENERATED_KEYS); - printWarnings(con.getWarnings()); - addGet.setInt(1, id_do); - printWarnings(addGet.getWarnings()); - - addGet.setInt(2, requestQueueID); - printWarnings(addGet.getWarnings()); - - addGet.setString(3, to.fromSURL()); - printWarnings(addGet.getWarnings()); - - addGet.setString(4, to.normalizedStFN()); - printWarnings(addGet.getWarnings()); - - addGet.setInt(5, to.surlUniqueID()); - printWarnings(addGet.getWarnings()); - - log.trace("PTG CHUNK DAO: addNew; {}", addGet.toString()); - addGet.execute(); - printWarnings(addGet.getWarnings()); - - rs_g = addGet.getGeneratedKeys(); - int id_g = extractID(rs_g); - - // third fill in status_Get... - str = "INSERT INTO status_Get (request_GetID,statusCode,explanation) VALUES (?,?,?)"; - addChild = con.prepareStatement(str, Statement.RETURN_GENERATED_KEYS); - printWarnings(con.getWarnings()); - addChild.setInt(1, id_g); - printWarnings(addChild.getWarnings()); - - addChild.setInt(2, to.status()); - printWarnings(addChild.getWarnings()); - - addChild.setString(3, to.errString()); - printWarnings(addChild.getWarnings()); - - log.trace("PTG CHUNK DAO: addNew; {}", addChild.toString()); - addChild.execute(); - printWarnings(addChild.getWarnings()); - - return id_g; - } finally { - close(rs_do); - close(rs_g); - close(rs_s); - close(addDirOption); - close(addGet); - close(addChild); - } - } - - /** - * Method used to save the changes made to a retrieved PtGChunkDataTO, back - * into the MySQL DB. - * - * Only the fileSize, transferURL, statusCode and explanation, of status_Get - * table are written to the DB. Likewise for the request pinLifetime. - * - * In case of any error, an error message gets logged but no exception is - * thrown. 
- */ - public synchronized void update(PtGChunkDataTO to) { - - if (!checkConnection()) { - log.error("PTG CHUNK DAO: update - unable to get a valid connection!"); - return; - } - PreparedStatement updateFileReq = null; - try { - // ready updateFileReq... - updateFileReq = con - .prepareStatement("UPDATE request_queue rq JOIN (status_Get sg, request_Get rg) ON (rq.ID=rg.request_queueID AND sg.request_GetID=rg.ID) " - + "SET sg.fileSize=?, sg.transferURL=?, sg.statusCode=?, sg.explanation=?, rq.pinLifetime=?, rg.normalized_sourceSURL_StFN=?, rg.sourceSURL_uniqueID=? " - + "WHERE rg.ID=?"); - printWarnings(con.getWarnings()); - - updateFileReq.setLong(1, to.fileSize()); - printWarnings(updateFileReq.getWarnings()); - - updateFileReq.setString(2, to.turl()); - printWarnings(updateFileReq.getWarnings()); - - updateFileReq.setInt(3, to.status()); - printWarnings(updateFileReq.getWarnings()); - - updateFileReq.setString(4, to.errString()); - printWarnings(updateFileReq.getWarnings()); - - updateFileReq.setInt(5, to.lifeTime()); - printWarnings(updateFileReq.getWarnings()); - - updateFileReq.setString(6, to.normalizedStFN()); - printWarnings(updateFileReq.getWarnings()); - - updateFileReq.setInt(7, to.surlUniqueID()); - printWarnings(updateFileReq.getWarnings()); - - updateFileReq.setLong(8, to.primaryKey()); - printWarnings(updateFileReq.getWarnings()); - // execute update - log.trace("PTG CHUNK DAO: update method; {}", updateFileReq.toString()); - updateFileReq.executeUpdate(); - printWarnings(updateFileReq.getWarnings()); - } catch (SQLException e) { - log.error("PtG CHUNK DAO: Unable to complete update! 
{}", - e.getMessage(), e); - } finally { - close(updateFileReq); - } - } - - /** - * Updates the request_Get represented by the received ReducedPtGChunkDataTO - * by setting its normalized_sourceSURL_StFN and sourceSURL_uniqueID - * - * @param chunkTO - */ - public synchronized void updateIncomplete(ReducedPtGChunkDataTO chunkTO) { - - if (!checkConnection()) { - log - .error("PTG CHUNK DAO: updateIncomplete - unable to get a valid connection!"); - return; - } - String str = "UPDATE request_Get rg SET rg.normalized_sourceSURL_StFN=?, rg.sourceSURL_uniqueID=? " - + "WHERE rg.ID=?"; - PreparedStatement stmt = null; - try { - stmt = con.prepareStatement(str); - printWarnings(con.getWarnings()); - - stmt.setString(1, chunkTO.normalizedStFN()); - printWarnings(stmt.getWarnings()); - - stmt.setInt(2, chunkTO.surlUniqueID()); - printWarnings(stmt.getWarnings()); - - stmt.setLong(3, chunkTO.primaryKey()); - printWarnings(stmt.getWarnings()); - - log.trace("PtG CHUNK DAO - update incomplete: {}", stmt.toString()); - stmt.executeUpdate(); - printWarnings(stmt.getWarnings()); - } catch (SQLException e) { - log.error("PtG CHUNK DAO: Unable to complete update incomplete! {}", - e.getMessage(), e); - } finally { - close(stmt); - } - } - - /** - * TODO WARNING! THIS IS A WORK IN PROGRESS!!! - * - * Method used to refresh the PtGChunkDataTO information from the MySQL DB. - * - * In this first version, only the statusCode and the TURL are reloaded from - * the DB. TODO The next version must contains all the information related to - * the Chunk! - * - * In case of any error, an error messagge gets logged but no exception is - * thrown. 
- */ - - public synchronized PtGChunkDataTO refresh(long primary_key) { - - if (!checkConnection()) { - log.error("PTG CHUNK DAO: refresh - unable to get a valid connection!"); - return null; - } - String queryString = null; - PreparedStatement find = null; - ResultSet rs = null; - - try { - // get chunks of the request - queryString = "SELECT sg.statusCode, sg.transferURL " - + "FROM status_Get sg " + "WHERE sg.request_GetID=?"; - find = con.prepareStatement(queryString); - printWarnings(con.getWarnings()); - find.setLong(1, primary_key); - printWarnings(find.getWarnings()); - log.trace("PTG CHUNK DAO: refresh status method; {}", find.toString()); - - rs = find.executeQuery(); - - printWarnings(find.getWarnings()); - PtGChunkDataTO chunkDataTO = null; - // The result shoul be un - while (rs.next()) { - chunkDataTO = new PtGChunkDataTO(); - chunkDataTO.setStatus(rs.getInt("sg.statusCode")); - chunkDataTO.setTurl(rs.getString("sg.transferURL")); - } - return chunkDataTO; - } catch (SQLException e) { - log.error("PTG CHUNK DAO: {}", e.getMessage(), e); - /* Return null TransferObject! */ - return null; - } finally { - close(rs); - close(find); - } - } - - /** - * Method that queries the MySQL DB to find all entries matching the supplied - * TRequestToken. The Collection contains the corresponding PtGChunkDataTO - * objects. - * - * An initial simple query establishes the list of protocols associated with - * the request. A second complex query establishes all chunks associated with - * the request, by properly joining request_queue, request_Get, status_Get and - * request_DirOption. The considered fields are: - * - * (1) From status_Get: the ID field which becomes the TOs primary key, and - * statusCode. - * - * (2) From request_Get: sourceSURL - * - * (3) From request_queue: pinLifetime - * - * (4) From request_DirOption: isSourceADirectory, alLevelRecursive, - * numOfLevels - * - * In case of any error, a log gets written and an empty collection is - * returned. 
No exception is thrown. - * - * NOTE! Chunks in SRM_ABORTED status are NOT returned! - */ - public synchronized Collection find(TRequestToken requestToken) { - - if (!checkConnection()) { - log.error("PTG CHUNK DAO: find - unable to get a valid connection!"); - return new ArrayList(); - } - String strToken = requestToken.toString(); - String str = null; - PreparedStatement find = null; - ResultSet rs = null; - try { - str = "SELECT tp.config_ProtocolsID " - + "FROM request_TransferProtocols tp JOIN request_queue rq ON tp.request_queueID=rq.ID " - + "WHERE rq.r_token=?"; - - find = con.prepareStatement(str); - printWarnings(con.getWarnings()); - - List protocols = new ArrayList(); - find.setString(1, strToken); - printWarnings(find.getWarnings()); - - log.trace("PTG CHUNK DAO: find method; {}", find.toString()); - rs = find.executeQuery(); - printWarnings(find.getWarnings()); - while (rs.next()) { - protocols.add(rs.getString("tp.config_ProtocolsID")); - } - close(rs); - close(find); - - // get chunks of the request - str = "SELECT sg.statusCode, rq.pinLifetime, rg.ID, rq.timeStamp, rq.client_dn, rq.proxy, rg.sourceSURL, " - + "rg.normalized_sourceSURL_StFN, rg.sourceSURL_uniqueID, d.isSourceADirectory, " - + "d.allLevelRecursive, d.numOfLevels " - + "FROM request_queue rq JOIN (request_Get rg, status_Get sg) " - + "ON (rg.request_queueID=rq.ID AND sg.request_GetID=rg.ID) " - + "LEFT JOIN request_DirOption d ON rg.request_DirOptionID=d.ID " - + "WHERE rq.r_token=? 
AND sg.statusCode<>?"; - find = con.prepareStatement(str); - printWarnings(con.getWarnings()); - ArrayList list = new ArrayList(); - find.setString(1, strToken); - printWarnings(find.getWarnings()); - - find.setInt(2, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_ABORTED)); - printWarnings(find.getWarnings()); - - log.trace("PTG CHUNK DAO: find method; " + find.toString()); - rs = find.executeQuery(); - printWarnings(find.getWarnings()); - - PtGChunkDataTO chunkDataTO; - while (rs.next()) { - chunkDataTO = new PtGChunkDataTO(); - chunkDataTO.setStatus(rs.getInt("sg.statusCode")); - chunkDataTO.setRequestToken(strToken); - chunkDataTO.setPrimaryKey(rs.getLong("rg.ID")); - chunkDataTO.setFromSURL(rs.getString("rg.sourceSURL")); - chunkDataTO.setNormalizedStFN(rs - .getString("rg.normalized_sourceSURL_StFN")); - int uniqueID = rs.getInt("rg.sourceSURL_uniqueID"); - if (!rs.wasNull()) { - chunkDataTO.setSurlUniqueID(new Integer(uniqueID)); - } - - chunkDataTO.setClientDN(rs.getString("rq.client_dn")); - - /** - * This code is only for the 1.3.18. This is a workaround to get FQANs - * using the proxy field on request_queue. The FE use the proxy field of - * request_queue to insert a single FQAN string containing all FQAN - * separeted by the "#" char. The proxy is a BLOB, hence it has to be - * properly conveted in string. 
- */ - java.sql.Blob blob = rs.getBlob("rq.proxy"); - if (!rs.wasNull() && blob != null) { - byte[] bdata = blob.getBytes(1, (int) blob.length()); - chunkDataTO.setVomsAttributes(new String(bdata)); - } - chunkDataTO.setTimeStamp(rs.getTimestamp("rq.timeStamp")); - chunkDataTO.setLifeTime(rs.getInt("rq.pinLifetime")); - chunkDataTO.setDirOption(rs.getBoolean("d.isSourceADirectory")); - chunkDataTO.setAllLevelRecursive(rs.getBoolean("d.allLevelRecursive")); - chunkDataTO.setNumLevel(rs.getInt("d.numOfLevels")); - chunkDataTO.setProtocolList(protocols); - list.add(chunkDataTO); - } - return list; - } catch (SQLException e) { - log.error("PTG CHUNK DAO: ", e.getMessage(), e); - /* Return empty Collection! */ - return new ArrayList(); - } finally { - close(rs); - close(find); - } - } - - /** - * Method that returns a Collection of ReducedPtGChunkDataTO associated to the - * given TRequestToken expressed as String. - */ - public synchronized Collection findReduced( - String reqtoken) { - - if (!checkConnection()) { - log - .error("PTG CHUNK DAO: findReduced - unable to get a valid connection!"); - return new ArrayList(); - } - PreparedStatement find = null; - ResultSet rs = null; - try { - // get reduced chunks - String str = "SELECT sg.statusCode, rg.ID, rg.sourceSURL, rg.normalized_sourceSURL_StFN, rg.sourceSURL_uniqueID " - + "FROM request_queue rq JOIN (request_Get rg, status_Get sg) " - + "ON (rg.request_queueID=rq.ID AND sg.request_GetID=rg.ID) " - + "WHERE rq.r_token=?"; - find = con.prepareStatement(str); - printWarnings(con.getWarnings()); - - ArrayList list = new ArrayList(); - find.setString(1, reqtoken); - printWarnings(find.getWarnings()); - - log.trace("PtG CHUNK DAO! 
findReduced with request token; {}", find.toString()); - rs = find.executeQuery(); - printWarnings(find.getWarnings()); - - ReducedPtGChunkDataTO reducedChunkDataTO = null; - while (rs.next()) { - reducedChunkDataTO = new ReducedPtGChunkDataTO(); - reducedChunkDataTO.setStatus(rs.getInt("sg.statusCode")); - reducedChunkDataTO.setPrimaryKey(rs.getLong("rg.ID")); - reducedChunkDataTO.setFromSURL(rs.getString("rg.sourceSURL")); - reducedChunkDataTO.setNormalizedStFN(rs - .getString("rg.normalized_sourceSURL_StFN")); - int uniqueID = rs.getInt("rg.sourceSURL_uniqueID"); - if (!rs.wasNull()) { - reducedChunkDataTO.setSurlUniqueID(uniqueID); - } - - list.add(reducedChunkDataTO); - } - return list; - } catch (SQLException e) { - log.error("PTG CHUNK DAO: {}", e.getMessage(), e); - /* Return empty Collection! */ - return new ArrayList(); - } finally { - close(rs); - close(find); - } - } - - public synchronized Collection findReduced( - TRequestToken requestToken, int[] surlsUniqueIDs, String[] surlsArray) { - - if (!checkConnection()) { - log - .error("PTG CHUNK DAO: findReduced - unable to get a valid connection!"); - return new ArrayList(); - } - PreparedStatement find = null; - ResultSet rs = null; - - try { - - String str = "SELECT sg.statusCode, rg.ID, rg.sourceSURL, rg.normalized_sourceSURL_StFN, rg.sourceSURL_uniqueID " - + "FROM request_queue rq JOIN (request_Get rg, status_Get sg) " - + "ON (rg.request_queueID=rq.ID AND sg.request_GetID=rg.ID) " - + "WHERE rq.r_token=? AND ( rg.sourceSURL_uniqueID IN " - + makeSURLUniqueIDWhere(surlsUniqueIDs) - + " AND rg.sourceSURL IN " - + makeSurlString(surlsArray) + " ) "; - - find = con.prepareStatement(str); - - printWarnings(con.getWarnings()); - - ArrayList list = new ArrayList(); - find.setString(1, requestToken.getValue()); - printWarnings(find.getWarnings()); - - log.trace("PtG CHUNK DAO! 
findReduced with griduser+surlarray; {}", find.toString()); - rs = find.executeQuery(); - printWarnings(find.getWarnings()); - - ReducedPtGChunkDataTO chunkDataTO = null; - while (rs.next()) { - chunkDataTO = new ReducedPtGChunkDataTO(); - chunkDataTO.setStatus(rs.getInt("sg.statusCode")); - chunkDataTO.setPrimaryKey(rs.getLong("rg.ID")); - chunkDataTO.setFromSURL(rs.getString("rg.sourceSURL")); - chunkDataTO.setNormalizedStFN(rs - .getString("rg.normalized_sourceSURL_StFN")); - int uniqueID = rs.getInt("rg.sourceSURL_uniqueID"); - if (!rs.wasNull()) { - chunkDataTO.setSurlUniqueID(uniqueID); - } - - list.add(chunkDataTO); - } - return list; - } catch (SQLException e) { - log.error("PTG CHUNK DAO: {}", e.getMessage(), e); - /* Return empty Collection! */ - return new ArrayList(); - } finally { - close(rs); - close(find); - } - } - - /** - * Method that returns a Collection of ReducedPtGChunkDataTO associated to the - * given griduser, and whose SURLs are contained in the supplied array of - * Strings. - */ - public synchronized Collection findReduced( - String griduser, int[] surlUniqueIDs, String[] surls) { - - if (!checkConnection()) { - log - .error("PTG CHUNK DAO: findReduced - unable to get a valid connection!"); - return new ArrayList(); - } - PreparedStatement find = null; - ResultSet rs = null; - try { - /* - * NOTE: we search also on the fromSurl because otherwise we lost all - * request_get that have not the uniqueID set because are not yet been - * used by anybody - */ - // get reduced chunks - String str = "SELECT sg.statusCode, rg.ID, rg.sourceSURL, rg.normalized_sourceSURL_StFN, rg.sourceSURL_uniqueID " - + "FROM request_queue rq JOIN (request_Get rg, status_Get sg) " - + "ON (rg.request_queueID=rq.ID AND sg.request_GetID=rg.ID) " - + "WHERE rq.client_dn=? 
AND ( rg.sourceSURL_uniqueID IN " - + makeSURLUniqueIDWhere(surlUniqueIDs) - + " AND rg.sourceSURL IN " - + makeSurlString(surls) + " ) "; - find = con.prepareStatement(str); - printWarnings(con.getWarnings()); - - ArrayList list = new ArrayList(); - find.setString(1, griduser); - printWarnings(find.getWarnings()); - - log.trace("PtG CHUNK DAO! findReduced with griduser+surlarray; {}", find.toString()); - rs = find.executeQuery(); - printWarnings(find.getWarnings()); - - ReducedPtGChunkDataTO chunkDataTO = null; - while (rs.next()) { - chunkDataTO = new ReducedPtGChunkDataTO(); - chunkDataTO.setStatus(rs.getInt("sg.statusCode")); - chunkDataTO.setPrimaryKey(rs.getLong("rg.ID")); - chunkDataTO.setFromSURL(rs.getString("rg.sourceSURL")); - chunkDataTO.setNormalizedStFN(rs - .getString("rg.normalized_sourceSURL_StFN")); - int uniqueID = rs.getInt("rg.sourceSURL_uniqueID"); - if (!rs.wasNull()) { - chunkDataTO.setSurlUniqueID(uniqueID); - } - - list.add(chunkDataTO); - } - return list; - } catch (SQLException e) { - log.error("PTG CHUNK DAO: {}", e.getMessage(), e); - /* Return empty Collection! */ - return new ArrayList(); - } finally { - close(rs); - close(find); - } - } - - /** - * Method used in extraordinary situations to signal that data retrieved from - * the DB was malformed and could not be translated into the StoRM object - * model. - * - * This method attempts to change the status of the request to SRM_FAILURE and - * record it in the DB. - * - * This operation could potentially fail because the source of the malformed - * problems could be a problematic DB; indeed, initially only log messagges - * where recorded. - * - * Yet it soon became clear that the source of malformed data were the clients - * and/or FE recording info in the DB. In these circumstances the client would - * see its request as being in the SRM_IN_PROGRESS state for ever. Hence the - * pressing need to inform it of the encountered problems. 
- */ - public synchronized void signalMalformedPtGChunk(PtGChunkDataTO auxTO) { - - if (!checkConnection()) { - log - .error("PTG CHUNK DAO: signalMalformedPtGChunk - unable to get a valid connection!"); - return; - } - String signalSQL = "UPDATE status_Get SET statusCode=" - + StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_FAILURE) - + ", explanation=? WHERE request_GetID=" + auxTO.primaryKey(); - PreparedStatement signal = null; - try { - signal = con.prepareStatement(signalSQL); - printWarnings(con.getWarnings()); - /* Prepared statement spares DB-specific String notation! */ - signal.setString(1, "Request is malformed!"); - printWarnings(signal.getWarnings()); - - log.trace("PTG CHUNK DAO: signalMalformed; {}", signal.toString()); - signal.executeUpdate(); - printWarnings(signal.getWarnings()); - } catch (SQLException e) { - log.error("PtGChunkDAO! Unable to signal in DB that the request was " - + "malformed! Request: {}; Exception: {}", auxTO.toString(), e.toString()); - } finally { - close(signal); - } - } - - /** - * Method that returns the number of Get requests on the given SURL, that are - * in SRM_FILE_PINNED state. - * - * This method is intended to be used by PtGChunkCatalog in the - * isSRM_FILE_PINNED method invocation. - * - * In case of any error, 0 is returned. - */ - // request_Get table - public synchronized int numberInSRM_FILE_PINNED(int surlUniqueID) { - - if (!checkConnection()) { - log - .error("PTG CHUNK DAO: numberInSRM_FILE_PINNED - unable to get a valid connection!"); - return 0; - } - String str = "SELECT COUNT(rg.ID) " - + "FROM status_Get sg JOIN request_Get rg " - + "ON (sg.request_GetID=rg.ID) " - + "WHERE rg.sourceSURL_uniqueID=? AND sg.statusCode=?"; - PreparedStatement find = null; - ResultSet rs = null; - try { - find = con.prepareStatement(str); - printWarnings(con.getWarnings()); - /* Prepared statement spares DB-specific String notation! 
*/ - find.setInt(1, surlUniqueID); - printWarnings(find.getWarnings()); - - find.setInt(2, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_FILE_PINNED)); - printWarnings(find.getWarnings()); - - log.trace("PtG CHUNK DAO - numberInSRM_FILE_PINNED method: {}", find.toString()); - rs = find.executeQuery(); - printWarnings(find.getWarnings()); - - int numberFilePinned = 0; - if (rs.next()) { - numberFilePinned = rs.getInt(1); - } - return numberFilePinned; - } catch (SQLException e) { - log.error("PtG CHUNK DAO! Unable to determine numberInSRM_FILE_PINNED! " - + "Returning 0! {}", e.getMessage(), e); - return 0; - } finally { - close(rs); - close(find); - } - } - - /** - * Method that updates all expired requests in SRM_FILE_PINNED state, into - * SRM_RELEASED. - * - * This is needed when the client forgets to invoke srmReleaseFiles(). - * - * @return - */ - public synchronized List transitExpiredSRM_FILE_PINNED() { - - // tring to the surl unique ID - if (!checkConnection()) { - log - .error("PTG CHUNK DAO: transitExpiredSRM_FILE_PINNED - unable to get a valid connection!"); - return new ArrayList(); - } - HashMap expiredSurlMap = new HashMap(); - String str = null; - // Statement statement = null; - PreparedStatement preparedStatement = null; - - /* Find all expired surls */ - try { - // start transaction - con.setAutoCommit(false); - - str = "SELECT rg.sourceSURL , rg.sourceSURL_uniqueID " - + "FROM request_Get rg JOIN (status_Get sg, request_queue rq) ON sg.request_GetID=rg.ID AND rg.request_queueID=rq.ID " - + "WHERE sg.statusCode=?" 
- + " AND UNIX_TIMESTAMP(NOW())-UNIX_TIMESTAMP(rq.timeStamp) >= rq.pinLifetime "; - - preparedStatement = con.prepareStatement(str); - preparedStatement.setInt(1, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_FILE_PINNED)); - - ResultSet res = preparedStatement.executeQuery(); - printWarnings(preparedStatement.getWarnings()); - - while (res.next()) { - String sourceSURL = res.getString("rg.sourceSURL"); - Integer uniqueID = new Integer(res.getInt("rg.sourceSURL_uniqueID")); - /* If the uniqueID is not setted compute it */ - if (res.wasNull()) { - try { - TSURL tsurl = TSURL.makeFromStringWellFormed(sourceSURL); - uniqueID = tsurl.uniqueId(); - } catch (InvalidTSURLAttributesException e) { - log.warn("PtGChunkDAO! unable to build the TSURL from {}: " - + "InvalidTSURLAttributesException {}", sourceSURL, e.getMessage(), e); - } - } - expiredSurlMap.put(sourceSURL, uniqueID); - } - - if (expiredSurlMap.isEmpty()) { - commit(con); - log - .trace("PtGChunkDAO! No chunk of PtG request was transited from SRM_FILE_PINNED to SRM_RELEASED."); - return new ArrayList(); - } - } catch (SQLException e) { - log.error("PtGChunkDAO! SQLException. {}", e.getMessage(), e); - rollback(con); - return new ArrayList(); - } finally { - close(preparedStatement); - } - - /* Update status of all expired surls to SRM_RELEASED */ - - preparedStatement = null; - try { - - str = "UPDATE " - + "status_Get sg JOIN (request_Get rg, request_queue rq) ON sg.request_GetID=rg.ID AND rg.request_queueID=rq.ID " - + "SET sg.statusCode=? " - + "WHERE sg.statusCode=? 
AND UNIX_TIMESTAMP(NOW())-UNIX_TIMESTAMP(rq.timeStamp) >= rq.pinLifetime "; - - preparedStatement = con.prepareStatement(str); - printWarnings(con.getWarnings()); - - preparedStatement.setInt(1, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_RELEASED)); - printWarnings(preparedStatement.getWarnings()); - - preparedStatement.setInt(2, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_FILE_PINNED)); - printWarnings(preparedStatement.getWarnings()); - - log.trace("PtG CHUNK DAO - transitExpiredSRM_FILE_PINNED method: {}", - preparedStatement.toString()); - - int count = preparedStatement.executeUpdate(); - printWarnings(preparedStatement.getWarnings()); - - if (count == 0) { - log.trace("PtGChunkDAO! No chunk of PtG request was " - + "transited from SRM_FILE_PINNED to SRM_RELEASED."); - } else { - log.info("PtGChunkDAO! {} chunks of PtG requests were transited from" - + " SRM_FILE_PINNED to SRM_RELEASED.", count); - } - } catch (SQLException e) { - log.error("PtGChunkDAO! Unable to transit expired SRM_FILE_PINNED chunks " - + "of PtG requests, to SRM_RELEASED! {}", e.getMessage(), e); - rollback(con); - return new ArrayList(); - } finally { - close(preparedStatement); - } - - /* - * in order to enhance performance here we can check if there is any file - * system with tape (T1D0, T1D1), if there is not any we can skip the - * following - */ - - /* Find all not expired surls from PtG and BoL */ - - HashSet pinnedSurlSet = new HashSet(); - try { - - // SURLs pinned by PtGs - str = "SELECT rg.sourceSURL , rg.sourceSURL_uniqueID FROM " - + "request_Get rg JOIN (status_Get sg, request_queue rq) ON sg.request_GetID=rg.ID AND rg.request_queueID=rq.ID " - + "WHERE sg.statusCode=?" 
- + " AND UNIX_TIMESTAMP(NOW())-UNIX_TIMESTAMP(rq.timeStamp) < rq.pinLifetime "; - - preparedStatement = con.prepareStatement(str); - preparedStatement.setInt(1, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_FILE_PINNED)); - - ResultSet res = preparedStatement.executeQuery(); - printWarnings(preparedStatement.getWarnings()); - - while (res.next()) { - String sourceSURL = res.getString("rg.sourceSURL"); - Integer uniqueID = new Integer(res.getInt("rg.sourceSURL_uniqueID")); - /* If the uniqueID is not setted compute it */ - if (res.wasNull()) { - try { - TSURL tsurl = TSURL.makeFromStringWellFormed(sourceSURL); - uniqueID = tsurl.uniqueId(); - } catch (InvalidTSURLAttributesException e) { - log.warn("PtGChunkDAO! unable to build the TSURL from {}. " - + "InvalidTSURLAttributesException: {}", sourceSURL, e.getMessage()); - } - } - pinnedSurlSet.add(uniqueID); - } - - close(preparedStatement); - - // SURLs pinned by BoLs - str = "SELECT rb.sourceSURL , rb.sourceSURL_uniqueID FROM " - + "request_BoL rb JOIN (status_BoL sb, request_queue rq) ON sb.request_BoLID=rb.ID AND rb.request_queueID=rq.ID " - + "WHERE sb.statusCode=?" - + " AND UNIX_TIMESTAMP(NOW())-UNIX_TIMESTAMP(rq.timeStamp) < rq.pinLifetime "; - - preparedStatement = con.prepareStatement(str); - preparedStatement.setInt(1, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_SUCCESS)); - - res = preparedStatement.executeQuery(); - printWarnings(preparedStatement.getWarnings()); - - while (res.next()) { - String sourceSURL = res.getString("rb.sourceSURL"); - Integer uniqueID = new Integer(res.getInt("rb.sourceSURL_uniqueID")); - /* If the uniqueID is not setted compute it */ - if (res.wasNull()) { - try { - TSURL tsurl = TSURL.makeFromStringWellFormed(sourceSURL); - uniqueID = tsurl.uniqueId(); - } catch (InvalidTSURLAttributesException e) { - log.warn("PtGChunkDAO! unable to build the TSURL from {}. 
" - + "InvalidTSURLAttributesException: {}", sourceSURL, e.getMessage(), e); - } - } - pinnedSurlSet.add(uniqueID); - } - commit(con); - } catch (SQLException e) { - log.error("PtGChunkDAO! SQLException. {}", e.getMessage(), e); - rollback(con); - } finally { - close(preparedStatement); - } - - ArrayList expiredSurlList = new ArrayList(); - /* Remove the Extended Attribute pinned if there is not a valid surl on it */ - TSURL surl; - for (Entry surlEntry : expiredSurlMap.entrySet()) { - if (!pinnedSurlSet.contains(surlEntry.getValue())) { - try { - surl = TSURL.makeFromStringValidate(surlEntry.getKey()); - } catch (InvalidTSURLAttributesException e) { - log.error("Invalid SURL, cannot release the pin " - + "(Extended Attribute): {}", surlEntry.getKey()); - continue; - } - expiredSurlList.add(surl); - StoRI stori; - try { - stori = NamespaceDirector.getNamespace().resolveStoRIbySURL(surl); - } catch (Throwable e) { - log.error("Invalid SURL {} cannot release the pin. {}: {}", - surlEntry.getKey(), e.getClass().getCanonicalName(), e.getMessage(), e); - continue; - } - - if (stori.getVirtualFileSystem().getStorageClassType().isTapeEnabled()) { - StormEA.removePinned(stori.getAbsolutePath()); - } - } - } - return expiredSurlList; - } - - /** - * Method that updates all chunks in SRM_FILE_PINNED state, into SRM_RELEASED. - * An array of long representing the primary key of each chunk is required: - * only they get the status changed provided their current status is - * SRM_FILE_PINNED. - * - * This method is used during srmReleaseFiles - * - * In case of any error nothing happens and no exception is thrown, but proper - * messagges get logged. - */ - public synchronized void transitSRM_FILE_PINNEDtoSRM_RELEASED(long[] ids) { - - if (!checkConnection()) { - log - .error("PTG CHUNK DAO: transitSRM_FILE_PINNEDtoSRM_RELEASED - unable to get a valid connection!"); - return; - } - String str = "UPDATE status_Get sg SET sg.statusCode=? " - + "WHERE sg.statusCode=? 
AND sg.request_GetID IN " + makeWhereString(ids); - PreparedStatement stmt = null; - try { - stmt = con.prepareStatement(str); - printWarnings(con.getWarnings()); - stmt.setInt(1, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_RELEASED)); - printWarnings(stmt.getWarnings()); - - stmt.setInt(2, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_FILE_PINNED)); - printWarnings(stmt.getWarnings()); - - log.trace("PtG CHUNK DAO - transitSRM_FILE_PINNEDtoSRM_RELEASED: {}", - stmt.toString()); - int count = stmt.executeUpdate(); - printWarnings(stmt.getWarnings()); - if (count == 0) { - log.trace("PtG CHUNK DAO! No chunk of PtG request was " - + "transited from SRM_FILE_PINNED to SRM_RELEASED."); - } else { - log.info("PtG CHUNK DAO! {} chunks of PtG requests were transited " - + "from SRM_FILE_PINNED to SRM_RELEASED.", count); - } - } catch (SQLException e) { - log.error("PtG CHUNK DAO! Unable to transit chunks" - + " from SRM_FILE_PINNED to SRM_RELEASED! {}", e.getMessage(), e); - } finally { - close(stmt); - } - } - - /** - * @param ids - * @param token - */ - public synchronized void transitSRM_FILE_PINNEDtoSRM_RELEASED(long[] ids, - TRequestToken token) { - - if (token == null) { - transitSRM_FILE_PINNEDtoSRM_RELEASED(ids); - return; - } - - /* - * If a request token has been specified, only the related Get requests - * have to be released. This is done adding the r.r_token="..." clause in - * the where subquery. - */ - if (!checkConnection()) { - log.error("PTG CHUNK DAO: transitSRM_FILE_PINNEDtoSRM_RELEASED - " - + "unable to get a valid connection!"); - return; - } - - String str = "UPDATE " - + "status_Get sg JOIN (request_Get rg, request_queue rq) ON sg.request_GetID=rg.ID AND rg.request_queueID=rq.ID " - + "SET sg.statusCode=? " + "WHERE sg.statusCode=? 
AND rq.r_token='" - + token.toString() + "' AND rg.ID IN " + makeWhereString(ids); - PreparedStatement stmt = null; - try { - stmt = con.prepareStatement(str); - printWarnings(con.getWarnings()); - stmt.setInt(1,StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_RELEASED)); - printWarnings(stmt.getWarnings()); - - stmt.setInt(2,StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_FILE_PINNED)); - printWarnings(stmt.getWarnings()); - - log.trace("PtG CHUNK DAO - transitSRM_FILE_PINNEDtoSRM_RELEASED: {}", stmt.toString()); - int count = stmt.executeUpdate(); - printWarnings(stmt.getWarnings()); - if (count == 0) { - log.trace("PtG CHUNK DAO! No chunk of PtG request was" - + " transited from SRM_FILE_PINNED to SRM_RELEASED."); - } else { - log.info("PtG CHUNK DAO! {} chunks of PtG requests were transited from " - + "SRM_FILE_PINNED to SRM_RELEASED.", count); - } - } catch (SQLException e) { - log.error("PtG CHUNK DAO! Unable to transit chunks from " - + "SRM_FILE_PINNED to SRM_RELEASED! {}", e.getMessage(), e); - } finally { - close(stmt); - } - } - - public synchronized void updateStatus(TRequestToken requestToken, - int[] surlUniqueIDs, String[] surls, TStatusCode statusCode, - String explanation) { - - if (!checkConnection()) { - log - .error("PTG CHUNK DAO: updateStatus - unable to get a valid connection!"); - return; - } - String str = "UPDATE " - + "status_Get sg JOIN (request_Get rg, request_queue rq) ON sg.request_GetID=rg.ID AND rg.request_queueID=rq.ID " - + "SET sg.statusCode=? , sg.explanation=? " + "WHERE rq.r_token='" - + requestToken.toString() + "' AND ( rg.sourceSURL_uniqueID IN " - + makeSURLUniqueIDWhere(surlUniqueIDs) + " AND rg.sourceSURL IN " - + makeSurlString(surls) + " ) "; - PreparedStatement stmt = null; - try { - stmt = con.prepareStatement(str); - printWarnings(con.getWarnings()); - stmt.setInt(1, StatusCodeConverter.getInstance().toDB(statusCode)); - printWarnings(stmt.getWarnings()); - - stmt.setString(2, (explanation != null ? 
explanation : "")); - printWarnings(stmt.getWarnings()); - - log.trace("PtG CHUNK DAO - updateStatus: {}", stmt.toString()); - int count = stmt.executeUpdate(); - printWarnings(stmt.getWarnings()); - if (count == 0) { - log.trace("PtG CHUNK DAO! No chunk of PtG request was updated to {}.", - statusCode); - } else { - log.info("PtG CHUNK DAO! {} chunks of PtG requests were updated to {}.", - count, statusCode); - } - } catch (SQLException e) { - log.error("PtG CHUNK DAO! Unable to updated to {}! {}", statusCode, - e.getMessage(), e); - } finally { - close(stmt); - } - } - - public synchronized void updateStatusOnMatchingStatus(int[] surlsUniqueIDs, - String[] surls, TStatusCode expectedStatusCode, TStatusCode newStatusCode, - String explanation) throws IllegalArgumentException { - - if (surlsUniqueIDs == null || surls == null || explanation == null - || surlsUniqueIDs.length == 0 || surls.length == 0 - || surlsUniqueIDs.length != surls.length) { - - throw new IllegalArgumentException( - "Unable to perform the updateStatusOnMatchingStatus, " - + "invalid arguments: surlsUniqueIDs=" + surlsUniqueIDs + " surls=" - + surls + " explanation=" + explanation); - } - - doUpdateStatusOnMatchingStatus(null, surlsUniqueIDs, surls, - expectedStatusCode, newStatusCode, explanation, false, true, true); - } - - public synchronized void updateStatusOnMatchingStatus( - TRequestToken requestToken, TStatusCode expectedStatusCode, - TStatusCode newStatusCode, String explanation) { - - if (requestToken == null || requestToken.getValue().trim().isEmpty() - || explanation == null) { - throw new IllegalArgumentException( - "Unable to perform the updateStatusOnMatchingStatus, " - + "invalid arguments: requestToken=" + requestToken + " explanation=" - + explanation); - } - doUpdateStatusOnMatchingStatus(requestToken, null, null, - expectedStatusCode, newStatusCode, explanation, true, false, true); - } - - public synchronized void updateStatusOnMatchingStatus( - TRequestToken requestToken, 
int[] surlsUniqueIDs, String[] surls, - TStatusCode expectedStatusCode, TStatusCode newStatusCode) - throws IllegalArgumentException { - - if (requestToken == null || requestToken.getValue().trim().isEmpty() - || surlsUniqueIDs == null || surls == null || surlsUniqueIDs.length == 0 - || surls.length == 0 || surlsUniqueIDs.length != surls.length) { - throw new IllegalArgumentException( - "Unable to perform the updateStatusOnMatchingStatus, " - + "invalid arguments: requestToken=" + requestToken - + "surlsUniqueIDs=" + surlsUniqueIDs + " surls=" + surls); - } - doUpdateStatusOnMatchingStatus(requestToken, surlsUniqueIDs, surls, - expectedStatusCode, newStatusCode, null, true, true, false); - } - - public synchronized void doUpdateStatusOnMatchingStatus( - TRequestToken requestToken, int[] surlUniqueIDs, String[] surls, - TStatusCode expectedStatusCode, TStatusCode newStatusCode, - String explanation, boolean withRequestToken, boolean withSurls, - boolean withExplanation) throws IllegalArgumentException { - - if ((withRequestToken && requestToken == null) - || (withExplanation && explanation == null) - || (withSurls && (surlUniqueIDs == null || surls == null))) { - - throw new IllegalArgumentException( - "Unable to perform the doUpdateStatusOnMatchingStatus, " - + "invalid arguments: withRequestToken=" + withRequestToken - + " requestToken=" + requestToken + " withSurls=" + withSurls - + " surlUniqueIDs=" + surlUniqueIDs + " surls=" + surls - + " withExplaination=" + withExplanation + " explanation=" - + explanation); - } - if (!checkConnection()) { - log - .error("PTG CHUNK DAO: updateStatusOnMatchingStatus - unable to get a valid connection!"); - return; - } - String str = "UPDATE status_Get sg JOIN (request_Get rg, request_queue rq) " - + "ON sg.request_GetID=rg.ID AND rg.request_queueID=rq.ID " - + "SET sg.statusCode=? "; - if (withExplanation) { - str += " , " + buildExpainationSet(explanation); - } - str += " WHERE sg.statusCode=? 
"; - if (withRequestToken) { - str += " AND " + buildTokenWhereClause(requestToken); - } - if (withSurls) { - str += " AND " + buildSurlsWhereClause(surlUniqueIDs, surls); - } - PreparedStatement stmt = null; - try { - stmt = con.prepareStatement(str); - printWarnings(con.getWarnings()); - stmt.setInt(1, StatusCodeConverter.getInstance().toDB(newStatusCode)); - printWarnings(stmt.getWarnings()); - - stmt - .setInt(2, StatusCodeConverter.getInstance().toDB(expectedStatusCode)); - printWarnings(stmt.getWarnings()); - - log.trace("PtG CHUNK DAO - updateStatusOnMatchingStatus: {}", stmt.toString()); - int count = stmt.executeUpdate(); - printWarnings(stmt.getWarnings()); - if (count == 0) { - log.trace("PtG CHUNK DAO! No chunk of PtG request was updated " - + "from {} to {}.", expectedStatusCode, newStatusCode); - } else { - log.debug("PtG CHUNK DAO! {} chunks of PtG requests were updated " - + "from {} to {}.", count, expectedStatusCode, newStatusCode); - } - } catch (SQLException e) { - log.error("PtG CHUNK DAO! Unable to updated from {} to {}! {}", - expectedStatusCode, newStatusCode, e.getMessage(), e); - } finally { - close(stmt); - } - } - - /** - * Auxiliary method used to close a ResultSet - */ - private void close(ResultSet rset) { - - if (rset != null) { - try { - rset.close(); - } catch (Exception e) { - log.error("PTG CHUNK DAO! Unable to close ResultSet! Error: {}", - e.getMessage(), e); - } - } - } - - /** - * Auxiliary method used to close a Statement - */ - private void close(Statement stmt) { - - if (stmt != null) { - try { - stmt.close(); - } catch (Exception e) { - log.error("PTG CHUNK DAO! 
Unable to close Statement {} - Error: {}", - stmt.toString(), e.getMessage(), e); - } - } - } - - private void commit(Connection con) { - - if (con != null) { - try { - con.commit(); - con.setAutoCommit(true); - } catch (SQLException e) { - log.error("PtG, SQL Exception: {}", e.getMessage(), e); - } - } - } - - /** - * Auxiliary method used to roll back a failed transaction - */ - private void rollback(Connection con) { - - if (con != null) { - try { - con.rollback(); - con.setAutoCommit(true); - log.error("PTG CHUNK DAO: roll back successful!"); - } catch (SQLException e2) { - log.error("PTG CHUNK DAO: roll back failed! {}", e2.getMessage(), e2); - } - } - } - - /** - * Private method that returns the generated ID: it throws an exception in - * case of any problem! - */ - private int extractID(ResultSet rs) throws Exception { - - if (rs == null) { - throw new Exception("PTG CHUNK DAO! Null ResultSet!"); - } - if (rs.next()) { - return rs.getInt(1); - } else { - log.error("PTG CHUNK DAO! It was not possible to establish " - + "the assigned autoincrement primary key!"); - throw new Exception("PTG CHUNK DAO! It was not possible to" - + " establish the assigned autoincrement primary key!"); - } - } - - /** - * Method that returns a String containing all IDs. - */ - private String makeWhereString(long[] rowids) { - - StringBuilder sb = new StringBuilder("("); - int n = rowids.length; - for (int i = 0; i < n; i++) { - sb.append(rowids[i]); - if (i < (n - 1)) { - sb.append(","); - } - } - sb.append(")"); - return sb.toString(); - } - - /** - * Method that returns a String containing all Surl's IDs. - */ - private String makeSURLUniqueIDWhere(int[] surlUniqueIDs) { - - StringBuilder sb = new StringBuilder("("); - for (int i = 0; i < surlUniqueIDs.length; i++) { - if (i > 0) { - sb.append(","); - } - sb.append(surlUniqueIDs[i]); - } - sb.append(")"); - return sb.toString(); - } - - /** - * Method that returns a String containing all Surls. 
- */ - private String makeSurlString(String[] surls) { - - StringBuilder sb = new StringBuilder("("); - int n = surls.length; - - for (int i = 0; i < n; i++) { - - SURL requestedSURL; - - try { - requestedSURL = SURL.makeSURLfromString(surls[i]); - } catch (NamespaceException e) { - log.error(e.getMessage()); - log.debug("Skip '{}' during query creation", surls[i]); - continue; - } - - sb.append("'"); - sb.append(requestedSURL.getNormalFormAsString()); - sb.append("','"); - sb.append(requestedSURL.getQueryFormAsString()); - sb.append("'"); - - if (i < (n - 1)) { - sb.append(","); - } - } - - sb.append(")"); - return sb.toString(); - } - - /** - * Auxiliary method that sets up the connection to the DB, as well as the - * prepared statement. - */ - private boolean setUpConnection() { - - boolean response = false; - try { - Class.forName(driver); - con = DriverManager.getConnection(url, name, password); - printWarnings(con.getWarnings()); - response = con.isValid(0); - } catch (ClassNotFoundException | SQLException e) { - log.error("PTG CHUNK DAO! Exception in setUpConnection! {}", e.getMessage(), e); - } - return response; - } - - /** - * Auxiliary method that checks if time for resetting the connection has come, - * and eventually takes it down and up back again. - */ - private boolean checkConnection() { - - boolean response = true; - if (reconnect) { - log.debug("PTG CHUNK DAO! Reconnecting to DB! "); - takeDownConnection(); - response = setUpConnection(); - if (response) { - reconnect = false; - } - } - return response; - } - - /** - * Auxiliary method that tales down a connection to the DB. - */ - private void takeDownConnection() { - - if (con != null) { - try { - con.close(); - } catch (SQLException e) { - log.error("PTG CHUNK DAO! 
Exception in takeDownConnection method: {}", - e.getMessage(), e); - } - } - } - - public Collection find(int[] surlsUniqueIDs, - String[] surlsArray, String dn) throws IllegalArgumentException { - - if (surlsUniqueIDs == null || surlsUniqueIDs.length == 0 - || surlsArray == null || surlsArray.length == 0 || dn == null) { - throw new IllegalArgumentException("Unable to perform the find, " - + "invalid arguments: surlsUniqueIDs=" + surlsUniqueIDs - + " surlsArray=" + surlsArray + " dn=" + dn); - } - return find(surlsUniqueIDs, surlsArray, dn, true); - } - - public Collection find(int[] surlsUniqueIDs, - String[] surlsArray) throws IllegalArgumentException { - - if (surlsUniqueIDs == null || surlsUniqueIDs.length == 0 - || surlsArray == null || surlsArray.length == 0) { - throw new IllegalArgumentException("Unable to perform the find, " - + "invalid arguments: surlsUniqueIDs=" + surlsUniqueIDs - + " surlsArray=" + surlsArray); - } - return find(surlsUniqueIDs, surlsArray, null, false); - } - - private synchronized Collection find(int[] surlsUniqueIDs, - String[] surlsArray, String dn, boolean withDn) - throws IllegalArgumentException { - - if ((withDn && dn == null) || surlsUniqueIDs == null - || surlsUniqueIDs.length == 0 || surlsArray == null - || surlsArray.length == 0) { - throw new IllegalArgumentException("Unable to perform the find, " - + "invalid arguments: surlsUniqueIDs=" + surlsUniqueIDs - + " surlsArray=" + surlsArray + " withDn=" + withDn + " dn=" + dn); - } - if (!checkConnection()) { - log.error("PTG CHUNK DAO: find - unable to get a valid connection!"); - return new ArrayList(); - } - PreparedStatement find = null; - ResultSet rs = null; - - try { - - String str = "SELECT rq.ID, rq.r_token, sg.statusCode, rq.pinLifetime, rg.ID, rq.timeStamp, " - + "rq.client_dn, rq.proxy, rg.sourceSURL, rg.normalized_sourceSURL_StFN, rg.sourceSURL_uniqueID, " - + "d.isSourceADirectory, d.allLevelRecursive, d.numOfLevels " - + "FROM request_queue rq JOIN (request_Get 
rg, status_Get sg) " - + "ON (rg.request_queueID=rq.ID AND sg.request_GetID=rg.ID) " - + "LEFT JOIN request_DirOption d ON rg.request_DirOptionID=d.ID " - + "WHERE ( rg.sourceSURL_uniqueID IN " - + makeSURLUniqueIDWhere(surlsUniqueIDs) - + " AND rg.sourceSURL IN " - + makeSurlString(surlsArray) + " )"; - - if (withDn) { - - str += " AND rq.client_dn=\'" + dn + "\'"; - } - - find = con.prepareStatement(str); - printWarnings(con.getWarnings()); - - List list = new ArrayList(); - - log.trace("PTG CHUNK DAO - find method: {}", find.toString()); - rs = find.executeQuery(); - printWarnings(find.getWarnings()); - PtGChunkDataTO chunkDataTO = null; - while (rs.next()) { - - chunkDataTO = new PtGChunkDataTO(); - chunkDataTO.setStatus(rs.getInt("sg.statusCode")); - chunkDataTO.setRequestToken(rs.getString("rq.r_token")); - chunkDataTO.setPrimaryKey(rs.getLong("rg.ID")); - chunkDataTO.setFromSURL(rs.getString("rg.sourceSURL")); - - chunkDataTO.setNormalizedStFN(rs - .getString("rg.normalized_sourceSURL_StFN")); - int uniqueID = rs.getInt("rg.sourceSURL_uniqueID"); - if (!rs.wasNull()) { - chunkDataTO.setSurlUniqueID(new Integer(uniqueID)); - } - - chunkDataTO.setTimeStamp(rs.getTimestamp("rq.timeStamp")); - chunkDataTO.setLifeTime(rs.getInt("rq.pinLifetime")); - chunkDataTO.setClientDN(rs.getString("rq.client_dn")); - - /** - * This code is only for the 1.3.18. This is a workaround to get FQANs - * using the proxy field on request_queue. The FE use the proxy field of - * request_queue to insert a single FQAN string containing all FQAN - * separeted by the "#" char. The proxy is a BLOB, hence it has to be - * properly conveted in string. 
- */ - java.sql.Blob blob = rs.getBlob("rq.proxy"); - if (!rs.wasNull() && blob != null) { - byte[] bdata = blob.getBytes(1, (int) blob.length()); - chunkDataTO.setVomsAttributes(new String(bdata)); - } - chunkDataTO.setDirOption(rs.getBoolean("d.isSourceADirectory")); - chunkDataTO.setAllLevelRecursive(rs.getBoolean("d.allLevelRecursive")); - chunkDataTO.setNumLevel(rs.getInt("d.numOfLevels")); - - list.add(chunkDataTO); - } - return list; - } catch (SQLException e) { - log.error("PTG CHUNK DAO: {}", e.getMessage(), e); - /* return empty Collection! */ - return new ArrayList(); - } finally { - close(rs); - close(find); - } - } - - private String buildExpainationSet(String explanation) { - - return " sg.explanation='" + explanation + "' "; - } - - private String buildTokenWhereClause(TRequestToken requestToken) { - - return " rq.r_token='" + requestToken.toString() + "' "; - } - - private String buildSurlsWhereClause(int[] surlsUniqueIDs, String[] surls) { - - return " ( rg.sourceSURL_uniqueID IN " - + makeSURLUniqueIDWhere(surlsUniqueIDs) + " AND rg.sourceSURL IN " - + makeSurlString(surls) + " ) "; - } - -} diff --git a/src/main/java/it/grid/storm/catalogs/PtGChunkDataTO.java b/src/main/java/it/grid/storm/catalogs/PtGChunkDataTO.java deleted file mode 100644 index 7baf7ee18..000000000 --- a/src/main/java/it/grid/storm/catalogs/PtGChunkDataTO.java +++ /dev/null @@ -1,311 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.catalogs; - -import it.grid.storm.srm.types.TStatusCode; -import it.grid.storm.common.types.TURLPrefix; -import java.sql.Timestamp; -import java.util.List; -import it.grid.storm.namespace.model.Protocol; - -/** - * Class that represents a row in the Persistence Layer: this is all raw data - * referring to the PtGChunkData proper, that is, String and primitive types. - * - * Each field is initialized with default values as per SRM 2.2 specification: - * protocolList GSIFTP dirOption false status SRM_REQUEST_QUEUED - * - * All other fields are 0 if int, or a white space if String. - * - * @author EGRID ICTP - * @version 3.0 - * @date June 2005 - */ -public class PtGChunkDataTO { - - private static final String FQAN_SEPARATOR = "#"; - /* Database table request_Get fields BEGIN */ - private long primaryKey = -1; // ID primary key of record in DB - private boolean dirOption; // initialised in constructor - private String fromSURL = " "; - private String normalizedStFN = null; - private Integer surlUniqueID = null; - /* Database table request_Get fields END */ - - private String requestToken = " "; - private int lifetime = 0; - private boolean allLevelRecursive; // initialised in constructor - private int numLevel; // initialised in constructor - private List protocolList = null; // initialised in constructor - private long filesize = 0; - private int status; // initialised in constructor - private String errString = " "; - private String turl = " "; - private Timestamp timeStamp; - private String clientDN = null; - private String vomsAttributes = null; - - public PtGChunkDataTO() { - - TURLPrefix protocolPreferences = new TURLPrefix(); - protocolPreferences.addProtocol(Protocol.GSIFTP); - this.protocolList = TransferProtocolListConverter.toDB(protocolPreferences); - this.status = StatusCodeConverter.getInstance().toDB( - 
TStatusCode.SRM_REQUEST_QUEUED); - this.dirOption = false; - // - this.allLevelRecursive = false; - this.numLevel = 0; - } - - public long primaryKey() { - - return primaryKey; - } - - public void setPrimaryKey(long n) { - - primaryKey = n; - } - - public String requestToken() { - - return requestToken; - } - - public void setRequestToken(String s) { - - requestToken = s; - } - - public Timestamp timeStamp() { - - return timeStamp; - } - - public void setTimeStamp(Timestamp timeStamp) { - - this.timeStamp = timeStamp; - } - - public String fromSURL() { - - return fromSURL; - } - - public void setFromSURL(String s) { - - fromSURL = s; - } - - /** - * @param normalizedStFN - * the normalizedStFN to set - */ - public void setNormalizedStFN(String normalizedStFN) { - - this.normalizedStFN = normalizedStFN; - } - - /** - * @return the normalizedStFN - */ - public String normalizedStFN() { - - return normalizedStFN; - } - - /** - * @param sURLUniqueID - * the sURLUniqueID to set - */ - public void setSurlUniqueID(Integer sURLUniqueID) { - - this.surlUniqueID = sURLUniqueID; - } - - /** - * @return the sURLUniqueID - */ - public Integer surlUniqueID() { - - return surlUniqueID; - } - - public int lifeTime() { - - return lifetime; - } - - public void setLifeTime(int n) { - - lifetime = n; - } - - public boolean dirOption() { - - return dirOption; - } - - public void setDirOption(boolean b) { - - dirOption = b; - } - - public boolean allLevelRecursive() { - - return allLevelRecursive; - } - - public void setAllLevelRecursive(boolean b) { - - allLevelRecursive = b; - } - - public int numLevel() { - - return numLevel; - } - - public void setNumLevel(int n) { - - numLevel = n; - } - - public List protocolList() { - - return protocolList; - } - - public void setProtocolList(List l) { - - if ((l != null) && (!l.isEmpty())) - protocolList = l; - } - - public long fileSize() { - - return filesize; - } - - public void setFileSize(long n) { - - filesize = n; - } - - public int 
status() { - - return status; - } - - public void setStatus(int n) { - - status = n; - } - - public String errString() { - - return errString; - } - - public void setErrString(String s) { - - errString = s; - } - - public String turl() { - - return turl; - } - - public void setTurl(String s) { - - turl = s; - } - - public String clientDN() { - - return clientDN; - } - - public void setClientDN(String s) { - - clientDN = s; - } - - public String vomsAttributes() { - - return vomsAttributes; - } - - public void setVomsAttributes(String s) { - - vomsAttributes = s; - } - - public void setVomsAttributes(String[] fqaNsAsString) { - - vomsAttributes = ""; - for (int i = 0; i < fqaNsAsString.length; i++) { - vomsAttributes += fqaNsAsString[i]; - if (i < fqaNsAsString.length - 1) { - vomsAttributes += FQAN_SEPARATOR; - } - } - - } - - public String[] vomsAttributesArray() { - - return vomsAttributes.split(FQAN_SEPARATOR); - } - - public String toString() { - - StringBuilder sb = new StringBuilder(); - sb.append(primaryKey); - sb.append(" "); - sb.append(requestToken); - sb.append(" "); - sb.append(fromSURL); - sb.append(" "); - sb.append(normalizedStFN); - sb.append(" "); - sb.append(surlUniqueID); - sb.append(" "); - sb.append(lifetime); - sb.append(" "); - sb.append(dirOption); - sb.append(" "); - sb.append(allLevelRecursive); - sb.append(" "); - sb.append(numLevel); - sb.append(" "); - sb.append(protocolList); - sb.append(" "); - sb.append(filesize); - sb.append(" "); - sb.append(status); - sb.append(" "); - sb.append(errString); - sb.append(" "); - sb.append(turl); - return sb.toString(); - } - -} diff --git a/src/main/java/it/grid/storm/catalogs/PtGData.java b/src/main/java/it/grid/storm/catalogs/PtGData.java deleted file mode 100644 index 0ef728428..000000000 --- a/src/main/java/it/grid/storm/catalogs/PtGData.java +++ /dev/null @@ -1,38 +0,0 @@ -package it.grid.storm.catalogs; - -import it.grid.storm.srm.types.TDirOption; -import 
it.grid.storm.srm.types.TLifeTimeInSeconds; -import it.grid.storm.srm.types.TSizeInBytes; - -public interface PtGData extends FileTransferData { - - /** - * Method that returns the requested pin life time for this chunk of the srm - * request. - */ - public TLifeTimeInSeconds getPinLifeTime(); - - /** - * Method that returns the dirOption specified in the srm request. - */ - public TDirOption getDirOption(); - - /** - * Method that returns the file size for this chunk of the srm request. - */ - public TSizeInBytes getFileSize(); - - /** - * Method used to set the size of the file corresponding to the requested - * SURL. If the supplied TSizeInByte is null, then nothing gets set! - */ - public void setFileSize(TSizeInBytes size); - - /** - * Method that sets the status of this request to SRM_FILE_PINNED; it needs - * the explanation String which describes the situation in greater detail; if - * a null is passed, then an empty String is used as explanation. - */ - public void changeStatusSRM_FILE_PINNED(String explanation); - -} diff --git a/src/main/java/it/grid/storm/catalogs/PtGPersistentChunkData.java b/src/main/java/it/grid/storm/catalogs/PtGPersistentChunkData.java deleted file mode 100644 index a363e36c2..000000000 --- a/src/main/java/it/grid/storm/catalogs/PtGPersistentChunkData.java +++ /dev/null @@ -1,217 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ - -package it.grid.storm.catalogs; - -import it.grid.storm.common.types.TURLPrefix; -import it.grid.storm.griduser.GridUserInterface; -import it.grid.storm.srm.types.TDirOption; -import it.grid.storm.srm.types.TLifeTimeInSeconds; -import it.grid.storm.srm.types.TRequestToken; -import it.grid.storm.srm.types.TReturnStatus; -import it.grid.storm.srm.types.TSURL; -import it.grid.storm.srm.types.TSizeInBytes; -import it.grid.storm.srm.types.TStatusCode; -import it.grid.storm.srm.types.TTURL; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * This class represents a PrepareToGetChunkData, that is part of a multifile - * PrepareToGet srm request. It contains data about: the requestToken, the - * fromSURL, the requested lifeTime of pinning, the TDirOption which tells - * whether the requested SURL is a directory and if it must be recursed at all - * levels, as well as the desired number of levels to recurse, the desired - * transferProtocols in order of preference, the fileSize, and the transferURL - * for the supplied SURL. 
- * - * @author EGRID - ICTP Trieste - * @date March 21st, 2005 - * @version 3.0 - */ -public class PtGPersistentChunkData extends IdentityPtGData implements - PersistentChunkData { - - private static final Logger log = LoggerFactory - .getLogger(PtGPersistentChunkData.class); - - /** - * long representing the primary key for the persistence layer, in the - * status_Get table - */ - private long primaryKey = -1; - - /** - * This is the requestToken of the multifile srm request to which this chunk - * belongs - */ - private TRequestToken requestToken; - - /** - * @param requestToken - * @param fromSURL - * @param lifeTime - * @param dirOption - * @param desiredProtocols - * @param fileSize - * @param status - * @param transferURL - * @throws InvalidPtGDataAttributesException - */ - public PtGPersistentChunkData(GridUserInterface auth, - TRequestToken requestToken, TSURL fromSURL, TLifeTimeInSeconds lifeTime, - TDirOption dirOption, TURLPrefix desiredProtocols, TSizeInBytes fileSize, - TReturnStatus status, TTURL transferURL) - throws InvalidPtGDataAttributesException, - InvalidPtGDataAttributesException, - InvalidFileTransferDataAttributesException, - InvalidSurlRequestDataAttributesException { - - super(auth, fromSURL, lifeTime, dirOption, desiredProtocols, fileSize, - status, transferURL); - if (requestToken == null) { - log.debug("PtGPersistentChunkData: requestToken is null!"); - throw new InvalidPtGPersistentChunkDataAttributesException(requestToken, - fromSURL, lifeTime, dirOption, desiredProtocols, fileSize, status, - transferURL); - } - - this.requestToken = requestToken; - } - - /** - * Method used to get the primary key used in the persistence layer! - */ - @Override - public long getPrimaryKey() { - - return primaryKey; - } - - /** - * Method used to set the primary key to be used in the persistence layer! 
- */ - public void setPrimaryKey(long l) { - - primaryKey = l; - } - - /** - * Method that returns the requestToken of the srm request to which this chunk - * belongs. - */ - @Override - public TRequestToken getRequestToken() { - - return requestToken; - } - - /** - * Method that sets the status of this request to SRM_FILE_PINNED; it needs - * the explanation String which describes the situation in greater detail; if - * a null is passed, then an empty String is used as explanation. - */ - public void changeStatusSRM_FILE_PINNED(String explanation) { - - setStatus(TStatusCode.SRM_FILE_PINNED, explanation); - } - - /* - * (non-Javadoc) - * - * @see java.lang.Object#hashCode() - */ - @Override - public int hashCode() { - - final int prime = 31; - int result = super.hashCode(); - result = prime * result + (int) (primaryKey ^ (primaryKey >>> 32)); - result = prime * result - + ((requestToken == null) ? 0 : requestToken.hashCode()); - return result; - } - - /* - * (non-Javadoc) - * - * @see java.lang.Object#equals(java.lang.Object) - */ - @Override - public boolean equals(Object obj) { - - if (this == obj) { - return true; - } - if (!super.equals(obj)) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - PtGPersistentChunkData other = (PtGPersistentChunkData) obj; - if (primaryKey != other.primaryKey) { - return false; - } - if (requestToken == null) { - if (other.requestToken != null) { - return false; - } - } else if (!requestToken.equals(other.requestToken)) { - return false; - } - return true; - } - - /* - * (non-Javadoc) - * - * @see java.lang.Object#toString() - */ - @Override - public String toString() { - - StringBuilder builder = new StringBuilder(); - builder.append("PtGPersistentChunkData [primaryKey="); - builder.append(primaryKey); - builder.append(", requestToken="); - builder.append(requestToken); - builder.append(", pinLifeTime="); - builder.append(pinLifeTime); - builder.append(", dirOption="); - builder.append(dirOption); - 
builder.append(", fileSize="); - builder.append(fileSize); - builder.append(", transferProtocols="); - builder.append(transferProtocols); - builder.append(", SURL="); - builder.append(SURL); - builder.append(", status="); - builder.append(status); - builder.append(", transferURL="); - builder.append(transferURL); - builder.append("]"); - return builder.toString(); - } - - @Override - public long getIdentifier() { - - return getPrimaryKey(); - } -} diff --git a/src/main/java/it/grid/storm/catalogs/PtPChunkCatalog.java b/src/main/java/it/grid/storm/catalogs/PtPChunkCatalog.java index 482f3e15f..e00ec0367 100644 --- a/src/main/java/it/grid/storm/catalogs/PtPChunkCatalog.java +++ b/src/main/java/it/grid/storm/catalogs/PtPChunkCatalog.java @@ -17,6 +17,15 @@ package it.grid.storm.catalogs; +import java.util.Arrays; +import java.util.Collection; +import java.util.List; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.collect.Lists; + import it.grid.storm.common.types.SizeUnit; import it.grid.storm.common.types.TURLPrefix; import it.grid.storm.common.types.TimeUnit; @@ -24,6 +33,26 @@ import it.grid.storm.griduser.AbstractGridUser; import it.grid.storm.griduser.GridUserInterface; import it.grid.storm.griduser.GridUserManager; +import it.grid.storm.persistence.converter.FileLifetimeConverter; +import it.grid.storm.persistence.converter.FileStorageTypeConverter; +import it.grid.storm.persistence.converter.OverwriteModeConverter; +import it.grid.storm.persistence.converter.PinLifetimeConverter; +import it.grid.storm.persistence.converter.SizeInBytesIntConverter; +import it.grid.storm.persistence.converter.SpaceTokenStringConverter; +import it.grid.storm.persistence.converter.StatusCodeConverter; +import it.grid.storm.persistence.converter.TURLConverter; +import it.grid.storm.persistence.converter.TransferProtocolListConverter; +import it.grid.storm.persistence.dao.PtPChunkDAO; +import 
it.grid.storm.persistence.exceptions.InvalidFileTransferDataAttributesException; +import it.grid.storm.persistence.exceptions.InvalidPtPDataAttributesException; +import it.grid.storm.persistence.exceptions.InvalidPtPPersistentChunkDataAttributesException; +import it.grid.storm.persistence.exceptions.InvalidReducedPtPChunkDataAttributesException; +import it.grid.storm.persistence.exceptions.InvalidSurlRequestDataAttributesException; +import it.grid.storm.persistence.impl.mysql.PtPChunkDAOMySql; +import it.grid.storm.persistence.model.PtPChunkDataTO; +import it.grid.storm.persistence.model.PtPPersistentChunkData; +import it.grid.storm.persistence.model.ReducedPtPChunkData; +import it.grid.storm.persistence.model.ReducedPtPChunkDataTO; import it.grid.storm.srm.types.InvalidTRequestTokenAttributesException; import it.grid.storm.srm.types.InvalidTSURLAttributesException; import it.grid.storm.srm.types.InvalidTSizeAttributesException; @@ -39,20 +68,11 @@ import it.grid.storm.srm.types.TStatusCode; import it.grid.storm.srm.types.TTURL; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.List; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - /** - * Class that represents StoRMs PtPChunkCatalog: it collects PtPChunkData and - * provides methods for looking up a PtPChunkData based on TRequestToken, as - * well as for updating data into persistence. Methods are also supplied to - * evaluate if a SURL is in SRM_SPACE_AVAILABLE state, and to transit expired - * SURLs in SRM_SPACE_AVAILABLE state to SRM_FILE_LIFETIME_EXPIRED. + * Class that represents StoRMs PtPChunkCatalog: it collects PtPChunkData and provides methods for + * looking up a PtPChunkData based on TRequestToken, as well as for updating data into persistence. + * Methods are also supplied to evaluate if a SURL is in SRM_SPACE_AVAILABLE state, and to transit + * expired SURLs in SRM_SPACE_AVAILABLE state to SRM_FILE_LIFETIME_EXPIRED. 
* * @author EGRID - ICTP Trieste * @date June, 2005 @@ -60,554 +80,412 @@ */ public class PtPChunkCatalog { - private static final Logger log = LoggerFactory - .getLogger(PtPChunkCatalog.class); - - /* only instance of PtPChunkCatalog present in StoRM! */ - private static final PtPChunkCatalog cat = new PtPChunkCatalog(); - private final PtPChunkDAO dao = PtPChunkDAO.getInstance(); - - private PtPChunkCatalog() {} - - /** - * Method that returns the only instance of PtPChunkCatalog available. - */ - public static PtPChunkCatalog getInstance() { - - return cat; - } - - /** - * Method used to update into Persistence a retrieved PtPChunkData. - */ - synchronized public void update(PtPPersistentChunkData chunkData) { - - PtPChunkDataTO to = new PtPChunkDataTO(); - /* rimary key needed by DAO Object */ - to.setPrimaryKey(chunkData.getPrimaryKey()); - to.setStatus(StatusCodeConverter.getInstance().toDB( - chunkData.getStatus().getStatusCode())); - to.setErrString(chunkData.getStatus().getExplanation()); - to.setTransferURL(TURLConverter.getInstance().toDB( - chunkData.getTransferURL().toString())); - to.setPinLifetime(PinLifetimeConverter.getInstance().toDB( - chunkData.pinLifetime().value())); - to.setFileLifetime(FileLifetimeConverter.getInstance().toDB( - chunkData.fileLifetime().value())); - to.setFileStorageType(FileStorageTypeConverter.getInstance().toDB( - chunkData.fileStorageType())); - to.setOverwriteOption(OverwriteModeConverter.getInstance().toDB( - chunkData.overwriteOption())); - to.setNormalizedStFN(chunkData.getSURL().normalizedStFN()); - to.setSurlUniqueID(new Integer(chunkData.getSURL().uniqueId())); - to.setClientDN(chunkData.getUser().getDn()); - if (chunkData.getUser() instanceof AbstractGridUser) { - if (((AbstractGridUser) chunkData.getUser()).hasVoms()) { - to.setVomsAttributes(((AbstractGridUser) chunkData.getUser()) - .getFQANsAsString()); - } - - } - dao.update(to); - } - - /** - * Method that returns a Collection of PtPChunkData Objects 
matching the - * supplied TRequestToken. If any of the data associated to the TRequestToken - * is not well formed and so does not allow a PtPChunkData Object to be - * created, then that part of the request is dropped, gets logged and an - * attempt is made to write in the DB that the chunk was malformed; the - * processing continues with the next part. Only the valid chunks get - * returned. If there are no chunks to process then an empty Collection is - * returned, and a messagge gets logged. NOTE! Chunks in SRM_ABORTED status - * are NOT returned! This is imporant because this method is intended to be - * used by the Feeders to fetch all chunks in the request, and aborted chunks - * should not be picked up for processing! - */ - synchronized public Collection lookup( - final TRequestToken rt) { - - Collection chunkTOs = dao.find(rt); - log.debug("PtPChunkCatalog: retrieved data {}", chunkTOs); - return buildChunkDataList(chunkTOs); - } - - /** - * Private method used to create a PtPChunkData object, from a PtPChunkDataTO - * and TRequestToken. If a chunk cannot be created, an error messagge gets - * logged and an attempt is made to signal in the DB that the chunk is - * malformed. 
- */ - private PtPPersistentChunkData makeOne(PtPChunkDataTO auxTO, TRequestToken rt) { - - StringBuilder errorSb = new StringBuilder(); - // toSURL - TSURL toSURL = null; - try { - toSURL = TSURL.makeFromStringValidate(auxTO.toSURL()); - } catch (InvalidTSURLAttributesException e) { - errorSb.append(e); - } - if (auxTO.normalizedStFN() != null) { - toSURL.setNormalizedStFN(auxTO.normalizedStFN()); - } - if (auxTO.surlUniqueID() != null) { - toSURL.setUniqueID(auxTO.surlUniqueID().intValue()); - } - // pinLifetime - TLifeTimeInSeconds pinLifetime = null; - try { - long pinLifeTime = PinLifetimeConverter.getInstance().toStoRM( - auxTO.pinLifetime()); - // Check for max value allowed - long max = Configuration.getInstance().getPinLifetimeMaximum(); - if (pinLifeTime > max) { - log.warn("PinLifeTime is greater than the max value allowed. Drop the " - + "value to the max = {} seconds", max); - pinLifeTime = max; - } - pinLifetime = TLifeTimeInSeconds.make(pinLifeTime, TimeUnit.SECONDS); - } catch (IllegalArgumentException e) { - errorSb.append("\n"); - errorSb.append(e); - } - // fileLifetime - TLifeTimeInSeconds fileLifetime = null; - try { - fileLifetime = TLifeTimeInSeconds.make(FileLifetimeConverter - .getInstance().toStoRM(auxTO.fileLifetime()), TimeUnit.SECONDS); - } catch (IllegalArgumentException e) { - errorSb.append("\n"); - errorSb.append(e); - } - // fileStorageType - TFileStorageType fileStorageType = FileStorageTypeConverter.getInstance() - .toSTORM(auxTO.fileStorageType()); - if (fileStorageType == TFileStorageType.EMPTY) { - errorSb.append("\nTFileStorageType could not be translated from " - + "its String representation! String: " + auxTO.fileStorageType()); - // Use the default value defined in Configuration. 
- fileStorageType = TFileStorageType.getTFileStorageType(Configuration - .getInstance().getDefaultFileStorageType()); - errorSb.append("\nUsed the default TFileStorageType as defined " - + "in StoRM config.: " + fileStorageType); - } - // expectedFileSize - // - // WARNING! A converter is used because the DB uses 0 for empty, whereas - // StoRM object model does allow a 0 size! Since this is an optional - // field - // in the SRM specs, null must be converted explicitly to Empty - // TSizeInBytes - // because it is indeed well formed! - TSizeInBytes expectedFileSize = null; - TSizeInBytes emptySize = TSizeInBytes.makeEmpty(); - long sizeTranslation = SizeInBytesIntConverter.getInstance().toStoRM( - auxTO.expectedFileSize()); - if (emptySize.value() == sizeTranslation) { - expectedFileSize = emptySize; - } else { - try { - expectedFileSize = TSizeInBytes.make(auxTO.expectedFileSize(), - SizeUnit.BYTES); - } catch (InvalidTSizeAttributesException e) { - errorSb.append("\n"); - errorSb.append(e); - } - } - // spaceToken! - // - // WARNING! A converter is still needed because of DB logic for missing - // SpaceToken makes use of NULL, whereas StoRM object model does not - // allow - // for null! It makes use of a specific Empty type. - // - // Indeed, the SpaceToken field is optional, so a request with a null - // value - // for the SpaceToken field in the DB, _is_ well formed! - TSpaceToken spaceToken = null; - TSpaceToken emptyToken = TSpaceToken.makeEmpty(); - /** - * convert empty string representation of DPM into StoRM representation; - */ - String spaceTokenTranslation = SpaceTokenStringConverter.getInstance() - .toStoRM(auxTO.spaceToken()); - if (emptyToken.toString().equals(spaceTokenTranslation)) { - spaceToken = emptyToken; - } else { - try { - spaceToken = TSpaceToken.make(spaceTokenTranslation); - } catch (InvalidTSpaceTokenAttributesException e) { - errorSb.append("\n"); - errorSb.append(e); - } - } - // overwriteOption! 
- TOverwriteMode overwriteOption = OverwriteModeConverter.getInstance() - .toSTORM(auxTO.overwriteOption()); - if (overwriteOption == TOverwriteMode.EMPTY) { - errorSb.append("\nTOverwriteMode could not be translated " - + "from its String representation! String: " + auxTO.overwriteOption()); - overwriteOption = null; - } - // transferProtocols - TURLPrefix transferProtocols = TransferProtocolListConverter.toSTORM(auxTO - .protocolList()); - if (transferProtocols.size() == 0) { - errorSb.append("\nEmpty list of TransferProtocols " - + "or could not translate TransferProtocols!"); - transferProtocols = null; // fail construction of PtPChunkData! - } - // status - TReturnStatus status = null; - TStatusCode code = StatusCodeConverter.getInstance() - .toSTORM(auxTO.status()); - if (code == TStatusCode.EMPTY) { - errorSb.append("\nRetrieved StatusCode was not recognised: " - + auxTO.status()); - } else { - status = new TReturnStatus(code, auxTO.errString()); - } - GridUserInterface gridUser = null; - try { - if (auxTO.vomsAttributes() != null - && !auxTO.vomsAttributes().trim().equals("")) { - gridUser = GridUserManager.makeVOMSGridUser(auxTO.clientDN(), - auxTO.vomsAttributesArray()); - } else { - gridUser = GridUserManager.makeGridUser(auxTO.clientDN()); - } - - } catch (IllegalArgumentException e) { - log.error("Unexpected error on voms grid user creation. " - + "IllegalArgumentException: {}", e.getMessage(), e); - } - - // transferURL - /** - * whatever is read is just meaningless because PtP will fill it in!!! So - * create an Empty TTURL by default! Vital to avoid problems with unknown - * DPM NULL/EMPTY logic policy! 
- */ - TTURL transferURL = TTURL.makeEmpty(); - // make PtPChunkData - PtPPersistentChunkData aux = null; - try { - aux = new PtPPersistentChunkData(gridUser, rt, toSURL, pinLifetime, - fileLifetime, fileStorageType, spaceToken, expectedFileSize, - transferProtocols, overwriteOption, status, transferURL); - aux.setPrimaryKey(auxTO.primaryKey()); - } catch (InvalidPtPPersistentChunkDataAttributesException e) { - dao.signalMalformedPtPChunk(auxTO); - log.warn("PtP CHUNK CATALOG! Retrieved malformed PtP chunk data" - + " from persistence. Dropping chunk from request: {}", rt); - log.warn(e.getMessage(), e); - log.warn(errorSb.toString()); - } catch (InvalidPtPDataAttributesException e) { - dao.signalMalformedPtPChunk(auxTO); - log.warn("PtP CHUNK CATALOG! Retrieved malformed PtP chunk data" - + " from persistence. Dropping chunk from request: {}", rt); - log.warn(e.getMessage(), e); - log.warn(errorSb.toString()); - } catch (InvalidFileTransferDataAttributesException e) { - dao.signalMalformedPtPChunk(auxTO); - log.warn("PtP CHUNK CATALOG! Retrieved malformed PtP chunk data" - + " from persistence. Dropping chunk from request: {}", rt); - log.warn(e.getMessage(), e); - log.warn(errorSb.toString()); - } catch (InvalidSurlRequestDataAttributesException e) { - dao.signalMalformedPtPChunk(auxTO); - log.warn("PtP CHUNK CATALOG! Retrieved malformed PtP chunk data" - + " from persistence. Dropping chunk from request: {}", rt); - log.warn(e.getMessage(), e); - log.warn(errorSb.toString()); - } - // end... 
- return aux; - } - - /** - * - * Adds to the received PtPChunkDataTO the normalized StFN and the SURL unique - * ID taken from the PtPChunkData - * - * @param chunkTO - * @param chunk - */ - private void completeTO(ReducedPtPChunkDataTO chunkTO, - final ReducedPtPChunkData chunk) { - - chunkTO.setNormalizedStFN(chunk.toSURL().normalizedStFN()); - chunkTO.setSurlUniqueID(new Integer(chunk.toSURL().uniqueId())); - } - - /** - * - * Creates a ReducedPtGChunkDataTO from the received PtGChunkDataTO and - * completes it with the normalized StFN and the SURL unique ID taken from the - * PtGChunkData - * - * @param chunkTO - * @param chunk - * @return - * @throws InvalidReducedPtPChunkDataAttributesException - */ - private ReducedPtPChunkDataTO completeTO(PtPChunkDataTO chunkTO, - final PtPPersistentChunkData chunk) - throws InvalidReducedPtPChunkDataAttributesException { - - ReducedPtPChunkDataTO reducedChunkTO = this.reduce(chunkTO); - this.completeTO(reducedChunkTO, this.reduce(chunk)); - return reducedChunkTO; - } - - /** - * Creates a ReducedPtPChunkData from the data contained in the received - * PtPChunkData - * - * @param chunk - * @return - * @throws InvalidReducedPtPChunkDataAttributesException - */ - private ReducedPtPChunkData reduce(PtPPersistentChunkData chunk) - throws InvalidReducedPtPChunkDataAttributesException { - - ReducedPtPChunkData reducedChunk = new ReducedPtPChunkData(chunk.getSURL(), - chunk.getStatus(), chunk.fileStorageType(), chunk.fileLifetime()); - reducedChunk.setPrimaryKey(chunk.getPrimaryKey()); - return reducedChunk; - } - - /** - * Creates a ReducedPtPChunkDataTO from the data contained in the received - * PtPChunkDataTO - * - * @param chunkTO - * @return - */ - private ReducedPtPChunkDataTO reduce(PtPChunkDataTO chunkTO) { - - ReducedPtPChunkDataTO reducedChunkTO = new ReducedPtPChunkDataTO(); - reducedChunkTO.setPrimaryKey(chunkTO.primaryKey()); - reducedChunkTO.setToSURL(chunkTO.toSURL()); - 
reducedChunkTO.setNormalizedStFN(chunkTO.normalizedStFN()); - reducedChunkTO.setSurlUniqueID(chunkTO.surlUniqueID()); - reducedChunkTO.setStatus(chunkTO.status()); - reducedChunkTO.setErrString(chunkTO.errString()); - return reducedChunkTO; - } - - /** - * Checks if the received PtPChunkDataTO contains the fields not set by the - * front end but required - * - * @param chunkTO - * @return - */ - private boolean isComplete(PtPChunkDataTO chunkTO) { - - return (chunkTO.normalizedStFN() != null) - && (chunkTO.surlUniqueID() != null); - } - - /** - * Checks if the received ReducedPtGChunkDataTO contains the fields not set by - * the front end but required - * - * @param reducedChunkTO - * @return - */ - private boolean isComplete(ReducedPtPChunkDataTO reducedChunkTO) { - - return (reducedChunkTO.normalizedStFN() != null) - && (reducedChunkTO.surlUniqueID() != null); - } - - public Collection lookupReducedPtPChunkData( - TRequestToken requestToken, Collection surls) { - - Collection reducedChunkDataTOs = dao.findReduced( - requestToken.getValue(), surls); - log.debug("PtP CHUNK CATALOG: retrieved data {}", reducedChunkDataTOs); - return buildReducedChunkDataList(reducedChunkDataTOs); - } - - public Collection lookupPtPChunkData(TSURL surl, - GridUserInterface user) { - - return lookupPtPChunkData( - (List) Arrays.asList(new TSURL[] { surl }), user); - } - - private Collection lookupPtPChunkData( - List surls, GridUserInterface user) { - - int[] surlsUniqueIDs = new int[surls.size()]; - String[] surlsArray = new String[surls.size()]; - int index = 0; - for (TSURL tsurl : surls) { - surlsUniqueIDs[index] = tsurl.uniqueId(); - surlsArray[index] = tsurl.rawSurl(); - index++; - } - Collection chunkDataTOs = dao.find(surlsUniqueIDs, - surlsArray, user.getDn()); - log.debug("PtP CHUNK CATALOG: retrieved data {}", chunkDataTOs); - return buildChunkDataList(chunkDataTOs); - } - - private Collection buildChunkDataList( - Collection chunkDataTOs) { - - ArrayList list = new 
ArrayList(); - PtPPersistentChunkData chunk; - for (PtPChunkDataTO chunkTO : chunkDataTOs) { - chunk = makeOne(chunkTO); - if (chunk == null) { - continue; - } - list.add(chunk); - if (isComplete(chunkTO)) { - continue; - } - try { - dao.updateIncomplete(completeTO(chunkTO, chunk)); - } catch (InvalidReducedPtPChunkDataAttributesException e) { - log.warn("PtG CHUNK CATALOG! unable to add missing informations on " - + "DB to the request: {}", e.getMessage()); - } - } - log.debug("PtPChunkCatalog: returning {}\n\n", list); - return list; - } - - private PtPPersistentChunkData makeOne(PtPChunkDataTO chunkTO) { - - try { - return makeOne(chunkTO, - new TRequestToken(chunkTO.requestToken(), chunkTO.timeStamp())); - } catch (InvalidTRequestTokenAttributesException e) { - throw new IllegalStateException( - "Unexpected InvalidTRequestTokenAttributesException in TRequestToken: " - + e); - } - } - - private Collection buildReducedChunkDataList( - Collection chunkDataTOCollection) { - - ArrayList list = new ArrayList(); - ReducedPtPChunkData reducedChunkData; - for (ReducedPtPChunkDataTO reducedChunkDataTO : chunkDataTOCollection) { - reducedChunkData = makeOneReduced(reducedChunkDataTO); - if (reducedChunkData != null) { - list.add(reducedChunkData); - if (!this.isComplete(reducedChunkDataTO)) { - this.completeTO(reducedChunkDataTO, reducedChunkData); - dao.updateIncomplete(reducedChunkDataTO); - } - } - } - log.debug("PtP CHUNK CATALOG: returning {}", list); - return list; - } - - private ReducedPtPChunkData makeOneReduced( - ReducedPtPChunkDataTO reducedChunkDataTO) { - - StringBuilder errorSb = new StringBuilder(); - // fromSURL - TSURL toSURL = null; - try { - toSURL = TSURL.makeFromStringValidate(reducedChunkDataTO.toSURL()); - } catch (InvalidTSURLAttributesException e) { - errorSb.append(e); - } - if (reducedChunkDataTO.normalizedStFN() != null) { - toSURL.setNormalizedStFN(reducedChunkDataTO.normalizedStFN()); - } - if (reducedChunkDataTO.surlUniqueID() != null) { - 
toSURL.setUniqueID(reducedChunkDataTO.surlUniqueID().intValue()); - } - // status - TReturnStatus status = null; - TStatusCode code = StatusCodeConverter.getInstance().toSTORM( - reducedChunkDataTO.status()); - if (code == TStatusCode.EMPTY) { - errorSb.append("\nRetrieved StatusCode was not recognised: " - + reducedChunkDataTO.status()); - } else { - status = new TReturnStatus(code, reducedChunkDataTO.errString()); - } - // fileStorageType - TFileStorageType fileStorageType = FileStorageTypeConverter.getInstance() - .toSTORM(reducedChunkDataTO.fileStorageType()); - if (fileStorageType == TFileStorageType.EMPTY) { - errorSb.append("\nTFileStorageType could not be " - + "translated from its String representation! String: " - + reducedChunkDataTO.fileStorageType()); - // Use the default value defined in Configuration. - fileStorageType = TFileStorageType.getTFileStorageType(Configuration - .getInstance().getDefaultFileStorageType()); - errorSb - .append("\nUsed the default TFileStorageType as defined in StoRM config.: " - + fileStorageType); - } - // fileLifetime - TLifeTimeInSeconds fileLifetime = null; - try { - fileLifetime = TLifeTimeInSeconds.make(FileLifetimeConverter - .getInstance().toStoRM(reducedChunkDataTO.fileLifetime()), - TimeUnit.SECONDS); - } catch (IllegalArgumentException e) { - errorSb.append("\n"); - errorSb.append(e); - } - // make ReducedPtPChunkData - ReducedPtPChunkData aux = null; - try { - aux = new ReducedPtPChunkData(toSURL, status, fileStorageType, - fileLifetime); - aux.setPrimaryKey(reducedChunkDataTO.primaryKey()); - } catch (InvalidReducedPtPChunkDataAttributesException e) { - log.warn("PtP CHUNK CATALOG! Retrieved malformed Reduced PtP" - + " chunk data from persistence: dropping reduced chunk..."); - log.warn(e.getMessage(), e); - log.warn(errorSb.toString()); - } - // end... 
- return aux; - } - - public int updateStatus(TRequestToken requestToken, TSURL surl, - TStatusCode statusCode, String explanation) { - - return dao.updateStatus(requestToken, new int[] { surl.uniqueId() }, - new String[] { surl.rawSurl() }, statusCode, explanation); - } - - public int updateFromPreviousStatus(TRequestToken requestToken, - TStatusCode expectedStatusCode, TStatusCode newStatusCode, - String explanation) { - - return dao.updateStatusOnMatchingStatus(requestToken, expectedStatusCode, - newStatusCode, explanation); - } - - public int updateFromPreviousStatus(TRequestToken requestToken, - List surlList, TStatusCode expectedStatusCode, - TStatusCode newStatusCode) { - - int[] surlsUniqueIDs = new int[surlList.size()]; - String[] surls = new String[surlList.size()]; - int index = 0; - for (TSURL tsurl : surlList) { - surlsUniqueIDs[index] = tsurl.uniqueId(); - surls[index] = tsurl.rawSurl(); - index++; - } - return dao.updateStatusOnMatchingStatus(requestToken, surlsUniqueIDs, surls, - expectedStatusCode, newStatusCode); - } + private static final Logger log = LoggerFactory.getLogger(PtPChunkCatalog.class); + + private static PtPChunkCatalog instance; + + public static synchronized PtPChunkCatalog getInstance() { + if (instance == null) { + instance = new PtPChunkCatalog(); + } + return instance; + } + + private final PtPChunkDAO dao; + + private PtPChunkCatalog() { + dao = PtPChunkDAOMySql.getInstance(); + } + + /** + * Method used to update into Persistence a retrieved PtPChunkData. 
+ */ + public synchronized void update(PtPPersistentChunkData chunkData) { + + PtPChunkDataTO to = new PtPChunkDataTO(); + /* Primary key needed by DAO Object */ + to.setPrimaryKey(chunkData.getPrimaryKey()); + to.setStatus(StatusCodeConverter.getInstance().toDB(chunkData.getStatus().getStatusCode())); + to.setErrString(chunkData.getStatus().getExplanation()); + to.setTransferURL(TURLConverter.getInstance().toDB(chunkData.getTransferURL().toString())); + to.setPinLifetime(PinLifetimeConverter.getInstance().toDB(chunkData.pinLifetime().value())); + to.setFileLifetime(FileLifetimeConverter.getInstance().toDB(chunkData.fileLifetime().value())); + to.setFileStorageType(FileStorageTypeConverter.getInstance().toDB(chunkData.fileStorageType())); + to.setOverwriteOption(OverwriteModeConverter.getInstance().toDB(chunkData.overwriteOption())); + to.setNormalizedStFN(chunkData.getSURL().normalizedStFN()); + to.setSurlUniqueID(new Integer(chunkData.getSURL().uniqueId())); + to.setClientDN(chunkData.getUser().getDn()); + if (chunkData.getUser() instanceof AbstractGridUser) { + if (((AbstractGridUser) chunkData.getUser()).hasVoms()) { + to.setVomsAttributes(((AbstractGridUser) chunkData.getUser()).getFQANsAsString()); + } + + } + dao.update(to); + } + + /** + * Method that returns a Collection of PtPChunkData Objects matching the supplied TRequestToken. + * If any of the data associated to the TRequestToken is not well formed and so does not allow a + * PtPChunkData Object to be created, then that part of the request is dropped, gets logged and an + * attempt is made to write in the DB that the chunk was malformed; the processing continues with + * the next part. Only the valid chunks get returned. If there are no chunks to process then an + * empty Collection is returned, and a message gets logged. NOTE! Chunks in SRM_ABORTED status are + * NOT returned! 
This is important because this method is intended to be used by the Feeders to + * fetch all chunks in the request, and aborted chunks should not be picked up for processing! + */ + public synchronized Collection lookup(final TRequestToken rt) { + + Collection chunkTOs = dao.find(rt); + log.debug("PtPChunkCatalog: retrieved data {}", chunkTOs); + return buildChunkDataList(chunkTOs); + } + + /** + * Private method used to create a PtPChunkData object, from a PtPChunkDataTO and TRequestToken. + * If a chunk cannot be created, an error messagge gets logged and an attempt is made to signal in + * the DB that the chunk is malformed. + */ + private PtPPersistentChunkData makeOne(PtPChunkDataTO auxTO, TRequestToken rt) { + + StringBuilder errorSb = new StringBuilder(); + // toSURL + TSURL toSURL = null; + try { + toSURL = TSURL.makeFromStringValidate(auxTO.toSURL()); + } catch (InvalidTSURLAttributesException e) { + errorSb.append(e); + } + if (auxTO.normalizedStFN() != null) { + toSURL.setNormalizedStFN(auxTO.normalizedStFN()); + } + if (auxTO.surlUniqueID() != null) { + toSURL.setUniqueID(auxTO.surlUniqueID().intValue()); + } + // pinLifetime + TLifeTimeInSeconds pinLifetime = null; + try { + long pinLifeTime = PinLifetimeConverter.getInstance().toStoRM(auxTO.pinLifetime()); + // Check for max value allowed + long max = Configuration.getInstance().getPinLifetimeMaximum(); + if (pinLifeTime > max) { + log.warn("PinLifeTime is greater than the max value allowed. 
Drop the " + + "value to the max = {} seconds", max); + pinLifeTime = max; + } + pinLifetime = TLifeTimeInSeconds.make(pinLifeTime, TimeUnit.SECONDS); + } catch (IllegalArgumentException e) { + errorSb.append("\n"); + errorSb.append(e); + } + // fileLifetime + TLifeTimeInSeconds fileLifetime = null; + try { + fileLifetime = TLifeTimeInSeconds + .make(FileLifetimeConverter.getInstance().toStoRM(auxTO.fileLifetime()), TimeUnit.SECONDS); + } catch (IllegalArgumentException e) { + errorSb.append("\n"); + errorSb.append(e); + } + // fileStorageType + TFileStorageType fileStorageType = + FileStorageTypeConverter.getInstance().toSTORM(auxTO.fileStorageType()); + if (fileStorageType == TFileStorageType.EMPTY) { + errorSb.append("\nTFileStorageType could not be translated from " + + "its String representation! String: " + auxTO.fileStorageType()); + // Use the default value defined in Configuration. + fileStorageType = TFileStorageType + .getTFileStorageType(Configuration.getInstance().getDefaultFileStorageType()); + errorSb.append("\nUsed the default TFileStorageType as defined " + "in StoRM config.: " + + fileStorageType); + } + // expectedFileSize + // + // WARNING! A converter is used because the DB uses 0 for empty, whereas + // StoRM object model does allow a 0 size! Since this is an optional + // field + // in the SRM specs, null must be converted explicitly to Empty + // TSizeInBytes + // because it is indeed well formed! + TSizeInBytes expectedFileSize = null; + TSizeInBytes emptySize = TSizeInBytes.makeEmpty(); + long sizeTranslation = SizeInBytesIntConverter.getInstance().toStoRM(auxTO.expectedFileSize()); + if (emptySize.value() == sizeTranslation) { + expectedFileSize = emptySize; + } else { + try { + expectedFileSize = TSizeInBytes.make(auxTO.expectedFileSize(), SizeUnit.BYTES); + } catch (InvalidTSizeAttributesException e) { + errorSb.append("\n"); + errorSb.append(e); + } + } + // spaceToken! + // + // WARNING! 
A converter is still needed because of DB logic for missing + // SpaceToken makes use of NULL, whereas StoRM object model does not + // allow + // for null! It makes use of a specific Empty type. + // + // Indeed, the SpaceToken field is optional, so a request with a null + // value + // for the SpaceToken field in the DB, _is_ well formed! + TSpaceToken spaceToken = null; + TSpaceToken emptyToken = TSpaceToken.makeEmpty(); + /** + * convert empty string representation of DPM into StoRM representation; + */ + String spaceTokenTranslation = + SpaceTokenStringConverter.getInstance().toStoRM(auxTO.spaceToken()); + if (emptyToken.toString().equals(spaceTokenTranslation)) { + spaceToken = emptyToken; + } else { + try { + spaceToken = TSpaceToken.make(spaceTokenTranslation); + } catch (InvalidTSpaceTokenAttributesException e) { + errorSb.append("\n"); + errorSb.append(e); + } + } + // overwriteOption! + TOverwriteMode overwriteOption = + OverwriteModeConverter.getInstance().toSTORM(auxTO.overwriteOption()); + if (overwriteOption == TOverwriteMode.EMPTY) { + errorSb.append("\nTOverwriteMode could not be translated " + + "from its String representation! String: " + auxTO.overwriteOption()); + overwriteOption = null; + } + // transferProtocols + TURLPrefix transferProtocols = TransferProtocolListConverter.toSTORM(auxTO.protocolList()); + if (transferProtocols.size() == 0) { + errorSb + .append("\nEmpty list of TransferProtocols " + "or could not translate TransferProtocols!"); + transferProtocols = null; // fail construction of PtPChunkData! 
+ } + // status + TReturnStatus status = null; + TStatusCode code = StatusCodeConverter.getInstance().toSTORM(auxTO.status()); + if (code == TStatusCode.EMPTY) { + errorSb.append("\nRetrieved StatusCode was not recognised: " + auxTO.status()); + } else { + status = new TReturnStatus(code, auxTO.errString()); + } + GridUserInterface gridUser = null; + try { + if (auxTO.vomsAttributes() != null && !auxTO.vomsAttributes().trim().equals("")) { + gridUser = GridUserManager.makeVOMSGridUser(auxTO.clientDN(), auxTO.vomsAttributesArray()); + } else { + gridUser = GridUserManager.makeGridUser(auxTO.clientDN()); + } + + } catch (IllegalArgumentException e) { + log.error("Unexpected error on voms grid user creation. " + "IllegalArgumentException: {}", + e.getMessage(), e); + } + + // transferURL + /** + * whatever is read is just meaningless because PtP will fill it in!!! So create an Empty TTURL + * by default! Vital to avoid problems with unknown DPM NULL/EMPTY logic policy! + */ + TTURL transferURL = TTURL.makeEmpty(); + // make PtPChunkData + PtPPersistentChunkData aux = null; + try { + aux = new PtPPersistentChunkData(gridUser, rt, toSURL, pinLifetime, fileLifetime, + fileStorageType, spaceToken, expectedFileSize, transferProtocols, overwriteOption, status, + transferURL); + aux.setPrimaryKey(auxTO.primaryKey()); + } catch (InvalidPtPPersistentChunkDataAttributesException e) { + dao.fail(auxTO); + log.warn("PtP CHUNK CATALOG! Retrieved malformed PtP chunk data" + + " from persistence. Dropping chunk from request: {}", rt); + log.warn(e.getMessage(), e); + log.warn(errorSb.toString()); + } catch (InvalidPtPDataAttributesException e) { + dao.fail(auxTO); + log.warn("PtP CHUNK CATALOG! Retrieved malformed PtP chunk data" + + " from persistence. Dropping chunk from request: {}", rt); + log.warn(e.getMessage(), e); + log.warn(errorSb.toString()); + } catch (InvalidFileTransferDataAttributesException e) { + dao.fail(auxTO); + log.warn("PtP CHUNK CATALOG! 
Retrieved malformed PtP chunk data" + + " from persistence. Dropping chunk from request: {}", rt); + log.warn(e.getMessage(), e); + log.warn(errorSb.toString()); + } catch (InvalidSurlRequestDataAttributesException e) { + dao.fail(auxTO); + log.warn("PtP CHUNK CATALOG! Retrieved malformed PtP chunk data" + + " from persistence. Dropping chunk from request: {}", rt); + log.warn(e.getMessage(), e); + log.warn(errorSb.toString()); + } + // end... + return aux; + } + + /** + * + * Adds to the received PtPChunkDataTO the normalized StFN and the SURL unique ID taken from the + * PtPChunkData + * + * @param chunkTO + * @param chunk + */ + private void completeTO(ReducedPtPChunkDataTO chunkTO, final ReducedPtPChunkData chunk) { + + chunkTO.setNormalizedStFN(chunk.toSURL().normalizedStFN()); + chunkTO.setSurlUniqueID(new Integer(chunk.toSURL().uniqueId())); + } + + /** + * + * Creates a ReducedPtGChunkDataTO from the received PtGChunkDataTO and completes it with the + * normalized StFN and the SURL unique ID taken from the PtGChunkData + * + * @param chunkTO + * @param chunk + * @return + * @throws InvalidReducedPtPChunkDataAttributesException + */ + private ReducedPtPChunkDataTO completeTO(PtPChunkDataTO chunkTO, + final PtPPersistentChunkData chunk) throws InvalidReducedPtPChunkDataAttributesException { + + ReducedPtPChunkDataTO reducedChunkTO = this.reduce(chunkTO); + this.completeTO(reducedChunkTO, this.reduce(chunk)); + return reducedChunkTO; + } + + /** + * Creates a ReducedPtPChunkData from the data contained in the received PtPChunkData + * + * @param chunk + * @return + * @throws InvalidReducedPtPChunkDataAttributesException + */ + private ReducedPtPChunkData reduce(PtPPersistentChunkData chunk) + throws InvalidReducedPtPChunkDataAttributesException { + + ReducedPtPChunkData reducedChunk = new ReducedPtPChunkData(chunk.getSURL(), chunk.getStatus(), + chunk.fileStorageType(), chunk.fileLifetime()); + reducedChunk.setPrimaryKey(chunk.getPrimaryKey()); + return 
reducedChunk; + } + + /** + * Creates a ReducedPtPChunkDataTO from the data contained in the received PtPChunkDataTO + * + * @param chunkTO + * @return + */ + private ReducedPtPChunkDataTO reduce(PtPChunkDataTO chunkTO) { + + ReducedPtPChunkDataTO reducedChunkTO = new ReducedPtPChunkDataTO(); + reducedChunkTO.setPrimaryKey(chunkTO.primaryKey()); + reducedChunkTO.setToSURL(chunkTO.toSURL()); + reducedChunkTO.setNormalizedStFN(chunkTO.normalizedStFN()); + reducedChunkTO.setSurlUniqueID(chunkTO.surlUniqueID()); + reducedChunkTO.setStatus(chunkTO.status()); + reducedChunkTO.setErrString(chunkTO.errString()); + return reducedChunkTO; + } + + /** + * Checks if the received PtPChunkDataTO contains the fields not set by the front end but required + * + * @param chunkTO + * @return + */ + private boolean isComplete(PtPChunkDataTO chunkTO) { + + return (chunkTO.normalizedStFN() != null) && (chunkTO.surlUniqueID() != null); + } + + public Collection lookupPtPChunkData(TSURL surl, GridUserInterface user) { + + return lookupPtPChunkData((List) Arrays.asList(new TSURL[] {surl}), user); + } + + private Collection lookupPtPChunkData(List surls, + GridUserInterface user) { + + int[] surlsUniqueIDs = new int[surls.size()]; + String[] surlsArray = new String[surls.size()]; + int index = 0; + for (TSURL tsurl : surls) { + surlsUniqueIDs[index] = tsurl.uniqueId(); + surlsArray[index] = tsurl.rawSurl(); + index++; + } + Collection chunkDataTOs = dao.find(surlsUniqueIDs, surlsArray, user.getDn()); + log.debug("PtP CHUNK CATALOG: retrieved data {}", chunkDataTOs); + return buildChunkDataList(chunkDataTOs); + } + + private Collection buildChunkDataList( + Collection chunkDataTOs) { + + Collection list = Lists.newArrayList(); + PtPPersistentChunkData chunk; + for (PtPChunkDataTO chunkTO : chunkDataTOs) { + chunk = makeOne(chunkTO); + if (chunk == null) { + continue; + } + list.add(chunk); + if (isComplete(chunkTO)) { + continue; + } + try { + dao.updateIncomplete(completeTO(chunkTO, 
chunk)); + } catch (InvalidReducedPtPChunkDataAttributesException e) { + log.warn( + "PtG CHUNK CATALOG! unable to add missing informations on " + "DB to the request: {}", + e.getMessage()); + } + } + log.debug("PtPChunkCatalog: returning {}\n\n", list); + return list; + } + + private PtPPersistentChunkData makeOne(PtPChunkDataTO chunkTO) { + + try { + return makeOne(chunkTO, new TRequestToken(chunkTO.requestToken(), chunkTO.timeStamp())); + } catch (InvalidTRequestTokenAttributesException e) { + throw new IllegalStateException( + "Unexpected InvalidTRequestTokenAttributesException in TRequestToken: " + e); + } + } + + public int updateStatus(TRequestToken requestToken, TSURL surl, TStatusCode statusCode, + String explanation) { + + return dao.updateStatus(requestToken, new int[] {surl.uniqueId()}, + new String[] {surl.rawSurl()}, statusCode, explanation); + } + + public int updateFromPreviousStatus(TRequestToken requestToken, TStatusCode expectedStatusCode, + TStatusCode newStatusCode, String explanation) { + + return dao.updateStatusOnMatchingStatus(requestToken, expectedStatusCode, newStatusCode, + explanation); + } + + public int updateFromPreviousStatus(TRequestToken requestToken, List surlList, + TStatusCode expectedStatusCode, TStatusCode newStatusCode) { + + int[] surlsUniqueIDs = new int[surlList.size()]; + String[] surls = new String[surlList.size()]; + int index = 0; + for (TSURL tsurl : surlList) { + surlsUniqueIDs[index] = tsurl.uniqueId(); + surls[index] = tsurl.rawSurl(); + index++; + } + return dao.updateStatusOnMatchingStatus(requestToken, surlsUniqueIDs, surls, expectedStatusCode, + newStatusCode); + } } diff --git a/src/main/java/it/grid/storm/catalogs/PtPChunkDAO.java b/src/main/java/it/grid/storm/catalogs/PtPChunkDAO.java deleted file mode 100644 index b6d89c3c1..000000000 --- a/src/main/java/it/grid/storm/catalogs/PtPChunkDAO.java +++ /dev/null @@ -1,1683 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 
2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.catalogs; - -import static it.grid.storm.catalogs.ChunkDAOUtils.buildInClauseForArray; -import static it.grid.storm.catalogs.ChunkDAOUtils.printWarnings; -import static it.grid.storm.srm.types.TStatusCode.SRM_ABORTED; -import static it.grid.storm.srm.types.TStatusCode.SRM_FAILURE; -import static it.grid.storm.srm.types.TStatusCode.SRM_FILE_LIFETIME_EXPIRED; -import static it.grid.storm.srm.types.TStatusCode.SRM_REQUEST_INPROGRESS; -import static it.grid.storm.srm.types.TStatusCode.SRM_SPACE_AVAILABLE; -import static it.grid.storm.srm.types.TStatusCode.SRM_SUCCESS; - -import com.google.common.base.Preconditions; -import com.google.common.collect.Lists; -import com.google.common.collect.Maps; - -import it.grid.storm.config.Configuration; -import it.grid.storm.namespace.NamespaceException; -import it.grid.storm.namespace.naming.SURL; -import it.grid.storm.srm.types.TRequestToken; -import it.grid.storm.srm.types.TSURL; -import it.grid.storm.srm.types.TStatusCode; - -import org.apache.commons.lang.StringUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.sql.Connection; -import java.sql.DriverManager; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.Statement; -import java.util.Collection; -import java.util.Collections; -import java.util.Iterator; -import java.util.List; -import 
java.util.Map; -import java.util.Timer; -import java.util.TimerTask; - -/** - * DAO class for PtPChunkCatalog. This DAO is specifically designed to connect - * to a MySQL DB. The raw data found in those tables is pre-treated in order to - * turn it into the Object Model of StoRM. See Method comments for further info. - * BEWARE! DAO Adjusts for extra fields in the DB that are not present in the - * object model. - * - * @author EGRID ICTP - * @version 2.0 - * @date June 2005 - */ -public class PtPChunkDAO { - - private static final Logger log = LoggerFactory.getLogger(PtPChunkDAO.class); - - /* String with the name of the class for the DB driver */ - private final String driver = Configuration.getInstance().getDBDriver(); - /* String referring to the URL of the DB */ - private final String url = Configuration.getInstance().getDBURL(); - /* String with the password for the DB */ - private final String password = Configuration.getInstance().getDBPassword(); - /* String with the name for the DB */ - private final String name = Configuration.getInstance().getDBUserName(); - /* Connection to DB - WARNING!!! It is kept open all the time! */ - private Connection con = null; - - private static final PtPChunkDAO dao = new PtPChunkDAO(); - - /* timer thread that will run a task to alert when reconnecting is necessary! */ - private Timer clock = null; - /* - * timer task that will update the boolean signaling that a reconnection is - * needed - */ - private TimerTask clockTask = null; - /* milliseconds that must pass before reconnecting to DB */ - private final long period = Configuration.getInstance().getDBReconnectPeriod() * 1000; - /* initial delay in milliseconds before starting timer */ - private final long delay = Configuration.getInstance().getDBReconnectDelay() * 1000; - - /* boolean that tells whether reconnection is needed because of MySQL bug! 
*/ - private boolean reconnect = false; - - private StatusCodeConverter statusCodeConverter = StatusCodeConverter.getInstance(); - - private PtPChunkDAO() { - - setUpConnection(); - clock = new Timer(); - clockTask = new TimerTask() { - - @Override - public void run() { - - reconnect = true; - } - }; // clock task - clock.scheduleAtFixedRate(clockTask, delay, period); - } - - /** - * Method that returns the only instance of the PtPChunkDAO. - */ - public static PtPChunkDAO getInstance() { - - return dao; - } - - /** - * Method used to save the changes made to a retrieved PtPChunkDataTO, back - * into the MySQL DB. Only the transferURL, statusCode and explanation, of - * status_Put table get written to the DB. Likewise for the pinLifetime and - * fileLifetime of request_queue. In case of any error, an error messagge gets - * logged but no exception is thrown. - */ - public synchronized void update(PtPChunkDataTO to) { - - if (!checkConnection()) { - log.error("PtP CHUNK DAO: update - unable to get a valid connection!"); - return; - } - PreparedStatement updatePut = null; - try { - // prepare statement... - updatePut = con - .prepareStatement("UPDATE " - + "request_queue rq JOIN (status_Put sp, request_Put rp) ON " - + "(rq.ID=rp.request_queueID AND sp.request_PutID=rp.ID) " - + "SET sp.transferURL=?, sp.statusCode=?, sp.explanation=?, rq.pinLifetime=?, rq.fileLifetime=?, rq.config_FileStorageTypeID=?, rq.config_OverwriteID=?, " - + "rp.normalized_targetSURL_StFN=?, rp.targetSURL_uniqueID=? 
" - + "WHERE rp.ID=?"); - printWarnings(con.getWarnings()); - - updatePut.setString(1, to.transferURL()); - printWarnings(updatePut.getWarnings()); - - updatePut.setInt(2, to.status()); - printWarnings(updatePut.getWarnings()); - - updatePut.setString(3, to.errString()); - printWarnings(updatePut.getWarnings()); - - updatePut.setInt(4, to.pinLifetime()); - printWarnings(updatePut.getWarnings()); - - updatePut.setInt(5, to.fileLifetime()); - printWarnings(updatePut.getWarnings()); - - updatePut.setString(6, to.fileStorageType()); - printWarnings(updatePut.getWarnings()); - - updatePut.setString(7, to.overwriteOption()); - printWarnings(updatePut.getWarnings()); - - updatePut.setString(8, to.normalizedStFN()); - printWarnings(updatePut.getWarnings()); - - updatePut.setInt(9, to.surlUniqueID()); - printWarnings(updatePut.getWarnings()); - - updatePut.setLong(10, to.primaryKey()); - printWarnings(updatePut.getWarnings()); - // run updateStatusPut... - log.trace("PtP CHUNK DAO - update method: {}", updatePut); - updatePut.executeUpdate(); - printWarnings(updatePut.getWarnings()); - } catch (SQLException e) { - log.error("PtP CHUNK DAO: Unable to complete update! {}", e.getMessage(), e); - } finally { - close(updatePut); - } - } - - /** - * Updates the request_Put represented by the received ReducedPtPChunkDataTO - * by setting its normalized_targetSURL_StFN and targetSURL_uniqueID - * - * @param chunkTO - */ - public synchronized void updateIncomplete(ReducedPtPChunkDataTO chunkTO) { - - if (!checkConnection()) { - log - .error("PtP CHUNK DAO: updateIncomplete - unable to get a valid connection!"); - return; - } - String str = "UPDATE request_Put SET normalized_targetSURL_StFN=?, targetSURL_uniqueID=? 
" - + "WHERE ID=?"; - PreparedStatement stmt = null; - try { - stmt = con.prepareStatement(str); - printWarnings(con.getWarnings()); - - stmt.setString(1, chunkTO.normalizedStFN()); - printWarnings(stmt.getWarnings()); - - stmt.setInt(2, chunkTO.surlUniqueID()); - printWarnings(stmt.getWarnings()); - - stmt.setLong(3, chunkTO.primaryKey()); - printWarnings(stmt.getWarnings()); - - log.trace("PtP CHUNK DAO - update incomplete: {}", stmt); - stmt.executeUpdate(); - printWarnings(stmt.getWarnings()); - } catch (SQLException e) { - log.error("PtP CHUNK DAO: Unable to complete update incomplete! {}", - e.getMessage(), e); - } finally { - close(stmt); - } - } - - /** - * Method used to refresh the PtPChunkDataTO information from the MySQL DB. - * This method is intended to be used during the srmAbortRequest/File - * operation. In case of any error, an error message gets logged but no - * exception is thrown; a null PtPChunkDataTO is returned. - */ - public synchronized PtPChunkDataTO refresh(long id) { - - if (!checkConnection()) { - log.error("PtP CHUNK DAO: refresh - unable to get a valid connection!"); - return null; - } - String prot = "SELECT tp.config_ProtocolsID FROM request_TransferProtocols tp " - + "WHERE tp.request_queueID IN " - + "(SELECT rp.request_queueID FROM request_Put rp WHERE rp.ID=?)"; - - String refresh = "SELECT rq.config_FileStorageTypeID, rq.config_OverwriteID, rq.timeStamp, rq.pinLifetime, rq.fileLifetime, rq.s_token, rq.r_token, rq.client_dn, rq.proxy, rp.ID, rp.targetSURL, rp.expectedFileSize, rp.normalized_targetSURL_StFN, rp.targetSURL_uniqueID, sp.statusCode, sp.transferURL " - + "FROM request_queue rq JOIN (request_Put rp, status_Put sp) " - + "ON (rq.ID=rp.request_queueID AND sp.request_PutID=rp.ID) " - + "WHERE rp.ID=?"; - - PreparedStatement stmt = null; - ResultSet rs = null; - PtPChunkDataTO chunkDataTO = null; - - try { - // get protocols for the request - stmt = con.prepareStatement(prot); - printWarnings(con.getWarnings()); - - 
List protocols = Lists.newArrayList(); - stmt.setLong(1, id); - printWarnings(stmt.getWarnings()); - - log.trace("PtP CHUNK DAO - refresh method: {}", stmt); - rs = stmt.executeQuery(); - printWarnings(stmt.getWarnings()); - while (rs.next()) { - protocols.add(rs.getString("tp.config_ProtocolsID")); - } - close(rs); - close(stmt); - - // get chunk of the request - stmt = con.prepareStatement(refresh); - printWarnings(con.getWarnings()); - - stmt.setLong(1, id); - printWarnings(stmt.getWarnings()); - - log.trace("PtP CHUNK DAO - refresh method: {}", stmt); - rs = stmt.executeQuery(); - printWarnings(stmt.getWarnings()); - - if (rs.next()) { - chunkDataTO = new PtPChunkDataTO(); - chunkDataTO.setFileStorageType(rs - .getString("rq.config_FileStorageTypeID")); - chunkDataTO.setOverwriteOption(rs.getString("rq.config_OverwriteID")); - chunkDataTO.setTimeStamp(rs.getTimestamp("rq.timeStamp")); - chunkDataTO.setPinLifetime(rs.getInt("rq.pinLifetime")); - chunkDataTO.setFileLifetime(rs.getInt("rq.fileLifetime")); - chunkDataTO.setSpaceToken(rs.getString("rq.s_token")); - chunkDataTO.setRequestToken(rs.getString("rq.r_token")); - chunkDataTO.setPrimaryKey(rs.getLong("rp.ID")); - chunkDataTO.setToSURL(rs.getString("rp.targetSURL")); - chunkDataTO.setNormalizedStFN(rs - .getString("rp.normalized_targetSURL_StFN")); - int uniqueID = rs.getInt("rp.targetSURL_uniqueID"); - if (!rs.wasNull()) { - chunkDataTO.setSurlUniqueID(Integer.valueOf(uniqueID)); - } - - chunkDataTO.setExpectedFileSize(rs.getLong("rp.expectedFileSize")); - chunkDataTO.setProtocolList(protocols); - chunkDataTO.setStatus(rs.getInt("sp.statusCode")); - chunkDataTO.setTransferURL(rs.getString("sp.transferURL")); - chunkDataTO.setClientDN(rs.getString("rq.client_dn")); - - /** - * This code is only for the 1.3.18. This is a workaround to get FQANs - * using the proxy field on request_queue. 
The FE use the proxy field of - * request_queue to insert a single FQAN string containing all FQAN - * separated by the "#" char. The proxy is a BLOB, hence it has to be - * properly converted in string. - */ - java.sql.Blob blob = rs.getBlob("rq.proxy"); - if (!rs.wasNull() && blob != null) { - byte[] bdata = blob.getBytes(1, (int) blob.length()); - chunkDataTO.setVomsAttributes(new String(bdata)); - } - if (rs.next()) { - log.warn("ATTENTION in PtP CHUNK DAO! Possible DB corruption! " - + "refresh method invoked for specific chunk with id {}, but found " - + "more than one such chunks!", id); - } - } else { - log.warn("ATTENTION in PtP CHUNK DAO! Possible DB corruption! " - + "refresh method invoked for specific chunk with id {}, but chunk " - + "NOT found in persistence!", id); - } - } catch (SQLException e) { - log.error("PtP CHUNK DAO! Unable to refresh chunk! {}", e.getMessage(), e); - chunkDataTO = null; - } finally { - close(rs); - close(stmt); - } - return chunkDataTO; - } - - /** - * Method that queries the MySQL DB to find all entries matching the supplied - * TRequestToken. The Collection contains the corresponding PtPChunkDataTO - * objects. An initial simple query establishes the list of protocols - * associated with the request. A second complex query establishes all chunks - * associated with the request, by properly joining request_queue, request_Put - * and status_Put. The considered fields are: (1) From status_Put: the ID - * field which becomes the TOs primary key, and statusCode. (2) From - * request_Put: targetSURL and expectedFileSize. (3) From request_queue: - * pinLifetime, fileLifetime, config_FileStorageTypeID, s_token, - * config_OverwriteID. In case of any error, a log gets written and an empty - * collection is returned. No exception is returned. NOTE! Chunks in - * SRM_ABORTED status are NOT returned! 
This is important because this method - * is intended to be used by the Feeders to fetch all chunks in the request, - * and aborted chunks should not be picked up for processing! - */ - public synchronized Collection find(TRequestToken requestToken) { - - if (!checkConnection()) { - log.error("PtP CHUNK DAO: find - unable to get a valid connection!"); - return null; - } - String strToken = requestToken.toString(); - String str = null; - PreparedStatement find = null; - ResultSet rs = null; - try { - str = "SELECT tp.config_ProtocolsID " - + "FROM request_TransferProtocols tp JOIN request_queue rq ON tp.request_queueID=rq.ID " - + "WHERE rq.r_token=?"; - - find = con.prepareStatement(str); - printWarnings(con.getWarnings()); - - List protocols = Lists.newArrayList(); - find.setString(1, strToken); - printWarnings(find.getWarnings()); - - log.trace("PtP CHUNK DAO - find method: {}", find); - rs = find.executeQuery(); - printWarnings(find.getWarnings()); - - while (rs.next()) { - protocols.add(rs.getString("tp.config_ProtocolsID")); - } - close(rs); - close(find); - - // get chunks of the request - str = "SELECT rq.config_FileStorageTypeID, rq.config_OverwriteID, rq.timeStamp, rq.pinLifetime, rq.fileLifetime, rq.s_token, rq.client_dn, rq.proxy, rp.ID, rp.targetSURL, rp.expectedFileSize, rp.normalized_targetSURL_StFN, rp.targetSURL_uniqueID, sp.statusCode " - + "FROM request_queue rq JOIN (request_Put rp, status_Put sp) " - + "ON (rp.request_queueID=rq.ID AND sp.request_PutID=rp.ID) " - + "WHERE rq.r_token=? 
AND sp.statusCode<>?"; - - find = con.prepareStatement(str); - printWarnings(con.getWarnings()); - - List list = Lists.newArrayList(); - find.setString(1, strToken); - printWarnings(find.getWarnings()); - - find.setInt(2, - statusCodeConverter.toDB(SRM_ABORTED)); - printWarnings(find.getWarnings()); - - log.trace("PtP CHUNK DAO - find method: {}", find); - rs = find.executeQuery(); - printWarnings(find.getWarnings()); - PtPChunkDataTO chunkDataTO = null; - while (rs.next()) { - chunkDataTO = new PtPChunkDataTO(); - chunkDataTO.setFileStorageType(rs - .getString("rq.config_FileStorageTypeID")); - chunkDataTO.setOverwriteOption(rs.getString("rq.config_OverwriteID")); - chunkDataTO.setTimeStamp(rs.getTimestamp("rq.timeStamp")); - chunkDataTO.setPinLifetime(rs.getInt("rq.pinLifetime")); - chunkDataTO.setFileLifetime(rs.getInt("rq.fileLifetime")); - chunkDataTO.setSpaceToken(rs.getString("rq.s_token")); - chunkDataTO.setClientDN(rs.getString("rq.client_dn")); - - /** - * This code is only for the 1.3.18. This is a workaround to get FQANs - * using the proxy field on request_queue. The FE use the proxy field of - * request_queue to insert a single FQAN string containing all FQAN - * separated by the "#" char. The proxy is a BLOB, hence it has to be - * properly converted in string. 
- */ - java.sql.Blob blob = rs.getBlob("rq.proxy"); - if (!rs.wasNull() && blob != null) { - byte[] bdata = blob.getBytes(1, (int) blob.length()); - chunkDataTO.setVomsAttributes(new String(bdata)); - } - chunkDataTO.setPrimaryKey(rs.getLong("rp.ID")); - chunkDataTO.setToSURL(rs.getString("rp.targetSURL")); - - chunkDataTO.setNormalizedStFN(rs - .getString("rp.normalized_targetSURL_StFN")); - int uniqueID = rs.getInt("rp.targetSURL_uniqueID"); - if (!rs.wasNull()) { - chunkDataTO.setSurlUniqueID(Integer.valueOf(uniqueID)); - } - - chunkDataTO.setExpectedFileSize(rs.getLong("rp.expectedFileSize")); - chunkDataTO.setProtocolList(protocols); - chunkDataTO.setRequestToken(strToken); - chunkDataTO.setStatus(rs.getInt("sp.statusCode")); - list.add(chunkDataTO); - } - return list; - } catch (SQLException e) { - log.error("PTP CHUNK DAO: {}", e.getMessage(), e); - /* return empty Collection! */ - return Lists.newArrayList(); - } finally { - close(rs); - close(find); - } - } - - /** - * Method that returns a Collection of ReducedPtPChunkDataTO associated to the - * given TRequestToken expressed as String. 
- */ - public synchronized Collection findReduced( - String reqtoken, Collection surls) { - - if (!checkConnection()) { - log - .error("PtP CHUNK DAO: findReduced - unable to get a valid connection!"); - return Lists.newArrayList(); - } - PreparedStatement find = null; - ResultSet rs = null; - boolean addInClause = surls != null && !surls.isEmpty(); - try { - // get reduced chunks - String str = "SELECT rq.fileLifetime, rq.config_FileStorageTypeID, rp.ID, rp.targetSURL, rp.normalized_targetSURL_StFN, rp.targetSURL_uniqueID, sp.statusCode " - + "FROM request_queue rq JOIN (request_Put rp, status_Put sp) " - + "ON (rp.request_queueID=rq.ID AND sp.request_PutID=rp.ID) " - + "WHERE rq.r_token=?"; - if (addInClause) { - str += " AND rp.targetSURL_uniqueID IN ("; - for (int i=0; i list = Lists.newArrayList(); - find.setString(1, reqtoken); - printWarnings(find.getWarnings()); - if (addInClause) { - Iterator iterator = surls.iterator(); - int start = 2; - while (iterator.hasNext()) { - TSURL surl = iterator.next(); - find.setInt(start++, surl.uniqueId()); - } - } - printWarnings(find.getWarnings()); - log.trace("PtP CHUNK DAO! 
findReduced with request token; {}", find); - rs = find.executeQuery(); - printWarnings(find.getWarnings()); - - ReducedPtPChunkDataTO reducedChunkDataTO = null; - while (rs.next()) { - reducedChunkDataTO = new ReducedPtPChunkDataTO(); - reducedChunkDataTO.setFileLifetime(rs.getInt("rq.fileLifetime")); - reducedChunkDataTO.setFileStorageType(rs - .getString("rq.config_FileStorageTypeID")); - reducedChunkDataTO.setPrimaryKey(rs.getLong("rp.ID")); - reducedChunkDataTO.setToSURL(rs.getString("rp.targetSURL")); - reducedChunkDataTO.setNormalizedStFN(rs - .getString("rp.normalized_targetSURL_StFN")); - int uniqueID = rs.getInt("rp.targetSURL_uniqueID"); - if (!rs.wasNull()) { - reducedChunkDataTO.setSurlUniqueID(uniqueID); - } - - reducedChunkDataTO.setStatus(rs.getInt("sp.statusCode")); - list.add(reducedChunkDataTO); - } - return list; - } catch (SQLException e) { - log.error("PTP CHUNK DAO: {}", e.getMessage(), e); - /* return empty Collection! */ - return Lists.newArrayList(); - } finally { - close(rs); - close(find); - } - } - - /** - * Method that returns a Collection of ReducedPtPChunkDataTO corresponding to - * the IDs supplied in the given List of Long. If the List is null or empty, - * an empty collection is returned and error messages get logged. 
- */ - public synchronized Collection findReduced( - List ids) { - - if (ids != null && !ids.isEmpty()) { - if (!checkConnection()) { - log - .error("PtP CHUNK DAO: findReduced - unable to get a valid connection!"); - return Lists.newArrayList(); - } - PreparedStatement find = null; - ResultSet rs = null; - try { - // get reduced chunks - String str = "SELECT rq.fileLifetime, rq.config_FileStorageTypeID, rp.ID, rp.targetSURL, rp.normalized_targetSURL_StFN, rp.targetSURL_uniqueID, sp.statusCode " - + "FROM request_queue rq JOIN (request_Put rp, status_Put sp) " - + "ON (rp.request_queueID=rq.ID AND sp.request_PutID=rp.ID) " - + "WHERE rp.ID IN (" + StringUtils.join(ids.toArray(), ',') + ")"; - find = con.prepareStatement(str); - printWarnings(con.getWarnings()); - - List list = Lists.newArrayList(); - log.trace("PtP CHUNK DAO! fetchReduced; {}", find); - rs = find.executeQuery(); - printWarnings(find.getWarnings()); - - ReducedPtPChunkDataTO reducedChunkDataTO = null; - while (rs.next()) { - reducedChunkDataTO = new ReducedPtPChunkDataTO(); - reducedChunkDataTO.setFileLifetime(rs.getInt("rq.fileLifetime")); - reducedChunkDataTO.setFileStorageType(rs - .getString("rq.config_FileStorageTypeID")); - reducedChunkDataTO.setPrimaryKey(rs.getLong("rp.ID")); - reducedChunkDataTO.setToSURL(rs.getString("rp.targetSURL")); - reducedChunkDataTO.setNormalizedStFN(rs - .getString("rp.normalized_targetSURL_StFN")); - int uniqueID = rs.getInt("rp.targetSURL_uniqueID"); - if (!rs.wasNull()) { - reducedChunkDataTO.setSurlUniqueID(uniqueID); - } - - reducedChunkDataTO.setStatus(rs.getInt("sp.statusCode")); - list.add(reducedChunkDataTO); - } - return list; - } catch (SQLException e) { - log.error("PTP CHUNK DAO: {}", e.getMessage(), e); - /* return empty Collection */ - return Lists.newArrayList(); - } finally { - close(rs); - close(find); - } - } else { - log.warn("ATTENTION in PtP CHUNK DAO! 
fetchReduced " - + "invoked with null or empty list of IDs!"); - return Lists.newArrayList(); - } - } - - /** - * Method used in extraordinary situations to signal that data retrieved from - * the DB was malformed and could not be translated into the StoRM object - * model. This method attempts to change the status of the chunk to - * SRM_FAILURE and record it in the DB, in the status_Put table. This - * operation could potentially fail because the source of the malformed - * problems could be a problematic DB; indeed, initially only log messages - * were recorded. Yet it soon became clear that the source of malformed data - * were actually the clients themselves and/or FE recording in the DB. In - * these circumstances the client would find its request as being in the - * SRM_IN_PROGRESS state for ever. Hence the pressing need to inform it of the - * encountered problems. - */ - public synchronized void signalMalformedPtPChunk(PtPChunkDataTO auxTO) { - - if (!checkConnection()) { - log - .error("PtP CHUNK DAO: signalMalformedPtPChunk - unable to get a valid connection!"); - return; - } - String signalSQL = "UPDATE status_Put sp SET sp.statusCode=" - + statusCodeConverter.toDB(SRM_FAILURE) - + ", sp.explanation=? " + "WHERE sp.request_PutID=" + auxTO.primaryKey(); - PreparedStatement signal = null; - try { - signal = con.prepareStatement(signalSQL); - printWarnings(con.getWarnings()); - /* NB: Prepared statement spares DB-specific String notation! */ - signal.setString(1, "This chunk of the request is malformed!"); - printWarnings(signal.getWarnings()); - - log.trace("PtP CHUNK DAO - signalMalformedPtPChunk method: {}", signal); - signal.executeUpdate(); - printWarnings(signal.getWarnings()); - } catch (SQLException e) { - log.error("PtPChunkDAO! Unable to signal in DB that a chunk of " - + "the request was malformed! 
Request: {}; Error: {}", auxTO.toString(), - e.getMessage(), e); - } finally { - close(signal); - } - } - - /** - * Method that returns the number of Put requests on the given SURL, that are - * in SRM_SPACE_AVAILABLE state. This method is intended to be used by - * PtPChunkCatalog in the isSRM_SPACE_AVAILABLE method invocation. In case of - * any error, 0 is returned. - */ - public synchronized int numberInSRM_SPACE_AVAILABLE(int surlUniqueID) { - - if (!checkConnection()) { - log - .error("PtP CHUNK DAO: numberInSRM_SPACE_AVAILABLE - unable to get a valid connection!"); - return 0; - } - - String str = "SELECT COUNT(rp.ID) FROM status_Put sp JOIN request_Put rp " - + "ON (sp.request_PutID=rp.ID) " - + "WHERE rp.targetSURL_uniqueID=? AND sp.statusCode=?"; - PreparedStatement stmt = null; - ResultSet rs = null; - try { - stmt = con.prepareStatement(str); - printWarnings(con.getWarnings()); - - /* Prepared statement spares DB-specific String notation! */ - stmt.setInt(1, surlUniqueID); - printWarnings(stmt.getWarnings()); - - stmt.setInt(2,statusCodeConverter.toDB(SRM_SPACE_AVAILABLE)); - printWarnings(stmt.getWarnings()); - - log.trace("PtP CHUNK DAO - numberInSRM_SPACE_AVAILABLE method: {}", stmt); - rs = stmt.executeQuery(); - printWarnings(stmt.getWarnings()); - - int numberSpaceAvailable = 0; - if (rs.next()) { - numberSpaceAvailable = rs.getInt(1); - } - return numberSpaceAvailable; - } catch (SQLException e) { - log.error("PtPChunkDAO! Unable to determine " - + "numberInSRM_SPACE_AVAILABLE! Returning 0! {}", e.getMessage(), e); - return 0; - } finally { - close(rs); - close(stmt); - } - } - - /** - * Method that retrieves all expired requests in SRM_SPACE_AVAILABLE state. 
- * - * @return a Map containing the ID of the request as key and the relative - * SURL as value - */ - public synchronized Map getExpiredSRM_SPACE_AVAILABLE() { - - Map ids = Maps.newHashMap(); - - if (!checkConnection()) { - log - .error("PtP CHUNK DAO: getExpiredSRM_SPACE_AVAILABLE - unable to get a valid connection!"); - return ids; - } - - String idsstr = "SELECT rp.ID, rp.targetSURL FROM " - + "status_Put sp JOIN (request_Put rp, request_queue rq) ON sp.request_PutID=rp.ID AND rp.request_queueID=rq.ID " - + "WHERE sp.statusCode=? AND UNIX_TIMESTAMP(NOW())-UNIX_TIMESTAMP(rq.timeStamp) >= rq.pinLifetime "; - - PreparedStatement stmt = null; - ResultSet rs = null; - - try { - stmt = con.prepareStatement(idsstr); - printWarnings(con.getWarnings()); - - stmt.setInt(1, statusCodeConverter.toDB(SRM_SPACE_AVAILABLE)); - printWarnings(stmt.getWarnings()); - - log.trace("PtP CHUNK DAO - getExpiredSRM_SPACE_AVAILABLE: {}", stmt); - - rs = stmt.executeQuery(); - printWarnings(stmt.getWarnings()); - - while (rs.next()) { - ids.put(rs.getLong("rp.ID"), rs.getString("rp.targetSURL")); - } - } catch (SQLException e) { - log.error("PtPChunkDAO! Unable to select expired " - + "SRM_SPACE_AVAILABLE chunks of PtP requests. {}", e.getMessage(), e); - - } finally { - close(rs); - close(stmt); - } - return ids; - } - - /** - * Method that retrieves all ptp requests in SRM_REQUEST_INPROGRESS state which can be - * considered as expired. 
- * - * @return a Map containing the ID of the request as key and the involved array of SURLs as - * value - */ - public synchronized List getExpiredSRM_REQUEST_INPROGRESS(long expirationTime) { - - List ids = Lists.newArrayList(); - - if (!checkConnection()) { - log.error( - "PtP CHUNK DAO: getExpiredSRM_REQUEST_INPROGRESS - unable to get a valid connection!"); - return ids; - } - - String query = "SELECT rq.ID FROM request_queue rq, request_Put rp, status_Put sp " - + "WHERE rq.ID = rp.request_queueID and rp.ID = sp.request_PutID " - + "AND rq.status=? AND rq.timeStamp <= DATE_SUB(CURRENT_TIMESTAMP(), INTERVAL ? SECOND)"; - - PreparedStatement stmt = null; - ResultSet rs = null; - - try { - stmt = con.prepareStatement(query); - printWarnings(con.getWarnings()); - - stmt.setLong(1, statusCodeConverter.toDB(SRM_REQUEST_INPROGRESS)); - printWarnings(stmt.getWarnings()); - - stmt.setLong(2, expirationTime); - printWarnings(stmt.getWarnings()); - - log.trace("PtP CHUNK DAO - getExpiredSRM_REQUEST_INPROGRESS: {}", stmt); - - rs = stmt.executeQuery(); - printWarnings(stmt.getWarnings()); - - while (rs.next()) { - ids.add(rs.getLong("rq.ID")); - } - } catch (SQLException e) { - log.error( - "PtPChunkDAO! Unable to select expired " - + "SRM_REQUEST_INPROGRESS chunks of PtP requests. {}", - e.getMessage(), e); - - } finally { - close(rs); - close(stmt); - } - return ids; - } - - /** - * Method that updates chunks in SRM_SPACE_AVAILABLE state, into SRM_SUCCESS. - * An array of long representing the primary key of each chunk is required. - * This is needed when the client invokes srmPutDone() In case of any error - * nothing happens and no exception is thrown, but proper messages get - * logged. 
- */ - public synchronized void transitSRM_SPACE_AVAILABLEtoSRM_SUCCESS(List ids) { - - if (!checkConnection()) { - log - .error("PtP CHUNK DAO: transitSRM_SPACE_AVAILABLEtoSRM_SUCCESS - unable to get a valid connection!"); - return; - } - - String str = "UPDATE " - + "status_Put sp JOIN (request_Put rp, request_queue rq) ON sp.request_PutID=rp.ID AND rp.request_queueID=rq.ID " - + "SET sp.statusCode=? " + "WHERE sp.statusCode=? AND rp.ID IN (" - + StringUtils.join(ids.toArray(), ',') + ")"; - - PreparedStatement stmt = null; - try { - stmt = con.prepareStatement(str); - printWarnings(con.getWarnings()); - - stmt.setInt(1, - statusCodeConverter.toDB(SRM_SUCCESS)); - printWarnings(stmt.getWarnings()); - - stmt.setInt(2, statusCodeConverter.toDB(SRM_SPACE_AVAILABLE)); - printWarnings(stmt.getWarnings()); - - log.trace("PtP CHUNK DAO - " - + "transitSRM_SPACE_AVAILABLEtoSRM_SUCCESS: {}", stmt); - - int count = stmt.executeUpdate(); - printWarnings(stmt.getWarnings()); - - if (count == 0) { - log.trace("PtPChunkDAO! No chunk of PtP request was " - + "transited from SRM_SPACE_AVAILABLE to SRM_SUCCESS."); - } else { - log.info("PtPChunkDAO! {} chunks of PtP requests were transited " - + "from SRM_SPACE_AVAILABLE to SRM_SUCCESS.", count); - } - } catch (SQLException e) { - log.error("PtPChunkDAO! Unable to transit chunks from " - + "SRM_SPACE_AVAILABLE to SRM_SUCCESS! {}", e.getMessage(), e); - } finally { - close(stmt); - } - } - - /** - * Method that updates chunks in SRM_SPACE_AVAILABLE state, into - * SRM_FILE_LIFETIME_EXPIRED. An array of Long representing the primary key - * of each chunk is required. This is needed when the client forgets to invoke - * srmPutDone(). In case of any error or exception, the returned int value - * will be zero or less than the input List size. 
- * - * @param the list of the request id to update - * - * @return The number of the updated records into the db - */ - public synchronized int transitExpiredSRM_SPACE_AVAILABLEtoSRM_FILE_LIFETIME_EXPIRED(Collection ids) { - - Preconditions.checkNotNull(ids, "Invalid list of id"); - - if (!checkConnection()) { - log.error("Unable to get a valid connection to the database!"); - return 0; - } - - String querySQL = "UPDATE status_Put sp " - + "JOIN (request_Put rp, request_queue rq) ON sp.request_PutID=rp.ID AND rp.request_queueID=rq.ID " - + "SET sp.statusCode=?, sp.explanation=? " - + "WHERE sp.statusCode=? AND UNIX_TIMESTAMP(NOW())-UNIX_TIMESTAMP(rq.timeStamp) >= rq.pinLifetime "; - - - if (!ids.isEmpty()) { - querySQL += "AND rp.ID IN (" + StringUtils.join(ids.toArray(), ',') + ")"; - } - - PreparedStatement stmt = null; - int count = 0; - try { - stmt = con.prepareStatement(querySQL); - printWarnings(con.getWarnings()); - - stmt.setInt(1, statusCodeConverter.toDB(SRM_FILE_LIFETIME_EXPIRED)); - printWarnings(stmt.getWarnings()); - - stmt.setString(2, "Expired pinLifetime"); - printWarnings(stmt.getWarnings()); - - stmt.setInt(3, statusCodeConverter.toDB(SRM_SPACE_AVAILABLE)); - printWarnings(stmt.getWarnings()); - - log.trace( - "PtP CHUNK DAO - transit SRM_SPACE_AVAILABLE to SRM_FILE_LIFETIME_EXPIRED: {}", - stmt); - - count = stmt.executeUpdate(); - printWarnings(stmt.getWarnings()); - - } catch (SQLException e) { - log.error( - "PtPChunkDAO! Unable to transit chunks from " - + "SRM_SPACE_AVAILABLE to SRM_FILE_LIFETIME_EXPIRED! {}", - e.getMessage(), e); - } finally { - close(stmt); - } - log.trace("PtPChunkDAO! {} chunks of PtP requests were transited " - + "from SRM_SPACE_AVAILABLE to SRM_FILE_LIFETIME_EXPIRED.", count); - return count; - } - - /** - * Method that updates enqueued requests selected by id into SRM_FAILURE. - * An array of Long representing the id of each request is required. 
- * - * @param the list of the request id to update - * - * @return The number of the updated records. Zero or less than the input list size in case of errors. - */ - public synchronized int transitExpiredSRM_REQUEST_INPROGRESStoSRM_FAILURE(Collection ids) { - - Preconditions.checkNotNull(ids, "Invalid list of id"); - - if (ids.isEmpty()) { - return 0; - } - - if (!checkConnection()) { - log.error("Unable to get a valid connection to the database!"); - return 0; - } - - String querySQL = "UPDATE request_queue rq, request_Put rp, status_Put sp " - + "SET rq.status=?, sp.statusCode=?, sp.explanation=? " - + "WHERE rq.ID = rp.request_queueID and rp.ID = sp.request_PutID " - + "AND rq.status=? AND rq.ID IN (" + buildInClauseForArray(ids.size()) + ")"; - - PreparedStatement stmt = null; - int count = 0; - try { - stmt = con.prepareStatement(querySQL); - printWarnings(con.getWarnings()); - - stmt.setInt(1, statusCodeConverter.toDB(SRM_FAILURE)); - printWarnings(stmt.getWarnings()); - - stmt.setInt(2, statusCodeConverter.toDB(SRM_FAILURE)); - printWarnings(stmt.getWarnings()); - - stmt.setString(3, "Request expired"); - printWarnings(stmt.getWarnings()); - - stmt.setInt(4, statusCodeConverter.toDB(SRM_REQUEST_INPROGRESS)); - printWarnings(stmt.getWarnings()); - - int i = 5; - for (Long id: ids) { - stmt.setLong(i, id); - printWarnings(stmt.getWarnings()); - i++; - } - - log.trace( - "PtP CHUNK DAO - transit SRM_REQUEST_INPROGRESS to SRM_FAILURE: {}", - stmt); - - count = stmt.executeUpdate(); - printWarnings(stmt.getWarnings()); - - } catch (SQLException e) { - log.error( - "PtPChunkDAO! Unable to transit chunks from " - + "SRM_REQUEST_INPROGRESS to SRM_FAILURE! {}", - e.getMessage(), e); - } finally { - close(stmt); - } - log.trace("PtPChunkDAO! 
{} chunks of PtP requests were transited " - + "from SRM_REQUEST_INPROGRESS to SRM_FAILURE.", count); - return count; - - } - - /** - * Method that transit chunks in SRM_SPACE_AVAILABLE to SRM_ABORTED, for the - * given SURL: the overall request status of the requests containing that - * chunk, is not changed! The TURL is set to null. Beware, that the chunks may - * be part of requests that have finished, or that still have not finished - * because other chunks are still being processed. - */ - public synchronized void transitSRM_SPACE_AVAILABLEtoSRM_ABORTED( - int surlUniqueID, String surl, String explanation) { - - if (!checkConnection()) { - log - .error("PtP CHUNK DAO: transitSRM_SPACE_AVAILABLEtoSRM_ABORTED - unable to get a valid connection!"); - return; - } - String str = "UPDATE " - + "status_Put sp JOIN (request_Put rp, request_queue rq) ON sp.request_PutID=rp.ID AND rp.request_queueID=rq.ID " - + "SET sp.statusCode=?, sp.explanation=?, sp.transferURL=NULL " - + "WHERE sp.statusCode=? AND (rp.targetSURL_uniqueID=? OR rp.targetSURL=?)"; - PreparedStatement stmt = null; - try { - stmt = con.prepareStatement(str); - printWarnings(con.getWarnings()); - - stmt.setInt(1, statusCodeConverter.toDB(SRM_ABORTED)); - printWarnings(stmt.getWarnings()); - - stmt.setString(2, explanation); - printWarnings(stmt.getWarnings()); - - stmt.setInt(3, statusCodeConverter.toDB(SRM_SPACE_AVAILABLE)); - printWarnings(stmt.getWarnings()); - - stmt.setInt(4, surlUniqueID); - printWarnings(stmt.getWarnings()); - - stmt.setString(5, surl); - printWarnings(stmt.getWarnings()); - - log.trace("PtP CHUNK DAO - " - + "transitSRM_SPACE_AVAILABLEtoSRM_ABORTED: {}", stmt); - int count = stmt.executeUpdate(); - printWarnings(stmt.getWarnings()); - - if (count > 0) { - log.info("PtP CHUNK DAO! {} chunks were transited from " - + "SRM_SPACE_AVAILABLE to SRM_ABORTED.", count); - } else { - log.trace("PtP CHUNK DAO! 
No chunks " - + "were transited from SRM_SPACE_AVAILABLE to SRM_ABORTED."); - } - } catch (SQLException e) { - log.error("PtP CHUNK DAO! Unable to " - + "transitSRM_SPACE_AVAILABLEtoSRM_ABORTED! {}", e.getMessage(), e); - } finally { - close(stmt); - } - } - - /** - * Auxiliary method used to close a Statement - */ - private void close(Statement stmt) { - - if (stmt != null) { - try { - stmt.close(); - } catch (Exception e) { - log.error("PTP CHUNK DAO! Unable to close Statement {} - Error: {}", - stmt.toString(), e.getMessage(), e); - } - } - } - - /** - * Auxiliary method used to close a ResultSet - */ - private void close(ResultSet rset) { - - if (rset != null) { - try { - rset.close(); - } catch (Exception e) { - log.error("PTP CHUNK DAO! Unable to close ResultSet! Error: {}", - e.getMessage(), e); - } - } - } - - /** - * Auxiliary method that sets up the connection to the DB. - */ - private boolean setUpConnection() { - - boolean response = false; - try { - Class.forName(driver); - con = DriverManager.getConnection(url, name, password); - printWarnings(con.getWarnings()); - response = con.isValid(0); - } catch (ClassNotFoundException | SQLException e) { - log.error("PTP CHUNK DAO! Exception in setUpConnection! {}", e.getMessage(), e); - } - return response; - } - - /** - * Auxiliary method that checks if time for resetting the connection has come, - * and eventually takes it down and up back again. - */ - private boolean checkConnection() { - - boolean response = true; - if (reconnect) { - log.debug("PTP CHUNK DAO! Reconnecting to DB! "); - takeDownConnection(); - response = setUpConnection(); - if (response) { - reconnect = false; - } - } - return response; - } - - /** - * Auxiliary method that takes down a connection to the DB. - */ - private void takeDownConnection() { - - if (con != null) { - try { - con.close(); - } catch (SQLException e) { - log.error("PTP CHUNK DAO! Exception in takeDownConnection method - " - + "could not close connection! 
{}", e.getMessage(), e); - } - } - } - - public synchronized int updateStatus(int[] surlsUniqueIDs, String[] surls, - TStatusCode statusCode, String explanation) { - - if (explanation == null) { - throw new IllegalArgumentException("Unable to perform the updateStatus, " - + "invalid arguments: explanation=" + explanation); - } - return doUpdateStatus(null, surlsUniqueIDs, surls, statusCode, explanation, false, - true); - } - - public synchronized int updateStatus(TRequestToken requestToken, - int[] surlsUniqueIDs, String[] surls, TStatusCode statusCode, - String explanation) { - - if (requestToken == null || requestToken.getValue().trim().isEmpty() - || explanation == null) { - throw new IllegalArgumentException("Unable to perform the updateStatus, " - + "invalid arguments: requestToken=" + requestToken + " explanation=" - + explanation); - } - return doUpdateStatus(requestToken, surlsUniqueIDs, surls, statusCode, - explanation, true, true); - } - - private int doUpdateStatus(TRequestToken requestToken, int[] surlsUniqueIDs, - String[] surls, TStatusCode statusCode, String explanation, - boolean withRequestToken, boolean withExplaination) - throws IllegalArgumentException { - - if ((withRequestToken && requestToken == null) - || (withExplaination && explanation == null)) { - throw new IllegalArgumentException("Unable to perform the updateStatus, " - + "invalid arguments: withRequestToken=" + withRequestToken - + " requestToken=" + requestToken + " withExplaination=" - + withExplaination + " explaination=" + explanation); - } - if (!checkConnection()) { - log - .error("PTP CHUNK DAO: updateStatus - unable to get a valid connection!"); - return 0; - } - String str = "UPDATE status_Put sp JOIN (request_Put rp, request_queue rq) ON sp.request_PutID=rp.ID AND " - + "rp.request_queueID=rq.ID " + "SET sp.statusCode=? 
"; - if (withExplaination) { - str += " , " + buildExpainationSet(explanation); - } - str += " WHERE "; - if (withRequestToken) { - str += buildTokenWhereClause(requestToken) + " AND "; - } - str += " ( rp.targetSURL_uniqueID IN " - + makeSURLUniqueIDWhere(surlsUniqueIDs) + " AND rp.targetSURL IN " - + makeSurlString(surls) + " ) "; - PreparedStatement stmt = null; - int count = 0; - try { - stmt = con.prepareStatement(str); - printWarnings(con.getWarnings()); - stmt.setInt(1, statusCodeConverter.toDB(statusCode)); - printWarnings(stmt.getWarnings()); - - log.trace("PTP CHUNK DAO - updateStatus: {}", stmt); - count = stmt.executeUpdate(); - printWarnings(stmt.getWarnings()); - if (count == 0) { - log.trace("PTP CHUNK DAO! No chunk of PTP request was updated to {}.", - statusCode); - } else { - log.info("PTP CHUNK DAO! {} chunks of PTP requests were updated " - + "to {}.", count, statusCode); - } - } catch (SQLException e) { - log.error("PTP CHUNK DAO! Unable to updated from to {}! {}", statusCode, - e.getMessage(), e); - } finally { - close(stmt); - } - return count; - } - - public synchronized int updateStatusOnMatchingStatus( - TRequestToken requestToken, TStatusCode expectedStatusCode, - TStatusCode newStatusCode, String explanation) { - - if (requestToken == null || requestToken.getValue().trim().isEmpty() - || explanation == null) { - throw new IllegalArgumentException( - "Unable to perform the updateStatusOnMatchingStatus, " - + "invalid arguments: requestToken=" + requestToken + " explanation=" - + explanation); - } - return doUpdateStatusOnMatchingStatus(requestToken, null, null, - expectedStatusCode, newStatusCode, explanation, true, false, true); - } - - public synchronized int updateStatusOnMatchingStatus(int[] surlsUniqueIDs, - String[] surls, TStatusCode expectedStatusCode, TStatusCode newStatusCode, - String explanation) { - - if (surlsUniqueIDs == null || surls == null || explanation == null - || surlsUniqueIDs.length == 0 || surls.length == 0 - || 
surlsUniqueIDs.length != surls.length) { - throw new IllegalArgumentException( - "Unable to perform the updateStatusOnMatchingStatus, " - + "invalid arguments: surlsUniqueIDs=" + surlsUniqueIDs + " surls=" - + surls + " explanation=" + explanation); - } - return doUpdateStatusOnMatchingStatus(null, surlsUniqueIDs, surls, - expectedStatusCode, newStatusCode, explanation, false, true, true); - } - - public synchronized int updateStatusOnMatchingStatus( - TRequestToken requestToken, int[] surlsUniqueIDs, String[] surls, - TStatusCode expectedStatusCode, TStatusCode newStatusCode) { - - if (requestToken == null || requestToken.getValue().trim().isEmpty() - || surlsUniqueIDs == null || surls == null || surlsUniqueIDs.length == 0 - || surls.length == 0 || surlsUniqueIDs.length != surls.length) { - throw new IllegalArgumentException( - "Unable to perform the updateStatusOnMatchingStatus, " - + "invalid arguments: requestToken=" + requestToken - + "surlsUniqueIDs=" + surlsUniqueIDs + " surls=" + surls); - } - return doUpdateStatusOnMatchingStatus(requestToken, surlsUniqueIDs, surls, - expectedStatusCode, newStatusCode, null, true, true, false); - } - - private int doUpdateStatusOnMatchingStatus(TRequestToken requestToken, - int[] surlsUniqueIDs, String[] surls, TStatusCode expectedStatusCode, - TStatusCode newStatusCode, String explanation, boolean withRequestToken, - boolean withSurls, boolean withExplanation) { - - if ((withRequestToken && requestToken == null) - || (withExplanation && explanation == null) - || (withSurls && (surlsUniqueIDs == null || surls == null))) { - throw new IllegalArgumentException( - "Unable to perform the doUpdateStatusOnMatchingStatus, " - + "invalid arguments: withRequestToken=" + withRequestToken - + " requestToken=" + requestToken + " withSurls=" + withSurls - + " surlsUniqueIDs=" + surlsUniqueIDs + " surls=" + surls - + " withExplaination=" + withExplanation + " explanation=" - + explanation); - } - if (!checkConnection()) { - log - 
.error("PTP CHUNK DAO: updateStatusOnMatchingStatus - unable to get a valid connection!"); - return 0; - } - String str = "UPDATE " - + "status_Put sp JOIN (request_Put rp, request_queue rq) ON sp.request_PutID=rp.ID AND rp.request_queueID=rq.ID " - + "SET sp.statusCode=? "; - if (withExplanation) { - str += " , " + buildExpainationSet(explanation); - } - str += " WHERE sp.statusCode=? "; - if (withRequestToken) { - str += " AND " + buildTokenWhereClause(requestToken); - } - if (withSurls) { - str += " AND " + buildSurlsWhereClause(surlsUniqueIDs, surls); - } - - int count = 0; - PreparedStatement stmt = null; - try { - stmt = con.prepareStatement(str); - printWarnings(con.getWarnings()); - stmt.setInt(1, statusCodeConverter.toDB(newStatusCode)); - printWarnings(stmt.getWarnings()); - - stmt.setInt(2, statusCodeConverter.toDB(expectedStatusCode)); - printWarnings(stmt.getWarnings()); - - log.trace("PTP CHUNK DAO - updateStatusOnMatchingStatus: {}", stmt); - count = stmt.executeUpdate(); - printWarnings(stmt.getWarnings()); - if (count == 0) { - log.trace("PTP CHUNK DAO! No chunk of PTP request was updated " - + "from {} to {}.", expectedStatusCode, newStatusCode); - } else { - log.debug("PTP CHUNK DAO! {} chunks of PTP requests were updated " - + "from {} to {}.", count, expectedStatusCode, newStatusCode); - } - } catch (SQLException e) { - log.error("PTP CHUNK DAO! Unable to updated from {} to {}! 
Error: {}", - expectedStatusCode, newStatusCode, e.getMessage(), e); - } finally { - close(stmt); - } - return count; - } - - public Collection find(int[] surlsUniqueIDs, String[] surlsArray, String dn) { - - if (surlsUniqueIDs == null || surlsUniqueIDs.length == 0 - || surlsArray == null || surlsArray.length == 0 || dn == null) { - throw new IllegalArgumentException("Unable to perform the find, " - + "invalid arguments: surlsUniqueIDs=" + surlsUniqueIDs - + " surlsArray=" + surlsArray + " dn=" + dn); - } - return find(surlsUniqueIDs, surlsArray, dn, true); - } - - public Collection find(int[] surlsUniqueIDs, String[] surlsArray) { - - if (surlsUniqueIDs == null || surlsUniqueIDs.length == 0 - || surlsArray == null || surlsArray.length == 0) { - throw new IllegalArgumentException("Unable to perform the find, " - + "invalid arguments: surlsUniqueIDs=" + surlsUniqueIDs - + " surlsArray=" + surlsArray); - } - return find(surlsUniqueIDs, surlsArray, null, false); - } - - - private List chunkTOfromResultSet(ResultSet rs) - throws SQLException{ - - List results = Lists.newArrayList(); - while (rs.next()) { - - PtPChunkDataTO chunkDataTO = new PtPChunkDataTO(); - - chunkDataTO.setFileStorageType(rs - .getString("rq.config_FileStorageTypeID")); - chunkDataTO.setOverwriteOption(rs.getString("rq.config_OverwriteID")); - chunkDataTO.setTimeStamp(rs.getTimestamp("rq.timeStamp")); - chunkDataTO.setPinLifetime(rs.getInt("rq.pinLifetime")); - chunkDataTO.setFileLifetime(rs.getInt("rq.fileLifetime")); - chunkDataTO.setSpaceToken(rs.getString("rq.s_token")); - chunkDataTO.setClientDN(rs.getString("rq.client_dn")); - - /** - * This code is only for the 1.3.18. This is a workaround to get FQANs - * using the proxy field on request_queue. The FE use the proxy field of - * request_queue to insert a single FQAN string containing all FQAN - * separated by the "#" char. The proxy is a BLOB, hence it has to be - * properly converted in string. 
- */ - java.sql.Blob blob = rs.getBlob("rq.proxy"); - if (!rs.wasNull() && blob != null) { - byte[] bdata = blob.getBytes(1, (int) blob.length()); - chunkDataTO.setVomsAttributes(new String(bdata)); - } - chunkDataTO.setPrimaryKey(rs.getLong("rp.ID")); - chunkDataTO.setToSURL(rs.getString("rp.targetSURL")); - - chunkDataTO.setNormalizedStFN(rs - .getString("rp.normalized_targetSURL_StFN")); - int uniqueID = rs.getInt("rp.targetSURL_uniqueID"); - if (!rs.wasNull()) { - chunkDataTO.setSurlUniqueID(Integer.valueOf(uniqueID)); - } - - chunkDataTO.setExpectedFileSize(rs.getLong("rp.expectedFileSize")); - chunkDataTO.setRequestToken(rs.getString("rq.r_token")); - chunkDataTO.setStatus(rs.getInt("sp.statusCode")); - results.add(chunkDataTO); - } - - return results; - } - - - - public synchronized List findActivePtPsOnSURLs(List surls){ - - if (surls == null || surls.isEmpty()){ - throw new IllegalArgumentException("cannot find active active " - + "PtPs for an empty or null list of SURLs!"); - } - - ResultSet rs = null; - PreparedStatement stat = null; - - try { - String query = "SELECT rq.ID, rq.r_token, rq.config_FileStorageTypeID, rq.config_OverwriteID, rq.timeStamp, rq.pinLifetime, rq.fileLifetime, " - + "rq.s_token, rq.client_dn, rq.proxy, rp.ID, rp.targetSURL, rp.expectedFileSize, rp.normalized_targetSURL_StFN, rp.targetSURL_uniqueID, " - + "sp.statusCode " - + "FROM request_queue rq JOIN (request_Put rp, status_Put sp) " - + "ON (rp.request_queueID=rq.ID AND sp.request_PutID=rp.ID) " - + "WHERE ( rp.targetSURL in "+ makeSurlString((String[])surls.toArray()) +" )" - + "AND sp.statusCode = 24"; - - stat = con.prepareStatement(query); - printWarnings(con.getWarnings()); - - rs = stat.executeQuery(); - List results = chunkTOfromResultSet(rs); - - return results; - - } catch (SQLException e) { - - log.error("findActivePtPsOnSURLs(): SQL Error: {}", e.getMessage(),e); - return Collections.emptyList(); - - } finally { - close(rs); - close(stat); - } - } - - - public 
synchronized List findActivePtPsOnSURL(String surl) { - return findActivePtPsOnSURL(surl, null); - } - - public synchronized List findActivePtPsOnSURL(String surl, - String currentRequestToken) { - - if (surl == null || surl.isEmpty()) { - throw new IllegalArgumentException("cannot find active active " - + "PtPs for an empty or null SURL!"); - } - - ResultSet rs = null; - PreparedStatement stat = null; - - try { - - String query = "SELECT rq.ID, rq.r_token, rq.config_FileStorageTypeID, rq.config_OverwriteID, rq.timeStamp, rq.pinLifetime, rq.fileLifetime, " - + "rq.s_token, rq.client_dn, rq.proxy, rp.ID, rp.targetSURL, rp.expectedFileSize, rp.normalized_targetSURL_StFN, rp.targetSURL_uniqueID, " - + "sp.statusCode " - + "FROM request_queue rq JOIN (request_Put rp, status_Put sp) " - + "ON (rp.request_queueID=rq.ID AND sp.request_PutID=rp.ID) " - + "WHERE ( rp.targetSURL = ? and sp.statusCode=24 )"; - - if (currentRequestToken != null){ - query += "AND rq.r_token != ?"; - } - - stat = con.prepareStatement(query); - printWarnings(con.getWarnings()); - - stat.setString(1, surl); - - if (currentRequestToken != null){ - stat.setString(2, currentRequestToken); - } - - rs = stat.executeQuery(); - List results = chunkTOfromResultSet(rs); - - return results; - - } catch (SQLException e) { - - log.error("findActivePtPsOnSURL(): SQL Error: {}", e.getMessage(),e); - return Collections.emptyList(); - - } finally { - close(rs); - close(stat); - } - - } - - private synchronized Collection find(int[] surlsUniqueIDs, - String[] surlsArray, String dn, boolean withDn) - throws IllegalArgumentException { - - if ((withDn && dn == null) || surlsUniqueIDs == null - || surlsUniqueIDs.length == 0 || surlsArray == null - || surlsArray.length == 0) { - throw new IllegalArgumentException("Unable to perform the find, " - + "invalid arguments: surlsUniqueIDs=" + surlsUniqueIDs - + " surlsArray=" + surlsArray + " withDn=" + withDn + " dn=" + dn); - } - if (!checkConnection()) { - log.error("PtP 
CHUNK DAO: find - unable to get a valid connection!"); - return Lists.newArrayList(); - } - PreparedStatement find = null; - ResultSet rs = null; - try { - // get chunks of the request - String str = "SELECT rq.ID, rq.r_token, rq.config_FileStorageTypeID, rq.config_OverwriteID, rq.timeStamp, rq.pinLifetime, rq.fileLifetime, " - + "rq.s_token, rq.client_dn, rq.proxy, rp.ID, rp.targetSURL, rp.expectedFileSize, rp.normalized_targetSURL_StFN, rp.targetSURL_uniqueID, " - + "sp.statusCode " - + "FROM request_queue rq JOIN (request_Put rp, status_Put sp) " - + "ON (rp.request_queueID=rq.ID AND sp.request_PutID=rp.ID) " - + "WHERE ( rp.targetSURL_uniqueID IN " - + makeSURLUniqueIDWhere(surlsUniqueIDs) - + " AND rp.targetSURL IN " - + makeSurlString(surlsArray) + " )"; - - if (withDn) { - str += " AND rq.client_dn=\'" + dn + "\'"; - } - - find = con.prepareStatement(str); - printWarnings(con.getWarnings()); - - List list = Lists.newArrayList(); - - log.trace("PtP CHUNK DAO - find method: {}", find); - rs = find.executeQuery(); - printWarnings(find.getWarnings()); - PtPChunkDataTO chunkDataTO = null; - while (rs.next()) { - chunkDataTO = new PtPChunkDataTO(); - chunkDataTO.setFileStorageType(rs - .getString("rq.config_FileStorageTypeID")); - chunkDataTO.setOverwriteOption(rs.getString("rq.config_OverwriteID")); - chunkDataTO.setTimeStamp(rs.getTimestamp("rq.timeStamp")); - chunkDataTO.setPinLifetime(rs.getInt("rq.pinLifetime")); - chunkDataTO.setFileLifetime(rs.getInt("rq.fileLifetime")); - chunkDataTO.setSpaceToken(rs.getString("rq.s_token")); - chunkDataTO.setClientDN(rs.getString("rq.client_dn")); - - /** - * This code is only for the 1.3.18. This is a workaround to get FQANs - * using the proxy field on request_queue. The FE use the proxy field of - * request_queue to insert a single FQAN string containing all FQAN - * separated by the "#" char. The proxy is a BLOB, hence it has to be - * properly converted in string. 
- */ - java.sql.Blob blob = rs.getBlob("rq.proxy"); - if (!rs.wasNull() && blob != null) { - byte[] bdata = blob.getBytes(1, (int) blob.length()); - chunkDataTO.setVomsAttributes(new String(bdata)); - } - chunkDataTO.setPrimaryKey(rs.getLong("rp.ID")); - chunkDataTO.setToSURL(rs.getString("rp.targetSURL")); - - chunkDataTO.setNormalizedStFN(rs - .getString("rp.normalized_targetSURL_StFN")); - int uniqueID = rs.getInt("rp.targetSURL_uniqueID"); - if (!rs.wasNull()) { - chunkDataTO.setSurlUniqueID(Integer.valueOf(uniqueID)); - } - - chunkDataTO.setExpectedFileSize(rs.getLong("rp.expectedFileSize")); - chunkDataTO.setRequestToken(rs.getString("rq.r_token")); - chunkDataTO.setStatus(rs.getInt("sp.statusCode")); - list.add(chunkDataTO); - } - return list; - } catch (SQLException e) { - log.error("PTP CHUNK DAO: {}", e.getMessage(), e); - /* return empty Collection! */ - return Lists.newArrayList(); - } finally { - close(rs); - close(find); - } - } - - public synchronized List findProtocols(long requestQueueId) { - - if (!checkConnection()) { - log.error("PtP CHUNK DAO: find - unable to get a valid connection!"); - return Lists.newArrayList(); - } - String str = null; - PreparedStatement find = null; - ResultSet rs = null; - try { - str = "SELECT tp.config_ProtocolsID " - + "FROM request_TransferProtocols tp " + "WHERE tp.request_queueID=?"; - - find = con.prepareStatement(str); - printWarnings(con.getWarnings()); - - List protocols = Lists.newArrayList(); - find.setLong(1, requestQueueId); - printWarnings(find.getWarnings()); - - log.trace("PtP CHUNK DAO - findProtocols method: {}", find); - rs = find.executeQuery(); - printWarnings(find.getWarnings()); - - while (rs.next()) { - protocols.add(rs.getString("tp.config_ProtocolsID")); - } - - return protocols; - } catch (SQLException e) { - log.error("PTP CHUNK DAO: {}", e.getMessage(), e); - /* return empty Collection! 
*/ - return Lists.newArrayList(); - } finally { - close(rs); - close(find); - } - } - - private String buildExpainationSet(String explanation) { - - return " sp.explanation='" + explanation + "' "; - } - - private String buildTokenWhereClause(TRequestToken requestToken) { - - return " rq.r_token='" + requestToken.toString() + "' "; - } - - private String buildSurlsWhereClause(int[] surlsUniqueIDs, String[] surls) { - - return " ( rp.targetSURL_uniqueID IN " - + makeSURLUniqueIDWhere(surlsUniqueIDs) + " AND rp.targetSURL IN " - + makeSurlString(surls) + " ) "; - } - - /** - * Method that returns a String containing all Surl's IDs. - */ - private String makeSURLUniqueIDWhere(int[] surlUniqueIDs) { - - StringBuilder sb = new StringBuilder("("); - for (int i = 0; i < surlUniqueIDs.length; i++) { - if (i > 0) { - sb.append(","); - } - sb.append(surlUniqueIDs[i]); - } - sb.append(")"); - return sb.toString(); - } - - /** - * Method that returns a String containing all Surls. - */ - private String makeSurlString(String[] surls) { - - StringBuilder sb = new StringBuilder("("); - int n = surls.length; - - for (int i = 0; i < n; i++) { - - SURL requestedSURL; - - try { - requestedSURL = SURL.makeSURLfromString(surls[i]); - } catch (NamespaceException e) { - log.error(e.getMessage(), e); - log.debug("Skip '{}' during query creation", surls[i]); - continue; - } - - sb.append("'"); - sb.append(requestedSURL.getNormalFormAsString()); - sb.append("','"); - sb.append(requestedSURL.getQueryFormAsString()); - sb.append("'"); - - if (i < (n - 1)) { - sb.append(","); - } - } - - sb.append(")"); - return sb.toString(); - } - -} \ No newline at end of file diff --git a/src/main/java/it/grid/storm/catalogs/PtPChunkDataTO.java b/src/main/java/it/grid/storm/catalogs/PtPChunkDataTO.java deleted file mode 100644 index 9f1a5c7a3..000000000 --- a/src/main/java/it/grid/storm/catalogs/PtPChunkDataTO.java +++ /dev/null @@ -1,342 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica 
Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.catalogs; - -import it.grid.storm.common.types.TURLPrefix; -import it.grid.storm.config.Configuration; -import it.grid.storm.namespace.model.Protocol; -import it.grid.storm.srm.types.TFileStorageType; -import it.grid.storm.srm.types.TOverwriteMode; -import it.grid.storm.srm.types.TStatusCode; - -import java.sql.Timestamp; -import java.util.List; - -/** - * Class that represents a row in the Persistence Layer: this is all raw data - * referring to the PtPChunkData proper, that is, String and primitive types. - * - * Each field is initialized with default values as per SRM 2.2 specification: - * protocolList GSIFTP fileStorageType VOLATILE overwriteMode NEVER status - * SRM_REQUEST_QUEUED - * - * All other fields are 0 if int, or a white space if String. 
- * - * @author EGRID ICTP - * @version 2.0 - * @date June 2005 - */ -public class PtPChunkDataTO { - - private static final String FQAN_SEPARATOR = "#"; - /* Database table request_Get fields BEGIN */ - private long primaryKey = -1; // ID primary key of status_Put record in DB - private String toSURL = " "; - private long expectedFileSize = 0; - private String normalizedStFN = null; - private Integer surlUniqueID = null; - /* Database table request_Get fields END */ - - private String requestToken = " "; - private int pinLifetime = -1; - private int fileLifetime = -1; - private String fileStorageType = null; // initialised in constructor - private String spaceToken = " "; - private List protocolList = null; // initialised in constructor - private String overwriteOption = null; // initialised in constructor - private int status; // initialised in constructor - private String errString = " "; - private String turl = " "; - private Timestamp timeStamp = null; - - private String clientDN = null; - private String vomsAttributes = null; - - - public PtPChunkDataTO() { - - this.fileStorageType = FileStorageTypeConverter.getInstance().toDB( - TFileStorageType.getTFileStorageType(Configuration.getInstance() - .getDefaultFileStorageType())); - TURLPrefix protocolPreferences = new TURLPrefix(); - protocolPreferences.addProtocol(Protocol.GSIFTP); - this.protocolList = TransferProtocolListConverter.toDB(protocolPreferences); - this.overwriteOption = OverwriteModeConverter.getInstance().toDB( - TOverwriteMode.NEVER); - this.status = StatusCodeConverter.getInstance().toDB( - TStatusCode.SRM_REQUEST_QUEUED); - } - - public long primaryKey() { - - return primaryKey; - } - - public void setPrimaryKey(long n) { - - primaryKey = n; - } - - public String requestToken() { - - return requestToken; - } - - public void setRequestToken(String s) { - - requestToken = s; - } - - public Timestamp timeStamp() { - - return timeStamp; - } - - public void setTimeStamp(Timestamp timeStamp) { - - 
this.timeStamp = timeStamp; - } - - public String toSURL() { - - return toSURL; - } - - public void setToSURL(String s) { - - toSURL = s; - } - - /** - * @return the normalizedStFN - */ - public String normalizedStFN() { - - return normalizedStFN; - } - - /** - * @param normalizedStFN - * the normalizedStFN to set - */ - public void setNormalizedStFN(String normalizedStFN) { - - this.normalizedStFN = normalizedStFN; - } - - /** - * @return the surlUniqueID - */ - public Integer surlUniqueID() { - - return surlUniqueID; - } - - /** - * @param surlUniqueID - * the surlUniqueID to set - */ - public void setSurlUniqueID(Integer surlUniqueID) { - - this.surlUniqueID = surlUniqueID; - } - - public int pinLifetime() { - - return pinLifetime; - } - - public void setPinLifetime(int n) { - - pinLifetime = n; - } - - public int fileLifetime() { - - return fileLifetime; - } - - public void setFileLifetime(int n) { - - fileLifetime = n; - } - - public String fileStorageType() { - - return fileStorageType; - } - - /** - * Method that sets the FileStorageType: if it is null nothing gets set. The - * deafult value is Permanent. - */ - public void setFileStorageType(String s) { - - if (s != null) - fileStorageType = s; - } - - public String spaceToken() { - - return spaceToken; - } - - public void setSpaceToken(String s) { - - spaceToken = s; - } - - public long expectedFileSize() { - - return expectedFileSize; - } - - public void setExpectedFileSize(long l) { - - expectedFileSize = l; - } - - public List protocolList() { - - return protocolList; - } - - public void setProtocolList(List l) { - - if ((l != null) && (!l.isEmpty())) - protocolList = l; - } - - public String overwriteOption() { - - return overwriteOption; - } - - /** - * Method that sets the OverwriteMode: if it is null nothing gets set. The - * deafult value is Never. 
- */ - public void setOverwriteOption(String s) { - - if (s != null) - overwriteOption = s; - } - - public int status() { - - return status; - } - - public void setStatus(int n) { - - status = n; - } - - public String errString() { - - return errString; - } - - public void setErrString(String s) { - - errString = s; - } - - public String transferURL() { - - return turl; - } - - public void setTransferURL(String s) { - - turl = s; - } - - public String clientDN() { - - return clientDN; - } - - public void setClientDN(String s) { - - clientDN = s; - } - - public String vomsAttributes() { - - return vomsAttributes; - } - - public void setVomsAttributes(String s) { - - vomsAttributes = s; - } - - public void setVomsAttributes(String[] fqaNsAsString) { - - vomsAttributes = ""; - for (int i = 0; i < fqaNsAsString.length; i++) { - vomsAttributes += fqaNsAsString[i]; - if (i < fqaNsAsString.length - 1) { - vomsAttributes += FQAN_SEPARATOR; - } - } - - } - - public String[] vomsAttributesArray() { - - return vomsAttributes.split(FQAN_SEPARATOR); - } - - public String toString() { - - StringBuilder sb = new StringBuilder(); - sb.append(primaryKey); - sb.append(" "); - sb.append(requestToken); - sb.append(" "); - sb.append(toSURL); - sb.append(" "); - sb.append(normalizedStFN); - sb.append(" "); - sb.append(surlUniqueID); - sb.append(" "); - sb.append(pinLifetime); - sb.append(" "); - sb.append(fileLifetime); - sb.append(" "); - sb.append(fileStorageType); - sb.append(" "); - sb.append(spaceToken); - sb.append(" "); - sb.append(expectedFileSize); - sb.append(" "); - sb.append(protocolList); - sb.append(" "); - sb.append(overwriteOption); - sb.append(" "); - sb.append(status); - sb.append(" "); - sb.append(errString); - sb.append(" "); - sb.append(turl); - return sb.toString(); - } - -} diff --git a/src/main/java/it/grid/storm/catalogs/PtPData.java b/src/main/java/it/grid/storm/catalogs/PtPData.java deleted file mode 100644 index bd6ce75b7..000000000 --- 
a/src/main/java/it/grid/storm/catalogs/PtPData.java +++ /dev/null @@ -1,59 +0,0 @@ -package it.grid.storm.catalogs; - -import it.grid.storm.srm.types.TFileStorageType; -import it.grid.storm.srm.types.TLifeTimeInSeconds; -import it.grid.storm.srm.types.TOverwriteMode; -import it.grid.storm.srm.types.TSizeInBytes; -import it.grid.storm.srm.types.TSpaceToken; - -public interface PtPData extends FileTransferData { - - /** - * Method that returns the space token supplied for this chunk of the srm - * request. - */ - public TSpaceToken getSpaceToken(); - - /** - * Method that returns the requested pin life time for this chunk of the srm - * request. - */ - public TLifeTimeInSeconds pinLifetime(); - - /** - * Method that returns the requested file life time for this chunk of the srm - * request. - */ - public TLifeTimeInSeconds fileLifetime(); - - /** - * Method that returns the fileStorageType for this chunk of the srm request. - */ - public TFileStorageType fileStorageType(); - - /** - * Method that returns the knownSizeOfThisFile supplied with this chunk of the - * srm request. - */ - public TSizeInBytes expectedFileSize(); - - /** - * Method that returns the overwriteOption specified in the srm request. - */ - public TOverwriteMode overwriteOption(); - - /** - * Method that sets the status of this request to SRM_SPACE_AVAILABLE; it - * needs the explanation String which describes the situation in greater - * detail; if a null is passed, then an empty String is used as explanation. - */ - public void changeStatusSRM_SPACE_AVAILABLE(String explanation); - - /** - * Method that sets the status of this request to SRM_DUPLICATION_ERROR; it - * needs the explanation String which describes the situation in greater - * detail; if a null is passed, then an empty String is used as explanation. 
- */ - public void changeStatusSRM_DUPLICATION_ERROR(String explanation); - -} diff --git a/src/main/java/it/grid/storm/catalogs/PtPPersistentChunkData.java b/src/main/java/it/grid/storm/catalogs/PtPPersistentChunkData.java deleted file mode 100644 index beee449d0..000000000 --- a/src/main/java/it/grid/storm/catalogs/PtPPersistentChunkData.java +++ /dev/null @@ -1,199 +0,0 @@ -/* - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by - * applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS - * OF ANY KIND, either express or implied. See the License for the specific - * language governing permissions and limitations under the License. - */ - -package it.grid.storm.catalogs; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import it.grid.storm.common.types.TURLPrefix; -import it.grid.storm.griduser.GridUserInterface; -import it.grid.storm.srm.types.TFileStorageType; -import it.grid.storm.srm.types.TLifeTimeInSeconds; -import it.grid.storm.srm.types.TOverwriteMode; -import it.grid.storm.srm.types.TRequestToken; -import it.grid.storm.srm.types.TReturnStatus; -import it.grid.storm.srm.types.TSURL; -import it.grid.storm.srm.types.TSizeInBytes; -import it.grid.storm.srm.types.TSpaceToken; -import it.grid.storm.srm.types.TTURL; - -/** - * This class represents a PrepareToPutChunkData, that is part of a multifile - * PrepareToPut srm request. 
It contains data about: the requestToken, the - * toSURL, the requested lifeTime of pinning, the requested lifetime of - * volatile, the requested fileStorageType and any available spaceToken, the - * expectedFileSize, the desired transferProtocols in order of preference, the - * overwriteOption to be applied in case the file already exists, the - * transferURL for the supplied SURL. - * - * @author EGRID - ICTP Trieste - * @date June, 2005 - * @version 2.0 - */ -public class PtPPersistentChunkData extends IdentityPtPData implements - PersistentChunkData { - - private static final Logger log = LoggerFactory - .getLogger(PtPPersistentChunkData.class); - - /** - * long representing the primary key for the persistence layer, in the - * status_Put table - */ - private long primaryKey = -1; - - /** - * This is the requestToken of the multifile srm request to which this chunk - * belongs - */ - private final TRequestToken requestToken; - - public PtPPersistentChunkData(GridUserInterface auth, - TRequestToken requestToken, TSURL toSURL, TLifeTimeInSeconds pinLifetime, - TLifeTimeInSeconds fileLifetime, TFileStorageType fileStorageType, - TSpaceToken spaceToken, TSizeInBytes expectedFileSize, - TURLPrefix transferProtocols, TOverwriteMode overwriteOption, - TReturnStatus status, TTURL transferURL) - throws InvalidPtPPersistentChunkDataAttributesException, - InvalidPtPDataAttributesException, - InvalidFileTransferDataAttributesException, - InvalidSurlRequestDataAttributesException { - - super(auth, toSURL, pinLifetime, fileLifetime, fileStorageType, spaceToken, - expectedFileSize, transferProtocols, overwriteOption, status, transferURL); - if (requestToken == null) { - log.debug("PtPPersistentChunkData: requestToken is null!"); - throw new InvalidPtPPersistentChunkDataAttributesException(requestToken, - toSURL, pinLifetime, fileLifetime, fileStorageType, spaceToken, - expectedFileSize, transferProtocols, overwriteOption, status, - transferURL); - } - this.requestToken = 
requestToken; - } - - /** - * Method used to get the primary key used in the persistence layer! - */ - @Override - public long getPrimaryKey() { - - return primaryKey; - } - - /** - * Method used to set the primary key to be used in the persistence layer! - */ - public void setPrimaryKey(long l) { - - primaryKey = l; - } - - /** - * Method that returns the requestToken of the srm request to which this chunk - * belongs. - */ - @Override - public TRequestToken getRequestToken() { - - return requestToken; - } - - @Override - public long getIdentifier() { - - return getPrimaryKey(); - } - - /* - * (non-Javadoc) - * - * @see java.lang.Object#hashCode() - */ - @Override - public int hashCode() { - - final int prime = 31; - int result = super.hashCode(); - result = prime * result + (int) (primaryKey ^ (primaryKey >>> 32)); - result = prime * result - + ((requestToken == null) ? 0 : requestToken.hashCode()); - return result; - } - - /* - * (non-Javadoc) - * - * @see java.lang.Object#equals(java.lang.Object) - */ - @Override - public boolean equals(Object obj) { - - if (this == obj) { - return true; - } - if (!super.equals(obj)) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - PtPPersistentChunkData other = (PtPPersistentChunkData) obj; - if (primaryKey != other.primaryKey) { - return false; - } - if (requestToken == null) { - if (other.requestToken != null) { - return false; - } - } else if (!requestToken.equals(other.requestToken)) { - return false; - } - return true; - } - - /* - * (non-Javadoc) - * - * @see java.lang.Object#toString() - */ - @Override - public String toString() { - - StringBuilder builder = new StringBuilder(); - builder.append("PtPPersistentChunkData [primaryKey="); - builder.append(primaryKey); - builder.append(", requestToken="); - builder.append(requestToken); - builder.append(", spaceToken="); - builder.append(spaceToken); - builder.append(", pinLifetime="); - builder.append(pinLifetime); - builder.append(", 
fileLifetime="); - builder.append(fileLifetime); - builder.append(", fileStorageType="); - builder.append(fileStorageType); - builder.append(", overwriteOption="); - builder.append(overwriteOption); - builder.append(", expectedFileSize="); - builder.append(expectedFileSize); - builder.append(", transferProtocols="); - builder.append(transferProtocols); - builder.append(", SURL="); - builder.append(SURL); - builder.append(", status="); - builder.append(status); - builder.append(", transferURL="); - builder.append(transferURL); - builder.append("]"); - return builder.toString(); - } -} diff --git a/src/main/java/it/grid/storm/catalogs/ReducedBoLChunkData.java b/src/main/java/it/grid/storm/catalogs/ReducedBoLChunkData.java deleted file mode 100644 index 461a5d8fe..000000000 --- a/src/main/java/it/grid/storm/catalogs/ReducedBoLChunkData.java +++ /dev/null @@ -1,143 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.catalogs; - -import it.grid.storm.srm.types.TReturnStatus; -import it.grid.storm.srm.types.TSURL; -import it.grid.storm.srm.types.TStatusCode; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * This class represents a ReducedBringOnLineChunkData, that is part of a - * multifile PrepareToGet srm request. 
It is closely related to BoLChunkData but - * it is called Reduced because it only contains the fromSURL, the current - * TReturnStatus, and the primary key of the request. - * - * This class is intended to be used by srmReleaseFiles, where only a limited - * amunt of information is needed instead of full blown BoLChunkData. - * - * @author CNAF - * @date Aug 2009 - * @version 1.0 - */ -public class ReducedBoLChunkData implements ReducedChunkData { - - @SuppressWarnings("unused") - private static final Logger log = LoggerFactory - .getLogger(ReducedBoLChunkData.class); - - private long primaryKey = -1; // long representing the primary key for the - // persistence layer! - private TSURL fromSURL; // SURL that the srm command wants to get - private TReturnStatus status; // return status for this chunk of request - - public ReducedBoLChunkData(TSURL fromSURL, TReturnStatus status) - throws InvalidReducedBoLChunkDataAttributesException { - - boolean ok = status != null && fromSURL != null; - if (!ok) { - throw new InvalidReducedBoLChunkDataAttributesException(fromSURL, status); - } - this.fromSURL = fromSURL; - this.status = status; - } - - @Override - public boolean equals(Object o) { - - if (o == this) { - return true; - } - if (!(o instanceof ReducedBoLChunkData)) { - return false; - } - ReducedBoLChunkData cd = (ReducedBoLChunkData) o; - return (primaryKey == cd.primaryKey) && fromSURL.equals(cd.fromSURL) - && status.equals(cd.status); - } - - /** - * Method that returns the fromSURL of the srm request to which this chunk - * belongs. 
- */ - public TSURL fromSURL() { - - return fromSURL; - } - - @Override - public int hashCode() { - - int hash = 17; - hash = 37 * hash + new Long(primaryKey).hashCode(); - hash = 37 * hash + fromSURL.hashCode(); - hash = 37 * hash + status.hashCode(); - return hash; - } - - public boolean isPinned() { - - if (status.getStatusCode() == TStatusCode.SRM_SUCCESS) { - return true; - } - return false; - } - - /** - * Method used to get the primary key used in the persistence layer! - */ - public long primaryKey() { - - return primaryKey; - } - - /** - * Method used to set the primary key to be used in the persistence layer! - */ - public void setPrimaryKey(long l) { - - primaryKey = l; - } - - /** - * Method that returns the status for this chunk of the srm request. - */ - public TReturnStatus status() { - - return status; - } - - @Override - public String toString() { - - StringBuilder sb = new StringBuilder(); - sb.append("ReducedBoLChunkData\n"); - sb.append("primaryKey="); - sb.append(primaryKey); - sb.append("; "); - sb.append("fromSURL="); - sb.append(fromSURL); - sb.append("; "); - sb.append("status="); - sb.append(status); - sb.append("."); - return sb.toString(); - } -} diff --git a/src/main/java/it/grid/storm/catalogs/ReducedBoLChunkDataTO.java b/src/main/java/it/grid/storm/catalogs/ReducedBoLChunkDataTO.java deleted file mode 100644 index 5e6a3502d..000000000 --- a/src/main/java/it/grid/storm/catalogs/ReducedBoLChunkDataTO.java +++ /dev/null @@ -1,133 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. 
You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.catalogs; - -import it.grid.storm.srm.types.TStatusCode; - -/** - * Class that represents some of the fields in a row in the Persistence Layer: - * this is all raw data referring to the ReducedBoLChunkData proper, that is - * String and primitive types. - * - * @author EGRID ICTP - * @version 1.0 - * @date November, 2006 - */ -public class ReducedBoLChunkDataTO { - - private long primaryKey = -1; // ID primary key of record in DB - private String fromSURL = " "; - private String normalizedStFN = null; - private Integer surlUniqueID = null; - - private int status = StatusCodeConverter.getInstance().toDB( - TStatusCode.SRM_REQUEST_QUEUED); - private String errString = " "; - - public String errString() { - - return errString; - } - - public String fromSURL() { - - return fromSURL; - } - - public long primaryKey() { - - return primaryKey; - } - - public void setErrString(String s) { - - errString = s; - } - - public void setFromSURL(String s) { - - fromSURL = s; - } - - public void setPrimaryKey(long n) { - - primaryKey = n; - } - - public void setStatus(int n) { - - status = n; - } - - public int status() { - - return status; - } - - /** - * @param normalizedStFN - * the normalizedStFN to set - */ - public void setNormalizedStFN(String normalizedStFN) { - - this.normalizedStFN = normalizedStFN; - } - - /** - * @return the normalizedStFN - */ - public String normalizedStFN() { - - return normalizedStFN; - } - - /** - * @param surlUniqueID - * the sURLUniqueID to set - */ - public void setSurlUniqueID(Integer surlUniqueID) 
{ - - this.surlUniqueID = surlUniqueID; - } - - /** - * @return the sURLUniqueID - */ - public Integer surlUniqueID() { - - return surlUniqueID; - } - - public String toString() { - - StringBuilder sb = new StringBuilder(); - sb.append(primaryKey); - sb.append(" "); - sb.append(fromSURL); - sb.append(" "); - sb.append(normalizedStFN); - sb.append(" "); - sb.append(surlUniqueID); - sb.append(" "); - sb.append(status); - sb.append(" "); - sb.append(errString); - sb.append(" "); - return sb.toString(); - } -} diff --git a/src/main/java/it/grid/storm/catalogs/ReducedCopyChunkData.java b/src/main/java/it/grid/storm/catalogs/ReducedCopyChunkData.java deleted file mode 100644 index a98d06d9d..000000000 --- a/src/main/java/it/grid/storm/catalogs/ReducedCopyChunkData.java +++ /dev/null @@ -1,141 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.catalogs; - -import it.grid.storm.srm.types.TReturnStatus; -import it.grid.storm.srm.types.TSURL; - -/** - * This class represents a ReducedCopyChunkData, that is part of a multifile - * Copy srm request. It contains data about: the requestToken, the fromSURL, the - * toSURL, return status of the file together with its error string. - * - * @author Michele Dibenedetto - */ -public class ReducedCopyChunkData { - - /* long representing the primary key for the persistence layer! 
*/ - private long primaryKey = -1; - /* SURL from which the srmCopy will get the file */ - private TSURL fromSURL; - /* SURL to which the srmCopy will put the file */ - private TSURL toSURL; - /* Return status for this chunk of request */ - private TReturnStatus status; - - public ReducedCopyChunkData(TSURL fromSURL, TSURL toSURL, TReturnStatus status) - throws InvalidReducedCopyChunkDataAttributesException { - - if (fromSURL == null || toSURL == null || status == null) { - throw new InvalidReducedCopyChunkDataAttributesException(fromSURL, - toSURL, status); - } - - this.fromSURL = fromSURL; - this.toSURL = toSURL; - this.status = status; - } - - /** - * Method used to get the primary key used in the persistence layer! - */ - public long primaryKey() { - - return primaryKey; - } - - /** - * Method used to set the primary key to be used in the persistence layer! - */ - public void setPrimaryKey(long l) { - - primaryKey = l; - } - - /** - * Method that returns the fromSURL of the srm request to which this chunk - * belongs. - */ - public TSURL fromSURL() { - - return fromSURL; - } - - /** - * Method that returns the toSURL of the srm request to which this chunk - * belongs. - */ - public TSURL toSURL() { - - return toSURL; - } - - /** - * Method that returns the status for this chunk of the srm request. 
- */ - public TReturnStatus status() { - - return status; - } - - @Override - public String toString() { - - StringBuilder sb = new StringBuilder(); - sb.append("CopyChunkData\n"); - sb.append("primaryKey="); - sb.append(primaryKey); - sb.append("; "); - sb.append("RequestToken="); - sb.append("fromSURL="); - sb.append(fromSURL); - sb.append("; "); - sb.append("toSURL="); - sb.append(toSURL); - sb.append("; "); - sb.append("status="); - sb.append(status); - sb.append("; "); - return sb.toString(); - } - - @Override - public int hashCode() { - - int hash = 17; - hash = 37 * hash + new Long(primaryKey).hashCode(); - hash = 37 * hash + fromSURL.hashCode(); - hash = 37 * hash + toSURL.hashCode(); - hash = 37 * hash + status.hashCode(); - return hash; - } - - @Override - public boolean equals(Object o) { - - if (o == this) { - return true; - } - if (!(o instanceof ReducedCopyChunkData)) { - return false; - } - ReducedCopyChunkData cd = (ReducedCopyChunkData) o; - return (primaryKey == cd.primaryKey) && fromSURL.equals(cd.fromSURL) - && toSURL.equals(cd.toSURL) && status.equals(cd.status); - } -} diff --git a/src/main/java/it/grid/storm/catalogs/ReducedCopyChunkDataTO.java b/src/main/java/it/grid/storm/catalogs/ReducedCopyChunkDataTO.java deleted file mode 100644 index 7ff37389d..000000000 --- a/src/main/java/it/grid/storm/catalogs/ReducedCopyChunkDataTO.java +++ /dev/null @@ -1,188 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.catalogs; - -import it.grid.storm.srm.types.TStatusCode; - -/** - * Class that represents some of the fields in a row in the Persistence Layer: - * this is all raw data referring to the ReducedCopyChunkData proper, that is - * String and primitive types. - * - * All other fields are 0 if int, or a white space if String. - * - * @author Michele Dibenedetto - */ -public class ReducedCopyChunkDataTO { - - /* Database table request_Get fields BEGIN */ - private long primaryKey = -1; // ID primary key of record in DB - private String fromSURL = " "; - private String normalizedSourceStFN = null; - private Integer sourceSurlUniqueID = null; - private String toSURL = " "; - private String normalizedTargetStFN = null; - private Integer targetSurlUniqueID = null; - /* Database table request_Get fields END */ - - private int status = StatusCodeConverter.getInstance().toDB( - TStatusCode.SRM_REQUEST_QUEUED); - private String errString = " "; - - public long primaryKey() { - - return primaryKey; - } - - public void setPrimaryKey(long n) { - - primaryKey = n; - } - - public String fromSURL() { - - return fromSURL; - } - - public void setFromSURL(String s) { - - fromSURL = s; - } - - /** - * @return the normalizedStFN - */ - public String normalizedSourceStFN() { - - return normalizedSourceStFN; - } - - /** - * @param normalizedStFN - * the normalizedStFN to set - */ - public void setNormalizedSourceStFN(String normalizedStFN) { - - this.normalizedSourceStFN = normalizedStFN; - } - - /** - * @return the surlUniqueID - */ - public Integer sourceSurlUniqueID() { - - return sourceSurlUniqueID; - } - - /** - * @param surlUniqueID - * the surlUniqueID to set - */ - public void setSourceSurlUniqueID(Integer surlUniqueID) { - - this.sourceSurlUniqueID = surlUniqueID; - } - - public String toSURL() { - - return toSURL; - } - - public void 
setToSURL(String s) { - - toSURL = s; - } - - /** - * @return the normalizedStFN - */ - public String normalizedTargetStFN() { - - return normalizedTargetStFN; - } - - /** - * @param normalizedStFN - * the normalizedStFN to set - */ - public void setNormalizedTargetStFN(String normalizedStFN) { - - this.normalizedTargetStFN = normalizedStFN; - } - - /** - * @return the surlUniqueID - */ - public Integer targetSurlUniqueID() { - - return targetSurlUniqueID; - } - - /** - * @param surlUniqueID - * the surlUniqueID to set - */ - public void setTargetSurlUniqueID(Integer surlUniqueID) { - - this.targetSurlUniqueID = surlUniqueID; - } - - public int status() { - - return status; - } - - public void setStatus(int n) { - - status = n; - } - - public String errString() { - - return errString; - } - - public void setErrString(String s) { - - errString = s; - } - - public String toString() { - - StringBuilder sb = new StringBuilder(); - sb.append(primaryKey); - sb.append(" "); - sb.append(fromSURL); - sb.append(" "); - sb.append(normalizedSourceStFN); - sb.append(" "); - sb.append(sourceSurlUniqueID); - sb.append(" "); - sb.append(toSURL); - sb.append(" "); - sb.append(normalizedTargetStFN); - sb.append(" "); - sb.append(targetSurlUniqueID); - sb.append(" "); - sb.append(status); - sb.append(" "); - sb.append(errString); - sb.append(" "); - return sb.toString(); - } -} diff --git a/src/main/java/it/grid/storm/catalogs/ReducedPtGChunkData.java b/src/main/java/it/grid/storm/catalogs/ReducedPtGChunkData.java deleted file mode 100644 index 5446a4257..000000000 --- a/src/main/java/it/grid/storm/catalogs/ReducedPtGChunkData.java +++ /dev/null @@ -1,142 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. 
You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.catalogs; - -import it.grid.storm.srm.types.TReturnStatus; -import it.grid.storm.srm.types.TSURL; -import it.grid.storm.srm.types.TStatusCode; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * This class represents a ReducedPrepareToGetChunkData, that is part of a - * multifile PrepareToGet srm request. It is closely related to PtGChunkData but - * it is called Reduced because it only contains the fromSURL, the current - * TReturnStatus, and the primary key of the request. - * - * This class is intended to be used by srmReleaseFiles, where only a limited - * amunt of information is needed instead of full blown PtGChunkData. - * - * @author EGRID - ICTP Trieste - * @date November, 2006 - * @version 1.0 - */ -public class ReducedPtGChunkData implements ReducedChunkData { - - @SuppressWarnings("unused") - private static final Logger log = LoggerFactory - .getLogger(ReducedPtGChunkData.class); - - private long primaryKey = -1; // long representing the primary key for the - // persistence layer! 
- private TSURL fromSURL; // SURL that the srm command wants to get - private TReturnStatus status; // return status for this chunk of request - - public ReducedPtGChunkData(TSURL fromSURL, TReturnStatus status) - throws InvalidReducedPtGChunkDataAttributesException { - - if (status == null || fromSURL == null) { - throw new InvalidReducedPtGChunkDataAttributesException(fromSURL, status); - } - this.fromSURL = fromSURL; - this.status = status; - } - - /** - * Method that returns the fromSURL of the srm request to which this chunk - * belongs. - */ - public TSURL fromSURL() { - - return fromSURL; - } - - @Override - public int hashCode() { - - int hash = 17; - hash = 37 * hash + new Long(primaryKey).hashCode(); - hash = 37 * hash + fromSURL.hashCode(); - hash = 37 * hash + status.hashCode(); - return hash; - } - - public boolean isPinned() { - - if (status.getStatusCode() == TStatusCode.SRM_FILE_PINNED) { - return true; - } - return false; - } - - /** - * Method used to get the primary key used in the persistence layer! - */ - public long primaryKey() { - - return primaryKey; - } - - /** - * Method used to set the primary key to be used in the persistence layer! - */ - public void setPrimaryKey(long l) { - - primaryKey = l; - } - - /** - * Method that returns the status for this chunk of the srm request. 
- */ - public TReturnStatus status() { - - return status; - } - - @Override - public String toString() { - - StringBuilder sb = new StringBuilder(); - sb.append("ReducedPtGChunkData\n"); - sb.append("primaryKey="); - sb.append(primaryKey); - sb.append("; "); - sb.append("fromSURL="); - sb.append(fromSURL); - sb.append("; "); - sb.append("status="); - sb.append(status); - sb.append("."); - return sb.toString(); - } - - @Override - public boolean equals(Object o) { - - if (o == this) { - return true; - } - if (!(o instanceof ReducedPtGChunkData)) { - return false; - } - ReducedPtGChunkData cd = (ReducedPtGChunkData) o; - return (primaryKey == cd.primaryKey) && fromSURL.equals(cd.fromSURL) - && status.equals(cd.status); - } -} diff --git a/src/main/java/it/grid/storm/catalogs/ReducedPtGChunkDataTO.java b/src/main/java/it/grid/storm/catalogs/ReducedPtGChunkDataTO.java deleted file mode 100644 index a73a3b651..000000000 --- a/src/main/java/it/grid/storm/catalogs/ReducedPtGChunkDataTO.java +++ /dev/null @@ -1,133 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.catalogs; - -import it.grid.storm.srm.types.TStatusCode; - -/** - * Class that represents some of the fileds in a row in the Persistence Layer: - * this is all raw data referring to the ReducedPtGChunkData proper, that is - * String and primitive types. 
- * - * @author EGRID ICTP - * @version 1.0 - * @date November, 2006 - */ -public class ReducedPtGChunkDataTO { - - private long primaryKey = -1; // ID primary key of record in DB - private String fromSURL = " "; - private String normalizedStFN = null; - private Integer surlUniqueID = null; - - private int status = StatusCodeConverter.getInstance().toDB( - TStatusCode.SRM_REQUEST_QUEUED); - private String errString = " "; - - public long primaryKey() { - - return primaryKey; - } - - public void setPrimaryKey(long n) { - - primaryKey = n; - } - - public String fromSURL() { - - return fromSURL; - } - - public void setFromSURL(String s) { - - fromSURL = s; - } - - /** - * @param normalizedStFN - * the normalizedStFN to set - */ - public void setNormalizedStFN(String normalizedStFN) { - - this.normalizedStFN = normalizedStFN; - } - - /** - * @return the normalizedStFN - */ - public String normalizedStFN() { - - return normalizedStFN; - } - - /** - * @param surlUniqueID - * the sURLUniqueID to set - */ - public void setSurlUniqueID(Integer surlUniqueID) { - - this.surlUniqueID = surlUniqueID; - } - - /** - * @return the sURLUniqueID - */ - public Integer surlUniqueID() { - - return surlUniqueID; - } - - public int status() { - - return status; - } - - public void setStatus(int n) { - - status = n; - } - - public String errString() { - - return errString; - } - - public void setErrString(String s) { - - errString = s; - } - - public String toString() { - - StringBuilder sb = new StringBuilder(); - sb.append(primaryKey); - sb.append(" "); - sb.append(fromSURL); - sb.append(" "); - sb.append(normalizedStFN); - sb.append(" "); - sb.append(surlUniqueID); - sb.append(" "); - sb.append(status); - sb.append(" "); - sb.append(errString); - sb.append(" "); - return sb.toString(); - } -} diff --git a/src/main/java/it/grid/storm/catalogs/ReducedPtPChunkData.java b/src/main/java/it/grid/storm/catalogs/ReducedPtPChunkData.java deleted file mode 100644 index ac90058c6..000000000 --- 
a/src/main/java/it/grid/storm/catalogs/ReducedPtPChunkData.java +++ /dev/null @@ -1,175 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.catalogs; - -import it.grid.storm.srm.types.TFileStorageType; -import it.grid.storm.srm.types.TLifeTimeInSeconds; -import it.grid.storm.srm.types.TReturnStatus; -import it.grid.storm.srm.types.TSURL; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * This class represents a ReducedPrepareToPutChunkData, that is part of a - * multifile PrepareToPut srm request. It is closely related to PtPChunkData but - * it is called Reduced because it only contains the toSURL, the current - * TReturnStatus, the TFileStorageType, the FileLifeTime in case of Volatile, - * the VomsGridUser limited to the DN, and the primary key of the request. - * - * This class is intended to be used by srmPutDone, where only a limited amount - * of information is needed instead of full blown PtPChunkData. It is also used - * by the automatic handlnig of non invoked srmPutDone, during transition to - * SRM_FILE_LIFETIME_EXPIRED. 
- * - * @author EGRID - ICTP Trieste - * @date January, 2007 - * @version 2.0 - */ -public class ReducedPtPChunkData { - - @SuppressWarnings("unused") - private static final Logger log = LoggerFactory - .getLogger(ReducedPtPChunkData.class); - - private long primaryKey = -1; // long representing the primary key for the - // persistence layer! - private TSURL toSURL; // SURL that the srm command wants to get - private TReturnStatus status; // return status for this chunk of request - private TFileStorageType fileStorageType; // fileStorageType of this shunk of - // the request - private TLifeTimeInSeconds fileLifetime; // requested lifetime for SURL in - // case of Volatile entry. - - public ReducedPtPChunkData(TSURL toSURL, TReturnStatus status, - TFileStorageType fileStorageType, TLifeTimeInSeconds fileLifetime) - throws InvalidReducedPtPChunkDataAttributesException { - - if (status == null || toSURL == null || fileStorageType == null - || fileLifetime == null) { - throw new InvalidReducedPtPChunkDataAttributesException(toSURL, status, - fileStorageType, fileLifetime); - } - this.toSURL = toSURL; - this.status = status; - this.fileStorageType = fileStorageType; - this.fileLifetime = fileLifetime; - } - - /** - * Method used to get the primary key used in the persistence layer! - */ - public long primaryKey() { - - return primaryKey; - } - - /** - * Method used to set the primary key to be used in the persistence layer! - */ - public void setPrimaryKey(long l) { - - primaryKey = l; - } - - /** - * Method that returns the toSURL of the srm request to which this chunk - * belongs. - */ - public TSURL toSURL() { - - return toSURL; - } - - /** - * Method that returns the status for this chunk of the srm request. - */ - public TReturnStatus status() { - - return status; - } - - /** - * Method that returns the TFileStorageType of the srm request to which this - * chunk belongs. 
- */ - public TFileStorageType fileStorageType() { - - return fileStorageType; - } - - /** - * Method that returns the fileLifetime of the srm request to which this chunk - * belongs. - */ - public TLifeTimeInSeconds fileLifetime() { - - return fileLifetime; - } - - @Override - public String toString() { - - StringBuilder sb = new StringBuilder(); - sb.append("ReducedPtPChunkData\n"); - sb.append("primaryKey="); - sb.append(primaryKey); - sb.append("; "); - sb.append("toSURL="); - sb.append(toSURL); - sb.append("; "); - sb.append("status="); - sb.append(status); - sb.append(";"); - sb.append("fileStorageType="); - sb.append(fileStorageType); - sb.append(";"); - sb.append("fileLifetime="); - sb.append(fileLifetime); - sb.append("."); - return sb.toString(); - } - - @Override - public int hashCode() { - - int hash = 17; - hash = 37 * hash + new Long(primaryKey).hashCode(); - hash = 37 * hash + toSURL.hashCode(); - hash = 37 * hash + status.hashCode(); - hash = 37 * hash + fileStorageType.hashCode(); - hash = 37 * hash + fileLifetime.hashCode(); - return hash; - } - - @Override - public boolean equals(Object o) { - - if (o == this) { - return true; - } - if (!(o instanceof ReducedPtPChunkData)) { - return false; - } - ReducedPtPChunkData cd = (ReducedPtPChunkData) o; - return (primaryKey == cd.primaryKey) && toSURL.equals(cd.toSURL) - && status.equals(cd.status) && fileStorageType.equals(cd.fileStorageType) - && fileLifetime.equals(cd.fileLifetime); - } - -} diff --git a/src/main/java/it/grid/storm/catalogs/ReducedPtPChunkDataTO.java b/src/main/java/it/grid/storm/catalogs/ReducedPtPChunkDataTO.java deleted file mode 100644 index e83eb0aa1..000000000 --- a/src/main/java/it/grid/storm/catalogs/ReducedPtPChunkDataTO.java +++ /dev/null @@ -1,164 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.catalogs; - -import it.grid.storm.srm.types.TStatusCode; -import it.grid.storm.srm.types.TFileStorageType; - -/** - * Class that represents some of the fields in a row in the Persistence Layer: - * this is all raw data referring to the ReducedPtPChunkData proper, that is - * String and primitive types. - * - * @author EGRID ICTP - * @version 1.0 - * @date January, 2007 - */ -public class ReducedPtPChunkDataTO { - - private long primaryKey = -1; // ID primary key of record in DB - private String toSURL = " "; - private String normalizedStFN = null; - private Integer surlUniqueID = null; - - private int status = StatusCodeConverter.getInstance().toDB( - TStatusCode.SRM_REQUEST_QUEUED); - private String errString = " "; - private String fileStorageType = FileStorageTypeConverter.getInstance().toDB( - TFileStorageType.VOLATILE); - private int fileLifetime = -1; - - public long primaryKey() { - - return primaryKey; - } - - public void setPrimaryKey(long n) { - - primaryKey = n; - } - - public String toSURL() { - - return toSURL; - } - - public void setToSURL(String s) { - - toSURL = s; - } - - /** - * @param normalizedStFN - * the normalizedStFN to set - */ - public void setNormalizedStFN(String normalizedStFN) { - - this.normalizedStFN = normalizedStFN; - } - - /** - * @return the normalizedStFN - */ - public String normalizedStFN() { - - return normalizedStFN; - } - - /** - * @param 
surlUniqueID - * the sURLUniqueID to set - */ - public void setSurlUniqueID(Integer surlUniqueID) { - - this.surlUniqueID = surlUniqueID; - } - - /** - * @return the sURLUniqueID - */ - public Integer surlUniqueID() { - - return surlUniqueID; - } - - public int status() { - - return status; - } - - public void setStatus(int n) { - - status = n; - } - - public String errString() { - - return errString; - } - - public void setErrString(String s) { - - errString = s; - } - - public String fileStorageType() { - - return fileStorageType; - } - - /** - * Method that sets the FileStorageType: if it is null nothing gets set. The - * deafult value is Volatile. - */ - public void setFileStorageType(String s) { - - if (s != null) - fileStorageType = s; - } - - public int fileLifetime() { - - return fileLifetime; - } - - public void setFileLifetime(int n) { - - fileLifetime = n; - } - - public String toString() { - - StringBuilder sb = new StringBuilder(); - sb.append(primaryKey); - sb.append(" "); - sb.append(toSURL); - sb.append(" "); - sb.append(normalizedStFN); - sb.append(" "); - sb.append(surlUniqueID); - sb.append(" "); - sb.append(status); - sb.append(" "); - sb.append(errString); - sb.append(" "); - sb.append(fileStorageType); - sb.append(" "); - return sb.toString(); - } -} diff --git a/src/main/java/it/grid/storm/catalogs/RequestSummaryCatalog.java b/src/main/java/it/grid/storm/catalogs/RequestSummaryCatalog.java index cc81d144e..afd68faf6 100644 --- a/src/main/java/it/grid/storm/catalogs/RequestSummaryCatalog.java +++ b/src/main/java/it/grid/storm/catalogs/RequestSummaryCatalog.java @@ -17,12 +17,32 @@ package it.grid.storm.catalogs; -import it.grid.storm.catalogs.timertasks.RequestsGarbageCollector; +import static it.grid.storm.srm.types.TRequestType.PREPARE_TO_GET; +import static it.grid.storm.srm.types.TRequestType.PREPARE_TO_PUT; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; + +import org.slf4j.Logger; +import 
org.slf4j.LoggerFactory; + +import com.google.common.base.Preconditions; +import com.google.common.collect.Lists; + import it.grid.storm.common.types.TimeUnit; -import it.grid.storm.config.Configuration; import it.grid.storm.griduser.FQAN; import it.grid.storm.griduser.GridUserInterface; import it.grid.storm.griduser.GridUserManager; +import it.grid.storm.persistence.converter.PinLifetimeConverter; +import it.grid.storm.persistence.converter.RequestTypeConverter; +import it.grid.storm.persistence.converter.StatusCodeConverter; +import it.grid.storm.persistence.dao.RequestSummaryDAO; +import it.grid.storm.persistence.exceptions.InvalidRequestSummaryDataAttributesException; +import it.grid.storm.persistence.exceptions.MalformedGridUserException; +import it.grid.storm.persistence.impl.mysql.RequestSummaryDAOMySql; +import it.grid.storm.persistence.model.RequestSummaryData; +import it.grid.storm.persistence.model.RequestSummaryDataTO; import it.grid.storm.srm.types.InvalidTRequestTokenAttributesException; import it.grid.storm.srm.types.TLifeTimeInSeconds; import it.grid.storm.srm.types.TRequestToken; @@ -31,21 +51,11 @@ import it.grid.storm.srm.types.TSURL; import it.grid.storm.srm.types.TStatusCode; -import java.util.ArrayList; -import java.util.Collection; -import java.util.List; -import java.util.Timer; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import com.google.common.collect.Lists; - /** - * Class that represents the RequestSummaryCatalog of StoRM. The rows in the - * catalog are called RequestSummaryData. Methods are provided to: look up newly - * added requests as identified by their SRM_REQUEST_QUEUED status, to update - * the global status of the request, and to fail a request with SRM_FAILURE. + * Class that represents the RequestSummaryCatalog of StoRM. The rows in the catalog are called + * RequestSummaryData. 
Methods are provided to: look up newly added requests as identified by their + * SRM_REQUEST_QUEUED status, to update the global status of the request, and to fail a request with + * SRM_FAILURE. * * @author EGRID - ICTP Trieste * @version 2.0 @@ -53,389 +63,306 @@ */ public class RequestSummaryCatalog { - private static final Logger log = LoggerFactory - .getLogger(RequestSummaryCatalog.class); - /** Only instance of RequestSummaryCatalog for StoRM! */ - private static RequestSummaryCatalog cat = new RequestSummaryCatalog(); - /** WARNING!!! TO BE MODIFIED WITH FACTORY!!! */ - private final RequestSummaryDAO dao = RequestSummaryDAO.getInstance(); - /** timer thread that will run a task to clean */ - private Timer clock = null; - /** configuration instance **/ - private final Configuration config = Configuration.getInstance(); - - private RequestSummaryCatalog() { - - clock = new Timer(); - - clock.schedule( - new RequestsGarbageCollector(clock, - config.getRequestPurgerPeriod() * 1000), - config.getRequestPurgerDelay() * 1000); - } - - /** - * Method that returns the only instance of RequestSummaryCatalog present in - * StoRM. - */ - public static RequestSummaryCatalog getInstance() { - - return RequestSummaryCatalog.cat; - } - - /** - * Method in charge of retrieving RequestSummaryData associated to new - * requests, that is those found in SRM_REQUETS_QUEUED global status; such - * requests then transit into SRM_SUCCESS. The actual number of fetched - * requests depends on the configured ceiling. - * - * If no new request is found, an empty Collection is returned. if a request - * is malformed, then that request is failed and an attempt is made to signal - * such occurrence in the DB. Only correctly formed requests are returned. 
- */ - synchronized public Collection fetchNewRequests( - int capacity) { - - List list = Lists.newArrayList(); - - Collection c = dao.findNew(capacity); - if (c == null || c.isEmpty()) { - return list; - } - int fetched = c.size(); - log.debug("REQUEST SUMMARY CATALOG: {} new requests picked up.", fetched); - for (RequestSummaryDataTO auxTO : c) { - RequestSummaryData aux = null; - try { - aux = makeOne(auxTO); - } catch (IllegalArgumentException e) { - log.error("REQUEST SUMMARY CATALOG: Failure while performing makeOne " - + "operation. IllegalArgumentException: {}", e.getMessage(), e); - continue; - } - if (aux != null) { - log.debug("REQUEST SUMMARY CATALOG: {} associated to {} included " - + "for processing", aux.requestToken(), aux.gridUser().getDn()); - list.add(aux); - } - } - int ret = list.size(); - if (ret < fetched) { - log.warn("REQUEST SUMMARY CATALOG: including {} requests for processing, " - + "since the dropped ones were malformed!", ret); - } else { - log.debug("REQUEST SUMMARY CATALOG: including for processing all {} " - + "requests.", ret); - } - if (!list.isEmpty()) { - log.debug("REQUEST SUMMARY CATALOG: returning {}\n\n", list); - } - return list; - } - - /** - * Private method used to create a RequestSummaryData object, from a - * RequestSummaryDataTO. If a chunk cannot be created, an error messagge gets - * logged and an attempt is made to signal in the DB that the request is - * malformed. 
- */ - private RequestSummaryData makeOne(RequestSummaryDataTO to) - throws IllegalArgumentException { - - TRequestType auxrtype = RequestTypeConverter.getInstance().toSTORM( - to.requestType()); - if (auxrtype == TRequestType.EMPTY) { - StringBuilder sb = new StringBuilder(); - sb.append("TRequestType could not be created from its String representation "); - sb.append(to.requestType()); - sb.append("\n"); - log.warn(sb.toString()); - throw new IllegalArgumentException( - "Invalid TRequestType in the provided RequestSummaryDataTO"); - } - TRequestToken auxrtoken; - try { - auxrtoken = new TRequestToken(to.requestToken(), to.timestamp()); - } catch (InvalidTRequestTokenAttributesException e) { - log.warn("Unable to create TRequestToken from RequestSummaryDataTO. " - + "InvalidTRequestTokenAttributesException: {}", e.getMessage()); - throw new IllegalArgumentException( - "Unable to create TRequestToken from RequestSummaryDataTO."); - } - GridUserInterface auxgu; - - try { - auxgu = loadVomsGridUser(to.clientDN(), to.vomsAttributes()); - } catch (MalformedGridUserException e) { - StringBuilder sb = new StringBuilder(); - sb.append("VomsGridUser could not be created from DN String "); - sb.append(to.clientDN()); - sb.append(" voms attributes String "); - sb.append(to.vomsAttributes()); - sb.append(" and from request token String "); - sb.append(to.requestToken()); - log.warn("{}. MalformedGridUserException: {}", sb.toString(), e.getMessage()); - throw new IllegalArgumentException( - "Unable to load Voms Grid User from RequestSummaryDataTO. " - + "MalformedGridUserException: " + e.getMessage()); - } - RequestSummaryData data = null; - try { - data = new RequestSummaryData(auxrtype, auxrtoken, auxgu); - data.setPrimaryKey(to.primaryKey()); - } catch (InvalidRequestSummaryDataAttributesException e) { - dao.failRequest(to.primaryKey(), "The request data is malformed!"); - log.warn("REQUEST SUMMARY CATALOG! Unable to create RequestSummaryData. 
" - + "InvalidRequestSummaryDataAttributesException: {}", e.getMessage(), e); - throw new IllegalArgumentException("Unable to reate RequestSummaryData"); - } - TReturnStatus status = null; - if (to.getStatus() != null) { - TStatusCode code = StatusCodeConverter.getInstance().toSTORM(to.getStatus()); - if (code == TStatusCode.EMPTY) { - log.warn("RequestSummaryDataTO retrieved StatusCode was not " - + "recognised: {}", to.getStatus()); - } else { - status = new TReturnStatus(code, to.getErrstring()); - } - } - data.setUserToken(to.getUserToken()); - data.setRetrytime(to.getRetrytime()); - if (to.getPinLifetime() != null) { - data.setPinLifetime(TLifeTimeInSeconds.make(PinLifetimeConverter - .getInstance().toStoRM(to.getPinLifetime()), TimeUnit.SECONDS)); - } - data.setSpaceToken(to.getSpaceToken()); - data.setStatus(status); - data.setErrstring(to.getErrstring()); - data.setRemainingTotalTime(to.getRemainingTotalTime()); - data.setNbreqfiles(to.getNbreqfiles()); - data.setNumOfCompleted(to.getNumOfCompleted()); - if (to.getFileLifetime() != null) { - data.setFileLifetime(TLifeTimeInSeconds.make(to.getFileLifetime(), - TimeUnit.SECONDS)); - } - - data.setDeferredStartTime(to.getDeferredStartTime()); - data.setNumOfWaiting(to.getNumOfWaiting()); - data.setNumOfFailed(to.getNumOfFailed()); - data.setRemainingDeferredStartTime(to.getRemainingDeferredStartTime()); - return data; - } - - /** - * Private method that holds the logic for creating a VomsGridUser from - * persistence and to load any available Proxy. For the moment the VOMS - * attributes present in persistence are NOT loaded! 
- */ - private GridUserInterface loadVomsGridUser(String dn, String fqansString) throws MalformedGridUserException { - - log.debug("load VomsGridUser for dn='{}' and fqansString='{}'", dn, fqansString); - - if (dn == null) { - throw new MalformedGridUserException("Invalid null DN"); - } - if (fqansString == null || fqansString.isEmpty()) { - return GridUserManager.makeGridUser(dn); - } - - FQAN[] fqans = new FQAN[fqansString.split("#").length]; - int i = 0; - for (String fqan: fqansString.split("#")) { - fqans[i++] = new FQAN(fqan); - } - try { - return GridUserManager.makeVOMSGridUser(dn, fqans); - } catch (IllegalArgumentException e) { - log.error("Unexpected error on voms grid user creation. " - + "IllegalArgumentException: {}", e.getMessage(), e); - throw new MalformedGridUserException(e.getMessage()); - } - } - - /** - * Method used to update the global status of a request identified by - * TRequestToken, to the supplied TReturnStatus. In case of any exception - * nothing happens. - */ - synchronized public void updateGlobalStatus(TRequestToken rt, - TReturnStatus status) { - - dao.updateGlobalStatus(rt.toString(), StatusCodeConverter.getInstance() - .toDB(status.getStatusCode()), status.getExplanation()); - } - - public void updateFromPreviousGlobalStatus(TRequestToken requestToken, - TStatusCode expectedStatusCode, TStatusCode newStatusCode, - String explanation) { - - dao.updateGlobalStatusOnMatchingGlobalStatus(requestToken, - expectedStatusCode, newStatusCode, explanation); - } - - /** - * Method used to update the global status of a request identified by - * TRequestToken, to the supplied TReturnStatus. The pin lifetime and the file - * lifetime are updated in order to start the countdown from the moment the - * status is updated. In case of any exception nothing happens. 
- */ - synchronized public void updateGlobalStatusPinFileLifetime(TRequestToken rt, - TReturnStatus status) { - - dao.updateGlobalStatusPinFileLifetime(rt.toString(), StatusCodeConverter - .getInstance().toDB(status.getStatusCode()), status.getExplanation()); - } - - /** - * Method used to change the global status of the supplied request to - * SRM_FAILURE, as well as that of each single chunk in the request. If the - * request type is not supported by the logic, only the global status is - * updated and an error log gets written warning of the unsupported business - * logic. - * - * If the supplied RequestSummaryData is null, nothing gets done; if any DB - * error occurs, no exception gets thrown but proper messagges get logged. - */ - synchronized public void failRequest(RequestSummaryData rsd, - String explanation) { - - if (rsd != null) { - TRequestType rtype = rsd.requestType(); - if (rtype == TRequestType.PREPARE_TO_GET) { - dao.failPtGRequest(rsd.primaryKey(), explanation); - } else if (rtype == TRequestType.PREPARE_TO_PUT) { - dao.failPtPRequest(rsd.primaryKey(), explanation); - } else if (rtype == TRequestType.COPY) { - dao.failCopyRequest(rsd.primaryKey(), explanation); - } else { - dao.failRequest(rsd.primaryKey(), explanation); - } - } - } - - /** - * Method used to abort a request that has not yet been fetched for - * processing; if the status of the request associated to the supplied request - * token tok is different from SRM_REQUEST_QUEUED, then nothing takes place; - * likewise if the supplied token does not correspond to any request, or if it - * is null. 
- */ - synchronized public void abortRequest(TRequestToken rt) { - - if (rt != null) { - dao.abortRequest(rt.toString()); - } - } - - /** - * Method used to abort a request that has not yet been fetched for - * processing; abort is only applied to those SURLs of the request specified - * in the Collection; if the status of the request associated to the supplied - * request token is different from SRM_REQUEST_QUEUED, then nothing takes - * place; likewise if the supplied token does not correspond to any request, - * if it is null, if the Collection is null, or the Collection does not - * contain TSURLs. - */ - synchronized public void abortChunksOfRequest(TRequestToken rt, - Collection c) { - - if ((rt != null) && (c != null) && (!c.isEmpty())) { - try { - ArrayList aux = new ArrayList(); - for (TSURL tsurl : c) { - aux.add(tsurl.toString()); - } - dao.abortChunksOfRequest(rt.toString(), aux); - } catch (ClassCastException e) { - log.error("REQUEST SUMMARY CATALOG! Unexpected error in " - + "abortChunksOfRequest: the supplied Collection did not contain " - + "TSURLs! Error: {}", e.getMessage(), e); - } - } - } - - /** - * Method used to abort a request that HAS been fetched for processing; abort - * is only applied to those SURLs of the request specified in the Collection; - * if the status of the request associated to the supplied request token is - * different from SRM_REQUEST_INPROGRESS, then nothing takes place; likewise - * if the supplied token does not correspond to any request, if it is null, if - * the Collection is null, or the Collection does not contain TSURLs. 
- */ - synchronized public void abortChunksOfInProgressRequest(TRequestToken rt, - Collection tsurls) { - - if ((rt != null) && (tsurls != null) && (!tsurls.isEmpty())) { - try { - List aux = new ArrayList(); - for (TSURL tsurl : tsurls) { - aux.add(tsurl.toString()); - } - dao.abortChunksOfInProgressRequest(rt.toString(), aux); - } catch (ClassCastException e) { - log.error("REQUEST SUMMARY CATALOG! Unexpected error in " - + "abortChunksOfInProgressRequest: the supplied Collection did not " - + "contain TSURLs! Error: {}", e.getMessage()); - } - } - } - - synchronized public RequestSummaryData find(TRequestToken requestToken) - throws IllegalArgumentException { - - if (requestToken == null || requestToken.toString().trim().isEmpty()) { - throw new IllegalArgumentException( - "Unable to perform find, illegal arguments: requestToken=" - + requestToken); - } - RequestSummaryDataTO to = dao.find(requestToken.toString()); - if (to != null) { - try { - RequestSummaryData data = makeOne(to); - if (data != null) { - log.debug("REQUEST SUMMARY CATALOG: {} associated to {} retrieved", - data.requestToken(), data.gridUser().getDn()); - return data; - } - } catch (IllegalArgumentException e) { - log.error("REQUEST SUMMARY CATALOG; Failure performing makeOne operation. " - + "IllegalArgumentException: {}", e.getMessage(), e); - } - } else { - log.debug("REQUEST SUMMARY CATALOG: {} token not found", requestToken); - } - return null; - } - - /** - * Method that returns the TRequestType associated to the request with the - * supplied TRequestToken. If no request exists with that token, or the type - * cannot be established from the DB, or the supplied token is null, then an - * EMPTY TRequestType is returned. 
- */
-  synchronized public TRequestType typeOf(TRequestToken rt) {
-
-    TRequestType result = TRequestType.EMPTY;
-    String type = null;
-    if (rt != null) {
-      type = dao.typeOf(rt.toString());
-      if (type != null && !type.isEmpty())
-        result = RequestTypeConverter.getInstance().toSTORM(type);
-    }
-    return result;
-  }
-
-  /**
-   * Method used to abort a request that HAS been fetched for processing; if the
-   * status of the request associated to the supplied request token tok is
-   * different from SRM_REQUEST_INPROGRESS, then nothing takes place; likewise
-   * if the supplied token does not correspond to any request, or if it is null.
-   */
-  synchronized public void abortInProgressRequest(TRequestToken rt) {
-
-    if (rt != null) {
-      dao.abortInProgressRequest(rt.toString());
-    }
-  }
+  private static final Logger log = LoggerFactory.getLogger(RequestSummaryCatalog.class);
+
+  private static RequestSummaryCatalog instance;
+  private final RequestSummaryDAO dao;
+
+  public static synchronized RequestSummaryCatalog getInstance() {
+    if (instance == null) {
+      instance = new RequestSummaryCatalog();
+    }
+    return instance;
+  }
+
+  private RequestSummaryCatalog() {
+    dao = RequestSummaryDAOMySql.getInstance();
+  }
+
+  /**
+   * Method in charge of retrieving RequestSummaryData associated to new requests, that is those
+   * found in SRM_REQUEST_QUEUED global status; such requests then transit into
+   * SRM_REQUEST_INPROGRESS. The actual number of fetched requests depends on the configured
+   * ceiling.
+   *
+   * If no new request is found, an empty Collection is returned. If a request is malformed, then
+   * that request is failed and an attempt is made to signal such occurrence in the DB. Only
+   * correctly formed requests are returned.
+ */
+  synchronized public Collection fetchNewRequests(int capacity) {
+
+    List list = Lists.newArrayList();
+
+    Collection c = dao.fetchNewRequests(capacity);
+    if (c == null || c.isEmpty()) {
+      return list;
+    }
+    int fetched = c.size();
+    log.debug("REQUEST SUMMARY CATALOG: {} new requests picked up.", fetched);
+    for (RequestSummaryDataTO auxTO : c) {
+      RequestSummaryData aux = null;
+      try {
+        aux = makeOne(auxTO);
+      } catch (IllegalArgumentException e) {
+        log.error("REQUEST SUMMARY CATALOG: Failure while performing makeOne "
+            + "operation. IllegalArgumentException: {}", e.getMessage(), e);
+        continue;
+      }
+      if (aux != null) {
+        log.debug("REQUEST SUMMARY CATALOG: {} associated to {} included " + "for processing",
+            aux.requestToken(), aux.gridUser().getDn());
+        list.add(aux);
+      }
+    }
+    int ret = list.size();
+    if (ret < fetched) {
+      log.warn("REQUEST SUMMARY CATALOG: including {} requests for processing, "
+          + "since the dropped ones were malformed!", ret);
+    } else {
+      log.debug("REQUEST SUMMARY CATALOG: including for processing all {} " + "requests.", ret);
+    }
+    if (!list.isEmpty()) {
+      log.debug("REQUEST SUMMARY CATALOG: returning {}\n\n", list);
+    }
+    return list;
+  }
+
+  /**
+   * Private method used to create a RequestSummaryData object, from a RequestSummaryDataTO. If a
+   * chunk cannot be created, an error message gets logged and an attempt is made to signal in the
+   * DB that the request is malformed.
+ */ + private RequestSummaryData makeOne(RequestSummaryDataTO to) throws IllegalArgumentException { + + TRequestType auxrtype = RequestTypeConverter.getInstance().toSTORM(to.requestType()); + if (auxrtype == TRequestType.EMPTY) { + StringBuilder sb = new StringBuilder(); + sb.append("TRequestType could not be created from its String representation "); + sb.append(to.requestType()); + sb.append("\n"); + log.warn(sb.toString()); + throw new IllegalArgumentException( + "Invalid TRequestType in the provided RequestSummaryDataTO"); + } + TRequestToken auxrtoken; + try { + auxrtoken = new TRequestToken(to.requestToken(), to.timestamp()); + } catch (InvalidTRequestTokenAttributesException e) { + log.warn("Unable to create TRequestToken from RequestSummaryDataTO. " + + "InvalidTRequestTokenAttributesException: {}", e.getMessage()); + throw new IllegalArgumentException( + "Unable to create TRequestToken from RequestSummaryDataTO."); + } + GridUserInterface auxgu; + + try { + auxgu = loadVomsGridUser(to.clientDN(), to.vomsAttributes()); + } catch (MalformedGridUserException e) { + StringBuilder sb = new StringBuilder(); + sb.append("VomsGridUser could not be created from DN String "); + sb.append(to.clientDN()); + sb.append(" voms attributes String "); + sb.append(to.vomsAttributes()); + sb.append(" and from request token String "); + sb.append(to.requestToken()); + log.warn("{}. MalformedGridUserException: {}", sb.toString(), e.getMessage()); + throw new IllegalArgumentException("Unable to load Voms Grid User from RequestSummaryDataTO. " + + "MalformedGridUserException: " + e.getMessage()); + } + RequestSummaryData data = null; + try { + data = new RequestSummaryData(auxrtype, auxrtoken, auxgu); + data.setPrimaryKey(to.primaryKey()); + } catch (InvalidRequestSummaryDataAttributesException e) { + dao.failRequest(to.primaryKey(), "The request data is malformed!"); + log.warn("REQUEST SUMMARY CATALOG! Unable to create RequestSummaryData. 
" + + "InvalidRequestSummaryDataAttributesException: {}", e.getMessage(), e); + throw new IllegalArgumentException("Unable to reate RequestSummaryData"); + } + TReturnStatus status = null; + if (to.getStatus() != null) { + TStatusCode code = StatusCodeConverter.getInstance().toSTORM(to.getStatus()); + if (code == TStatusCode.EMPTY) { + log.warn("RequestSummaryDataTO retrieved StatusCode was not " + "recognised: {}", + to.getStatus()); + } else { + status = new TReturnStatus(code, to.getErrstring()); + } + } + data.setUserToken(to.getUserToken()); + data.setRetrytime(to.getRetrytime()); + if (to.getPinLifetime() != null) { + data.setPinLifetime(TLifeTimeInSeconds + .make(PinLifetimeConverter.getInstance().toStoRM(to.getPinLifetime()), TimeUnit.SECONDS)); + } + data.setSpaceToken(to.getSpaceToken()); + data.setStatus(status); + data.setErrstring(to.getErrstring()); + data.setRemainingTotalTime(to.getRemainingTotalTime()); + data.setNbreqfiles(to.getNbreqfiles()); + data.setNumOfCompleted(to.getNumOfCompleted()); + if (to.getFileLifetime() != null) { + data.setFileLifetime(TLifeTimeInSeconds.make(to.getFileLifetime(), TimeUnit.SECONDS)); + } + + data.setDeferredStartTime(to.getDeferredStartTime()); + data.setNumOfWaiting(to.getNumOfWaiting()); + data.setNumOfFailed(to.getNumOfFailed()); + data.setRemainingDeferredStartTime(to.getRemainingDeferredStartTime()); + return data; + } + + /** + * Private method that holds the logic for creating a VomsGridUser from persistence and to load + * any available Proxy. For the moment the VOMS attributes present in persistence are NOT loaded! 
+ */ + private GridUserInterface loadVomsGridUser(String dn, String fqansString) + throws MalformedGridUserException { + + log.debug("load VomsGridUser for dn='{}' and fqansString='{}'", dn, fqansString); + + if (dn == null) { + throw new MalformedGridUserException("Invalid null DN"); + } + if (fqansString == null || fqansString.isEmpty()) { + return GridUserManager.makeGridUser(dn); + } + + FQAN[] fqans = new FQAN[fqansString.split("#").length]; + int i = 0; + for (String fqan : fqansString.split("#")) { + fqans[i++] = new FQAN(fqan); + } + try { + return GridUserManager.makeVOMSGridUser(dn, fqans); + } catch (IllegalArgumentException e) { + log.error("Unexpected error on voms grid user creation. " + "IllegalArgumentException: {}", + e.getMessage(), e); + throw new MalformedGridUserException(e.getMessage()); + } + } + + /** + * Method used to update the global status of a request identified by TRequestToken, to the + * supplied TReturnStatus. In case of any exception nothing happens. + */ + synchronized public void updateGlobalStatus(TRequestToken rt, TReturnStatus status) { + + dao.updateGlobalStatus(rt, status.getStatusCode(), status.getExplanation()); + } + + public void updateFromPreviousGlobalStatus(TRequestToken requestToken, + TStatusCode expectedStatusCode, TStatusCode newStatusCode, String explanation) { + + dao.updateGlobalStatusOnMatchingGlobalStatus(requestToken, expectedStatusCode, newStatusCode, + explanation); + } + + /** + * Method used to update the global status of a request identified by TRequestToken, to the + * supplied TReturnStatus. The pin lifetime and the file lifetime are updated in order to start + * the countdown from the moment the status is updated. In case of any exception nothing happens. 
+ */ + synchronized public void updateGlobalStatusPinFileLifetime(TRequestToken rt, + TReturnStatus status) { + + dao.updateGlobalStatusPinFileLifetime(rt, status.getStatusCode(), status.getExplanation()); + } + + /** + * Method used to change the global status of the supplied request to SRM_FAILURE, as well as that + * of each single chunk in the request. If the request type is not supported by the logic, only + * the global status is updated and an error log gets written warning of the unsupported business + * logic. + */ + public synchronized void failRequest(RequestSummaryData rsd, String explanation) { + + Preconditions.checkNotNull(rsd); + TRequestType rtype = rsd.requestType(); + if (PREPARE_TO_GET.equals(rtype)) { + dao.failPtGRequest(rsd.primaryKey(), explanation); + } else if (PREPARE_TO_PUT.equals(rtype)) { + dao.failPtPRequest(rsd.primaryKey(), explanation); + } else { + dao.failRequest(rsd.primaryKey(), explanation); + } + } + + /** + * Method used to abort a request that HAS been fetched for processing; abort is only applied to + * those SURLs of the request specified in the Collection; if the status of the request associated + * to the supplied request token is different from SRM_REQUEST_INPROGRESS, then nothing takes + * place; likewise if the supplied token does not correspond to any request, if it is null, if the + * Collection is null, or the Collection does not contain TSURLs. + */ + synchronized public void abortChunksOfInProgressRequest(TRequestToken rt, + Collection tsurls) { + + if ((rt != null) && (tsurls != null) && (!tsurls.isEmpty())) { + try { + List aux = new ArrayList(); + for (TSURL tsurl : tsurls) { + aux.add(tsurl.toString()); + } + dao.abortChunksOfInProgressRequest(rt, aux); + } catch (ClassCastException e) { + log.error("REQUEST SUMMARY CATALOG! Unexpected error in " + + "abortChunksOfInProgressRequest: the supplied Collection did not " + + "contain TSURLs! 
Error: {}", e.getMessage()); + } + } + } + + synchronized public RequestSummaryData find(TRequestToken requestToken) + throws IllegalArgumentException { + + if (requestToken == null || requestToken.toString().trim().isEmpty()) { + throw new IllegalArgumentException( + "Unable to perform find, illegal arguments: requestToken=" + requestToken); + } + RequestSummaryDataTO to = dao.find(requestToken); + if (to != null) { + try { + RequestSummaryData data = makeOne(to); + if (data != null) { + log.debug("REQUEST SUMMARY CATALOG: {} associated to {} retrieved", data.requestToken(), + data.gridUser().getDn()); + return data; + } + } catch (IllegalArgumentException e) { + log.error("REQUEST SUMMARY CATALOG; Failure performing makeOne operation. " + + "IllegalArgumentException: {}", e.getMessage(), e); + } + } else { + log.debug("REQUEST SUMMARY CATALOG: {} token not found", requestToken); + } + return null; + } + + /** + * Method that returns the TRequestType associated to the request with the supplied TRequestToken. + * If no request exists with that token, or the type cannot be established from the DB, or the + * supplied token is null, then an EMPTY TRequestType is returned. + */ + synchronized public TRequestType typeOf(TRequestToken rt) { + + TRequestType result = TRequestType.EMPTY; + if (rt != null) { + result = dao.getRequestType(rt); + } + return result; + } + + /** + * Method used to abort a request that HAS been fetched for processing; if the status of the + * request associated to the supplied request token tok is different from SRM_REQUEST_INPROGRESS, + * then nothing takes place; likewise if the supplied token does not correspond to any request, or + * if it is null. 
+ */ + synchronized public void abortInProgressRequest(TRequestToken rt) { + + if (rt != null) { + dao.abortInProgressRequest(rt); + } + } } diff --git a/src/main/java/it/grid/storm/catalogs/RequestSummaryDAO.java b/src/main/java/it/grid/storm/catalogs/RequestSummaryDAO.java deleted file mode 100644 index 1680e4355..000000000 --- a/src/main/java/it/grid/storm/catalogs/RequestSummaryDAO.java +++ /dev/null @@ -1,1390 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.catalogs; - -import com.google.common.collect.Lists; - -import it.grid.storm.config.Configuration; -import it.grid.storm.srm.types.TRequestToken; -import it.grid.storm.srm.types.TRequestType; -import it.grid.storm.srm.types.TStatusCode; - -import java.sql.Connection; -import java.sql.DriverManager; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.SQLWarning; -import java.sql.Statement; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Iterator; -import java.util.List; -import java.util.Timer; -import java.util.TimerTask; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * DAO class for RequestSummaryCatalog. This DAO is specifically designed to - * connect to a MySQL DB. 
- * - * @author EGRID ICTP - * @version 3.0 - * @date May 2005 - */ -public class RequestSummaryDAO { - - private static final Logger log = LoggerFactory - .getLogger(RequestSummaryDAO.class); - - /** String with the name of the class for the DB driver */ - private final String driver = Configuration.getInstance().getDBDriver(); - /** String referring to the URL of the DB */ - private final String url = Configuration.getInstance().getDBURL(); - /** String with the password for the DB */ - private final String password = Configuration.getInstance().getDBPassword(); - /** String with the name for the DB */ - private final String name = Configuration.getInstance().getDBUserName(); - /** maximum number of requests that will be retrieved */ - private int limit; - /** Connection to DB - WARNING!!! It is kept open all the time! */ - private Connection con = null; - - /** milliseconds that must pass before reconnecting to DB */ - private final long period = Configuration.getInstance() - .getDBReconnectPeriod() * 1000; - /** initial delay in milliseconds before starting timer */ - private final long delay = Configuration.getInstance().getDBReconnectDelay() * 1000; - /** timer thread that will run a task to alert when reconnecting is necessary! */ - private Timer clock = null; - /** - * timer task that will update the boolean signaling that a reconnection is - * needed! - */ - private TimerTask clockTask = null; - /** boolean that tells whether reconnection is needed because of MySQL bug! 
*/ - private boolean reconnect = false; - - private static final RequestSummaryDAO dao = new RequestSummaryDAO(); - - private RequestSummaryDAO() { - - int aux = Configuration.getInstance().getPickingMaxBatchSize(); - if (aux > 1) { - limit = aux; - } else { - limit = 1; - } - setUpConnection(); - clock = new Timer(); - clockTask = new TimerTask() { - - @Override - public void run() { - - reconnect = true; - } - }; // clock task - clock.scheduleAtFixedRate(clockTask, delay, period); - } - - /** - * Method that returns the only instance of the RequestSummaryDAO. - */ - public static RequestSummaryDAO getInstance() { - - return dao; - } - - /** - * Method that retrieves requests in the SRM_REQUEST_QUEUED status: retrieved - * requests are limited to the number specified by the Configuration method - * getPicker2MaxBatchSize. All retrieved requests get their global status - * transited to SRM_REQUEST_INPROGRESS. A Collection of RequestSummaryDataTO - * is returned: if none are found, an empty collection is returned. - */ - public Collection findNew(int freeSlot) { - - PreparedStatement stmt = null; - ResultSet rs = null; - List list = Lists.newArrayList(); - - if (!checkConnection()) { - log - .error("REQUEST SUMMARY DAO - findNew: unable to get a valid connection!"); - return list; - } - // RequestSummaryDataTO - try { - // start transaction - con.setAutoCommit(false); - - int howMuch = -1; - if (freeSlot > limit) { - howMuch = limit; - } else { - howMuch = freeSlot; - } - - String query = "SELECT ID, config_RequestTypeID, r_token, timeStamp, " - + "client_dn, proxy FROM request_queue WHERE status=? 
LIMIT ?"; - - // get id, request type, request token and client_DN of newly added - // requests, which must be in SRM_REQUEST_QUEUED state - stmt = con.prepareStatement(query); - logWarnings(con.getWarnings()); - - stmt.setInt(1, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_REQUEST_QUEUED)); - stmt.setInt(2, howMuch); - - rs = stmt.executeQuery(); - logWarnings(stmt.getWarnings()); - - List rowids = new ArrayList(); // arraylist with selected ids - RequestSummaryDataTO aux = null; // RequestSummaryDataTO made from - // retrieved row - long auxid; // primary key of retrieved row - while (rs.next()) { - auxid = rs.getLong("ID"); - rowids.add(Long.valueOf(auxid)); - aux = new RequestSummaryDataTO(); - aux.setPrimaryKey(auxid); - aux.setRequestType(rs.getString("config_RequestTypeID")); - aux.setRequestToken(rs.getString("r_token")); - aux.setClientDN(rs.getString("client_dn")); - aux.setTimestamp(rs.getTimestamp("timeStamp")); - - /** - * This code is only for the 1.3.18. This is a workaround to get FQANs - * using the proxy field on request_queue. The FE use the proxy field of - * request_queue to insert a single FQAN string containing all FQAN - * separated by the "#" char. The proxy is a BLOB, hence it has to be - * properly converted in string. - */ - java.sql.Blob blob = rs.getBlob("proxy"); - if (blob != null) { - byte[] bdata = blob.getBytes(1, (int) blob.length()); - aux.setVomsAttributes(new String(bdata)); - } - - list.add(aux); - } - close(rs); - close(stmt); - - // transit state from SRM_REQUEST_QUEUED to SRM_REQUEST_INPROGRESS - if (!list.isEmpty()) { - logWarnings(con.getWarnings()); - String where = makeWhereString(rowids); - String update = "UPDATE request_queue SET status=" - + StatusCodeConverter.getInstance().toDB( - TStatusCode.SRM_REQUEST_INPROGRESS) + ", errstring=?" 
- + " WHERE ID IN " + where; - stmt = con.prepareStatement(update); - logWarnings(stmt.getWarnings()); - stmt.setString(1, "Request handled!"); - logWarnings(stmt.getWarnings()); - log.trace("REQUEST SUMMARY DAO - findNew: executing {}", stmt); - stmt.executeUpdate(); - close(stmt); - } - - // commit and finish transaction - con.commit(); - logWarnings(con.getWarnings()); - con.setAutoCommit(true); - logWarnings(con.getWarnings()); - } catch (SQLException e) { - log.error("REQUEST SUMMARY DAO - findNew: Unable to complete picking. " - + "Error: {}. Rolling back!", e.getMessage(), e); - } finally { - close(rs); - close(stmt); - } - // return collection of requests - if (!list.isEmpty()) { - log.debug("REQUEST SUMMARY DAO - findNew: returning {}", list); - } - return list; - } - - /** - * Method used to signal in the DB that a request failed: the status of the - * request identified by the primary key index is transited to SRM_FAILURE, - * with the supplied explanation String. The supplied index is the primary key - * of the global request. In case of any error, nothing gets done and no - * exception is thrown, but proper error messages get logged. - */ - public void failRequest(long index, String explanation) { - - if (!checkConnection()) { - log - .error("REQUEST SUMMARY DAO - failRequest: unable to get a valid connection!"); - return; - } - String signalSQL = "UPDATE request_queue r " + "SET r.status=" - + StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_FAILURE) - + ", r.errstring=? " + "WHERE r.ID=?"; - PreparedStatement signal = null; - try { - signal = con.prepareStatement(signalSQL); - logWarnings(con.getWarnings()); - signal.setString(1, explanation); // Prepared statement spares - // DB-specific String notation! - logWarnings(signal.getWarnings()); - signal.setLong(2, index); - logWarnings(signal.getWarnings()); - log.trace("REQUEST SUMMARY DAO! 
failRequest executing: {}", signal); - signal.executeUpdate(); - logWarnings(signal.getWarnings()); - } catch (SQLException e) { - log.error("REQUEST SUMMARY DAO! Unable to transit request identified by " - + "ID {} to SRM_FAILURE! Error: {}", index, e.getMessage(), e); - } finally { - close(signal); - } - } - - /** - * Method used to signal in the DB that a PtGRequest failed. The global status - * transits to SRM_FAILURE, as well as that of each chunk associated to the - * request. The supplied explanation string is used both for the global status - * as well as for each individual chunk. The supplied index is the primary key - * of the global request. In case of any error, nothing gets done and no - * exception is thrown, but proper error messages get logged. - */ - public void failPtGRequest(long index, String explanation) { - - if (!checkConnection()) { - log - .error("REQUEST SUMMARY DAO - failPtGRequest: unable to get a valid connection!"); - return; - } - String requestSQL = "UPDATE request_queue r " - + "SET r.status=?, r.errstring=? " + "WHERE r.ID=?"; - String chunkSQL = "UPDATE " - + "status_Get s JOIN (request_queue r, request_Get g) ON s.request_GetID=g.ID AND g.request_queueID=r.ID " - + "SET s.statusCode=?, s.explanation=? " + "WHERE r.ID=?"; - PreparedStatement request = null; - PreparedStatement chunk = null; - int failCode = StatusCodeConverter.getInstance().toDB( - TStatusCode.SRM_FAILURE); - try { - // start transaction - con.setAutoCommit(false); - - // update global status - request = con.prepareStatement(requestSQL); - logWarnings(con.getWarnings()); - request.setInt(1, failCode); - logWarnings(request.getWarnings()); - request.setString(2, explanation); // Prepared statement spares - // DB-specific String notation! - logWarnings(request.getWarnings()); - request.setLong(3, index); - logWarnings(request.getWarnings()); - log.trace("REQUEST SUMMARY DAO! 
failPtGRequest executing: {}", request); - request.executeUpdate(); - logWarnings(request.getWarnings()); - - // update each chunk status - chunk = con.prepareStatement(chunkSQL); - logWarnings(con.getWarnings()); - chunk.setInt(1, failCode); - logWarnings(chunk.getWarnings()); - chunk.setString(2, explanation); // Prepared statement spares - // DB-specific String notation! - logWarnings(chunk.getWarnings()); - chunk.setLong(3, index); - logWarnings(chunk.getWarnings()); - log.trace("REQUEST SUMMARY DAO! failPtGRequest executing: {}", chunk); - chunk.executeUpdate(); - logWarnings(chunk.getWarnings()); - - // commit and finish transaction - con.commit(); - logWarnings(con.getWarnings()); - con.setAutoCommit(true); - logWarnings(con.getWarnings()); - } catch (SQLException e) { - log.error("REQUEST SUMMARY DAO! Unable to transit PtG request identified " - + "by ID {} to SRM_FAILURE! Error: {}\nRolling back...", index, - e.getMessage(), e); - rollback(con); - } finally { - close(request); - close(chunk); - } - } - - /** - * Method used to signal in the DB that a PtPRequest failed. The global status - * transits to SRM_FAILURE, as well as that of each chunk associated to the - * request. The supplied explanation string is used both for the global status - * as well as for each individual chunk. The supplied index is the primary key - * of the global request. In case of any error, nothing gets done and no - * exception is thrown, but proper error messagges get logged. - */ - public void failPtPRequest(long index, String explanation) { - - if (!checkConnection()) { - log - .error("REQUEST SUMMARY DAO - failPtPRequest: unable to get a valid connection!"); - return; - } - String requestSQL = "UPDATE request_queue r " - + "SET r.status=?, r.errstring=? " + "WHERE r.ID=?"; - String chunkSQL = "UPDATE " - + "status_Put s JOIN (request_queue r, request_Put p) ON s.request_PutID=p.ID AND p.request_queueID=r.ID " - + "SET s.statusCode=?, s.explanation=? 
" + "WHERE r.ID=?"; - PreparedStatement request = null; - PreparedStatement chunk = null; - int failCode = StatusCodeConverter.getInstance().toDB( - TStatusCode.SRM_FAILURE); - try { - // start transaction - con.setAutoCommit(false); - - // update global status - request = con.prepareStatement(requestSQL); - logWarnings(con.getWarnings()); - request.setInt(1, failCode); - logWarnings(request.getWarnings()); - request.setString(2, explanation); // Prepared statement spares - // DB-specific String notation! - logWarnings(request.getWarnings()); - request.setLong(3, index); - logWarnings(request.getWarnings()); - log.trace("REQUEST SUMMARY DAO! failPtPRequest executing: {}", request); - request.executeUpdate(); - logWarnings(request.getWarnings()); - - // update each chunk status - chunk = con.prepareStatement(chunkSQL); - logWarnings(con.getWarnings()); - chunk.setInt(1, failCode); - logWarnings(chunk.getWarnings()); - chunk.setString(2, explanation); // Prepared statement spares - // DB-specific String notation! - logWarnings(chunk.getWarnings()); - chunk.setLong(3, index); - logWarnings(chunk.getWarnings()); - log.trace("REQUEST SUMMARY DAO! failPtPRequest executing: {}", chunk); - chunk.executeUpdate(); - logWarnings(chunk.getWarnings()); - - // commit and finish transaction - con.commit(); - logWarnings(con.getWarnings()); - con.setAutoCommit(true); - logWarnings(con.getWarnings()); - } catch (SQLException e) { - log.error("REQUEST SUMMARY DAO! Unable to transit PtP request identified " - + "by ID {} to SRM_FAILURE! Error: {}\nRolling back...", index, - e.getMessage(), e); - rollback(con); - } finally { - close(request); - close(chunk); - } - } - - /** - * Method used to signal in the DB that a CopyRequest failed. The global - * status transits to SRM_FAILURE, as well as that of each chunk associated to - * the request. The supplied explanation string is used both for the global - * status as well as for each individual chunk. 
The supplied index is the - * primary key of the global request. In case of any error, nothing gets done - * and no exception is thrown, but proper error messagges get logged. - */ - public void failCopyRequest(long index, String explanation) { - - if (!checkConnection()) { - log - .error("REQUEST SUMMARY DAO - failCopyRequest: unable to get a valid connection!"); - return; - } - String requestSQL = "UPDATE request_queue r " - + "SET r.status=?, r.errstring=? " + "WHERE r.ID=?"; - String chunkSQL = "UPDATE " - + "status_Copy s JOIN (request_queue r, request_Copy c) ON s.request_CopyID=c.ID AND c.request_queueID=r.ID " - + "SET s.statusCode=?, s.explanation=? " + "WHERE r.ID=?"; - PreparedStatement request = null; - PreparedStatement chunk = null; - int failCode = StatusCodeConverter.getInstance().toDB( - TStatusCode.SRM_FAILURE); - try { - // start transaction - con.setAutoCommit(false); - - // update global status - request = con.prepareStatement(requestSQL); - logWarnings(con.getWarnings()); - request.setInt(1, failCode); - logWarnings(request.getWarnings()); - request.setString(2, explanation); // Prepared statement spares - // DB-specific String notation! - logWarnings(request.getWarnings()); - request.setLong(3, index); - logWarnings(request.getWarnings()); - log.trace("REQUEST SUMMARY DAO! failCopyRequest executing: {}", request); - request.executeUpdate(); - logWarnings(request.getWarnings()); - - // update each chunk status - chunk = con.prepareStatement(chunkSQL); - logWarnings(con.getWarnings()); - chunk.setInt(1, failCode); - logWarnings(chunk.getWarnings()); - chunk.setString(2, explanation); // Prepared statement spares - // DB-specific String notation! - logWarnings(chunk.getWarnings()); - chunk.setLong(3, index); - logWarnings(chunk.getWarnings()); - log.trace("REQUEST SUMMARY DAO! 
failCopyRequest executing: {}", chunk); - chunk.executeUpdate(); - logWarnings(chunk.getWarnings()); - - // commit and finish transaction - con.commit(); - logWarnings(con.getWarnings()); - con.setAutoCommit(true); - logWarnings(con.getWarnings()); - } catch (SQLException e) { - log.error("REQUEST SUMMARY DAO! Unable to transit Copy request identified " - + "by ID {} to SRM_FAILURE! Error: {}\nRolling back...", index, - e.getMessage(), e); - rollback(con); - } finally { - close(request); - close(chunk); - } - } - - /** - * Method used to update the global status of the request identified by the - * RequestToken rt. It gets updated the supplied status, with the supplied - * explanation String. If the supplied request token does not exist, nothing - * happens. - */ - public void updateGlobalStatus(String rt, int status, String explanation) { - - if (!checkConnection()) { - log - .error("REQUEST SUMMARY DAO - updateGlobalStatus: unable to get a valid connection!"); - return; - } - PreparedStatement update = null; - try { - update = con - .prepareStatement("UPDATE request_queue SET status=?, errstring=? 
WHERE r_token=?"); - logWarnings(con.getWarnings()); - update.setInt(1, status); - logWarnings(update.getWarnings()); - update.setString(2, explanation); - logWarnings(update.getWarnings()); - update.setString(3, rt); - logWarnings(update.getWarnings()); - log.trace("REQUEST SUMMARY DAO - updateGlobalStatus: executing {}", update); - update.executeUpdate(); - logWarnings(update.getWarnings()); - } catch (SQLException e) { - log.error("REQUEST SUMMARY DAO: {}", e.getMessage(), e); - } finally { - close(update); - } - } - - public void updateGlobalStatusOnMatchingGlobalStatus( - TRequestToken requestToken, TStatusCode expectedStatusCode, - TStatusCode newStatusCode, String explanation) { - - if (!checkConnection()) { - log - .error("REQUEST SUMMARY DAO - updateGlobalStatusOnMatchingGlobalStatus: " - + "unable to get a valid connection!"); - return; - } - PreparedStatement update = null; - try { - update = con - .prepareStatement("UPDATE request_queue SET status=?, errstring=? WHERE r_token=? AND status=?"); - logWarnings(con.getWarnings()); - update.setInt(1, StatusCodeConverter.getInstance().toDB(newStatusCode)); - logWarnings(update.getWarnings()); - update.setString(2, explanation); - logWarnings(update.getWarnings()); - update.setString(3, requestToken.toString()); - logWarnings(update.getWarnings()); - update.setInt(4, - StatusCodeConverter.getInstance().toDB(expectedStatusCode)); - logWarnings(update.getWarnings()); - log.trace("REQUEST SUMMARY DAO - updateGlobalStatusOnMatchingGlobalStatus: " - + "executing {}", update); - update.executeUpdate(); - logWarnings(update.getWarnings()); - } catch (SQLException e) { - log.error("REQUEST SUMMARY DAO: {}", e.getMessage(), e); - } finally { - close(update); - } - } - - /** - * Method used to update the global status of the request identified by the - * RequestToken rt. 
It gets updated the supplied status, with the supplied - * explanation String and pin and file lifetimes are updated in order to start - * the countdown from now. If the supplied request token does not exist, - * nothing happens. - */ - public void updateGlobalStatusPinFileLifetime(String rt, int status, - String explanation) { - - if (!checkConnection()) { - log.error("REQUEST SUMMARY DAO - updateGlobalStatusPinFileLifetime: " - + "unable to get a valid connection!"); - return; - } - PreparedStatement update = null; - - String query = "UPDATE request_queue SET status=?, errstring=?, " - + "pinLifetime=pinLifetime+(UNIX_TIMESTAMP()-UNIX_TIMESTAMP(timeStamp)) " - + "WHERE r_token=?"; - - try { - update = con.prepareStatement(query); - logWarnings(con.getWarnings()); - - update.setInt(1, status); - logWarnings(update.getWarnings()); - - update.setString(2, explanation); - logWarnings(update.getWarnings()); - - update.setString(3, rt); - logWarnings(update.getWarnings()); - - log.trace("REQUEST SUMMARY DAO - updateGlobalStatus: executing {}", update); - - update.executeUpdate(); - logWarnings(update.getWarnings()); - - } catch (SQLException e) { - log.error("REQUEST SUMMARY DAO: {}", e.getMessage(), e); - } finally { - close(update); - } - } - - /** - * Method used to transit the status of a request that is in - * SRM_REQUEST_QUEUED state, to SRM_ABORTED. All files associated with the - * request will also get their status changed to SRM_ABORTED. If the supplied - * token is null, or not found, or not in the SRM_REQUEST_QUEUED state, then - * nothing happens. - */ - public void abortRequest(String rt) { - - if (!checkConnection()) { - log.error("REQUEST SUMMARY DAO - abortRequest: unable to get a valid connection!"); - return; - } - PreparedStatement update = null; - PreparedStatement query = null; - ResultSet rs = null; - try { - query = con - .prepareStatement("SELECT ID,config_RequestTypeID FROM request_queue WHERE r_token=? 
AND status=?"); - logWarnings(con.getWarnings()); - query.setString(1, rt); - logWarnings(query.getWarnings()); - query.setInt(2, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_REQUEST_QUEUED)); - logWarnings(query.getWarnings()); - log.trace("REQUEST SUMMARY DAO - abortRequest - {}", query); - rs = query.executeQuery(); - logWarnings(query.getWarnings()); - if (rs.next()) { - long id = rs.getLong("ID"); - String type = rs.getString("config_RequestTypeID"); - update = con - .prepareStatement("UPDATE request_queue SET status=?, errstring=? WHERE ID=?"); - logWarnings(con.getWarnings()); - update.setInt(1, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_ABORTED)); - logWarnings(update.getWarnings()); - update.setString(2, "User aborted request!"); - logWarnings(update.getWarnings()); - update.setLong(3, id); - logWarnings(update.getWarnings()); - log.trace("REQUEST SUMMARY DAO - abortRequest - {}", update); - update.executeUpdate(); - logWarnings(update.getWarnings()); - close(update); - // update single chunk file statuses - TRequestType rtyp = RequestTypeConverter.getInstance().toSTORM(type); - String statusTable = null; - String requestTable = null; - String joinColumn = null; - if (rtyp != TRequestType.EMPTY) { - if (rtyp == TRequestType.PREPARE_TO_GET) { - statusTable = "status_Get"; - requestTable = "request_Get"; - joinColumn = "request_GetID"; - } else if (rtyp == TRequestType.PREPARE_TO_PUT) { - requestTable = "request_Put"; - statusTable = "status_Put"; - joinColumn = "request_PutID"; - } else if (rtyp == TRequestType.COPY) { - requestTable = "request_Copy"; - statusTable = "status_Copy"; - joinColumn = "request_CopyID"; - } else { - requestTable = "request_BoL"; - statusTable = "status_BoL"; - joinColumn = "request_BoLID"; - } - String auxstr = "UPDATE " + statusTable - + " s JOIN (request_queue r, " + requestTable + " t) ON (s." - + joinColumn + "=t.ID AND t.request_queueID=r.ID) " - + "SET s.statusCode=?, s.explanation=? 
" + "WHERE r.ID=?"; - update = con.prepareStatement(auxstr); - logWarnings(con.getWarnings()); - update.setInt(1, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_ABORTED)); - logWarnings(update.getWarnings()); - update.setString(2, "User aborted request!"); - logWarnings(update.getWarnings()); - update.setLong(3, id); - logWarnings(update.getWarnings()); - log.trace("REQUEST SUMMARY DAO - abortRequest - {}", update); - update.executeUpdate(); - logWarnings(update.getWarnings()); - } else { - log.error("REQUEST SUMMARY DAO - Unable to complete abortRequest: " - + "could not update file statuses because the request type could " - + "not be translated from the DB!"); - } - } - } catch (SQLException e) { - log.error("REQUEST SUMMARY DAO - abortRequest: {}", e.getMessage(), e); - } finally { - close(rs); - close(query); - close(update); - } - } - - /** - * Method used to transit the status of a request that is in - * SRM_REQUEST_INPROGRESS state, to SRM_ABORTED. All files associated with the - * request will also get their status changed to SRM_ABORTED. If the supplied - * token is null, or not found, or not in the SRM_REQUEST_INPROGRESS state, - * then nothing happens. - */ - public void abortInProgressRequest(String rt) { - - if (!checkConnection()) { - log.error("REQUEST SUMMARY DAO - abortInProgressRequest: unable to get " - + "a valid connection!"); - return; - } - PreparedStatement update = null; - PreparedStatement query = null; - ResultSet rs = null; - try { - query = con - .prepareStatement("SELECT ID,config_RequestTypeID FROM request_queue WHERE r_token=? 
AND status=?"); - logWarnings(con.getWarnings()); - query.setString(1, rt); - logWarnings(query.getWarnings()); - query.setInt(2, StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_REQUEST_INPROGRESS)); - logWarnings(query.getWarnings()); - log.trace("REQUEST SUMMARY DAO - abortInProgressRequest - {}", query); - rs = query.executeQuery(); - logWarnings(query.getWarnings()); - if (rs.next()) { - // token found... - // get ID - long id = rs.getLong("ID"); - String type = rs.getString("config_RequestTypeID"); - // update global request status - update = con - .prepareStatement("UPDATE request_queue SET status=?, errstring=? WHERE ID=?"); - logWarnings(con.getWarnings()); - update.setInt(1, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_ABORTED)); - logWarnings(update.getWarnings()); - update.setString(2, "User aborted request!"); - logWarnings(update.getWarnings()); - update.setLong(3, id); - logWarnings(update.getWarnings()); - log.trace("REQUEST SUMMARY DAO - abortInProgressRequest - {}", update); - update.executeUpdate(); - logWarnings(update.getWarnings()); - close(update); - // update single chunk file statuses - TRequestType rtyp = RequestTypeConverter.getInstance().toSTORM(type); - String statusTable = null; - String requestTable = null; - String joinColumn = null; - if (rtyp != TRequestType.EMPTY) { - if (rtyp == TRequestType.PREPARE_TO_GET) { - requestTable = "request_Get"; - statusTable = "status_Get"; - joinColumn = "request_GetID"; - } else if (rtyp == TRequestType.PREPARE_TO_PUT) { - requestTable = "request_Put"; - statusTable = "status_Put"; - joinColumn = "request_PutID"; - } else if (rtyp == TRequestType.COPY) { - requestTable = "request_Copy"; - statusTable = "status_Copy"; - joinColumn = "request_CopyID"; - } else { - requestTable = "request_BoL"; - statusTable = "status_BoL"; - joinColumn = "request_BoLID"; - } - String auxstr = "UPDATE " + statusTable - + " s JOIN (request_queue r, " + requestTable + " t ON s." 
- + joinColumn + "=t.ID AND t.request_queueID=r.ID )" - + "SET s.statusCode=?, s.explanation=? " + "WHERE r.ID=?"; - update = con.prepareStatement(auxstr); - logWarnings(con.getWarnings()); - update.setInt(1, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_ABORTED)); - logWarnings(update.getWarnings()); - update.setString(2, "User aborted request!"); - logWarnings(update.getWarnings()); - update.setLong(3, id); - logWarnings(update.getWarnings()); - log.trace("REQUEST SUMMARY DAO - abortInProgressRequest - {}", update); - update.executeUpdate(); - logWarnings(update.getWarnings()); - } else { - log.error("REQUEST SUMMARY DAO - Unable to complete " - + "abortInProgressRequest: could not update file statuses because " - + "the request type could not be translated from the DB!"); - } - } - } catch (SQLException e) { - log.error("REQUEST SUMMARY DAO - abortInProgressRequest: {}", - e.getMessage(), e); - } finally { - close(rs); - close(query); - close(update); - } - } - - /** - * Method used to transit the status of chunks of a request that is in - * SRM_REQUEST_QUEUED state, to SRM_ABORTED. If the supplied token is null, or - * not found, or not in the SRM_REQUEST_QUEUED state, then nothing happens. - */ - public void abortChunksOfRequest(String rt, Collection surls) { - - if (!checkConnection()) { - log.error("REQUEST SUMMARY DAO - abortChunksOfRequest: unable to get a valid connection!"); - return; - } - PreparedStatement update = null; - PreparedStatement query = null; - ResultSet rs = null; - try { - query = con - .prepareStatement("SELECT ID,config_RequestTypeID FROM request_queue WHERE r_token=? 
AND status=?"); - logWarnings(con.getWarnings()); - query.setString(1, rt); - logWarnings(query.getWarnings()); - query.setInt(2, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_REQUEST_QUEUED)); - logWarnings(query.getWarnings()); - log.trace("REQUEST SUMMARY DAO - abortChunksOfRequest - {}", query); - rs = query.executeQuery(); - logWarnings(query.getWarnings()); - if (rs.next()) { - long id = rs.getLong("ID"); - String type = rs.getString("config_RequestTypeID"); - // update single chunk file statuses - TRequestType rtyp = RequestTypeConverter.getInstance().toSTORM(type); - String statusTable = null; - String requestTable = null; - String joinColumn = null; - String surlColumn = null; - if (rtyp != TRequestType.EMPTY) { - if (rtyp == TRequestType.PREPARE_TO_GET) { - requestTable = "request_Get"; - statusTable = "status_Get"; - joinColumn = "request_GetID"; - surlColumn = "sourceSURL"; - } else if (rtyp == TRequestType.PREPARE_TO_PUT) { - requestTable = "request_Put"; - statusTable = "status_Put"; - joinColumn = "request_PutID"; - surlColumn = "targetSURL"; - } else if (rtyp == TRequestType.COPY) { - requestTable = "request_Copy"; - statusTable = "status_Copy"; - joinColumn = "request_CopyID"; - surlColumn = "targetSURL"; - } else { - requestTable = "request_BoL"; - statusTable = "status_BoL"; - joinColumn = "request_BoLID"; - surlColumn = "sourceSURL"; - } - String auxstr = "UPDATE " + statusTable - + " s JOIN (request_queue r, " + requestTable + " t ON s." - + joinColumn + "=t.ID AND t.request_queueID=r.ID " - + "SET s.statusCode=?, s.explanation=? " + "WHERE r.ID=? 
AND " - + surlColumn + " IN " + makeInString(surls); - update = con.prepareStatement(auxstr); - logWarnings(con.getWarnings()); - update.setInt(1, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_ABORTED)); - logWarnings(update.getWarnings()); - update.setString(2, "User aborted request!"); - logWarnings(update.getWarnings()); - update.setLong(3, id); - logWarnings(update.getWarnings()); - log.trace("REQUEST SUMMARY DAO - abortChunksOfRequest - {}", update); - update.executeUpdate(); - logWarnings(update.getWarnings()); - } else { - log.error("REQUEST SUMMARY DAO - Unable to complete " - + "abortChunksOfRequest: could not update file statuses because " - + "the request type could not be translated from the DB!"); - } - } - } catch (SQLException e) { - log.error("REQUEST SUMMARY DAO - abortChunksOfRequest: {}", - e.getMessage(), e); - } finally { - close(rs); - close(query); - close(update); - } - } - - /** - * Method used to transit the status of chunks of a request that is in - * SRM_REQUEST_INPROGRESS state, to SRM_ABORTED. If the supplied token is - * null, or not found, or not in the SRM_REQUEST_INPROGRESS state, then - * nothing happens. - */ - public void abortChunksOfInProgressRequest(String rt, Collection surls) { - - if (!checkConnection()) { - log.error("REQUEST SUMMARY DAO - abortChunksOfInProgressRequest: unable " - + "to get a valid connection!"); - return; - } - PreparedStatement update = null; - PreparedStatement query = null; - ResultSet rs = null; - try { - query = con - .prepareStatement("SELECT ID,config_RequestTypeID FROM request_queue WHERE r_token=? 
AND status=?"); - logWarnings(con.getWarnings()); - query.setString(1, rt); - logWarnings(query.getWarnings()); - query.setInt(2, StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_REQUEST_INPROGRESS)); - logWarnings(query.getWarnings()); - log.trace("REQUEST SUMMARY DAO - abortChunksOfInProgressRequest - {}", query); - rs = query.executeQuery(); - logWarnings(query.getWarnings()); - if (rs.next()) { - long id = rs.getLong("ID"); - String type = rs.getString("config_RequestTypeID"); - // update single chunk file statuses - TRequestType rtyp = RequestTypeConverter.getInstance().toSTORM(type); - String statusTable = null; - String requestTable = null; - String joinColumn = null; - String surlColumn = null; - if (rtyp != TRequestType.EMPTY) { - if (rtyp == TRequestType.PREPARE_TO_GET) { - requestTable = "request_Get"; - statusTable = "status_Get"; - joinColumn = "request_GetID"; - surlColumn = "sourceSURL"; - } else if (rtyp == TRequestType.PREPARE_TO_PUT) { - requestTable = "request_Put"; - statusTable = "status_Put"; - joinColumn = "request_PutID"; - surlColumn = "targetSURL"; - } else if (rtyp == TRequestType.COPY) { - requestTable = "request_Copy"; - statusTable = "status_Copy"; - joinColumn = "request_CopyID"; - surlColumn = "targetSURL"; - } else { - requestTable = "request_BoL"; - statusTable = "status_BoL"; - joinColumn = "request_BoLID"; - surlColumn = "sourceSURL"; - } - String auxstr = "UPDATE " + statusTable - + " s JOIN (request_queue r, " + requestTable + " t ON s." - + joinColumn + "=t.ID AND t.request_queueID=r.ID " - + "SET s.statusCode=?, s.explanation=? " + "WHERE r.ID=? 
AND " - + surlColumn + " IN " + makeInString(surls); - update = con.prepareStatement(auxstr); - logWarnings(con.getWarnings()); - update.setInt(1, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_ABORTED)); - logWarnings(update.getWarnings()); - update.setString(2, "User aborted request!"); - logWarnings(update.getWarnings()); - update.setLong(3, id); - logWarnings(update.getWarnings()); - log.trace("REQUEST SUMMARY DAO - abortChunksOfInProgressRequest " - + "- {}", update); - update.executeUpdate(); - logWarnings(update.getWarnings()); - } else { - log.error("REQUEST SUMMARY DAO - Unable to complete " - + "abortChunksOfInProgressRequest: could not update file statuses " - + "because the request type could not be translated from the DB!"); - } - } - } catch (SQLException e) { - log.error("REQUEST SUMMARY DAO - abortChunksOfInProgressRequest: {}", - e.getMessage(), e); - } finally { - close(rs); - close(query); - close(update); - } - } - - /** - * Private method that returns a String of all SURLS in the collection of - * String. - */ - private String makeInString(Collection c) { - - StringBuilder sb = new StringBuilder("("); - for (Iterator i = c.iterator(); i.hasNext();) { - sb.append(i.next()); - if (i.hasNext()) { - sb.append(","); - } - } - sb.append(")"); - return sb.toString(); - } - - /** - * Method that returns the config_RequestTypeID field present in request_queue - * table, for the request with the specified request token rt. In case of any - * error, the empty String "" is returned. 
- */ - public String typeOf(String rt) { - - PreparedStatement query = null; - ResultSet rs = null; - String result = ""; - if (!checkConnection()) { - log.error("REQUEST SUMMARY DAO - typeOf: unable to get a valid connection!"); - return result; - } - try { - query = con - .prepareStatement("SELECT config_RequestTypeID from request_queue WHERE r_token=?"); - logWarnings(con.getWarnings()); - query.setString(1, rt); - logWarnings(query.getWarnings()); - log.trace("REQUEST SUMMARY DAO - typeOf - {}", query); - rs = query.executeQuery(); - logWarnings(query.getWarnings()); - if (rs.next()) { - result = rs.getString("config_RequestTypeID"); - } - } catch (SQLException e) { - log.error("REQUEST SUMMARY DAO - typeOf - {}", e.getMessage(), e); - } finally { - close(rs); - close(query); - } - return result; - } - - /** - * Method that returns the config_RequestTypeID field present in request_queue - * table, for the request with the specified request token rt. In case of any - * error, the empty String "" is returned. 
- */ - public RequestSummaryDataTO find(String rt) { - - PreparedStatement query = null; - ResultSet rs = null; - RequestSummaryDataTO to = null; - if (!checkConnection()) { - log.error("REQUEST SUMMARY DAO - find: unable to get a valid connection!"); - return null; - } - try { - query = con - .prepareStatement("SELECT * from request_queue WHERE r_token=?"); - logWarnings(con.getWarnings()); - query.setString(1, rt); - con.setAutoCommit(false); - - rs = query.executeQuery(); - logWarnings(query.getWarnings()); - if (!rs.first()) { - log.debug("No requests found with token {}", rt); - return null; - } - to = new RequestSummaryDataTO(); - to.setPrimaryKey(rs.getLong("ID")); - to.setRequestType(rs.getString("config_RequestTypeID")); - to.setClientDN(rs.getString("client_dn")); - to.setUserToken(rs.getString("u_token")); - to.setRetrytime(rs.getInt("retrytime")); - to.setPinLifetime(rs.getInt("pinLifetime")); - to.setSpaceToken(rs.getString("s_token")); - to.setStatus(rs.getInt("status")); - to.setErrstring(rs.getString("errstring")); - to.setRequestToken(rs.getString("r_token")); - to.setRemainingTotalTime(rs.getInt("remainingTotalTime")); - to.setFileLifetime(rs.getInt("fileLifetime")); - to.setNbreqfiles(rs.getInt("nbreqfiles")); - to.setNumOfCompleted(rs.getInt("numOfCompleted")); - to.setNumOfWaiting(rs.getInt("numOfWaiting")); - to.setNumOfFailed(rs.getInt("numOfFailed")); - to.setTimestamp(rs.getTimestamp("timeStamp")); - - - java.sql.Blob blob = rs.getBlob("proxy"); - if (blob != null) { - byte[] bdata = blob.getBytes(1, (int) blob.length()); - to.setVomsAttributes(new String(bdata)); - } - to.setDeferredStartTime(rs.getInt("deferredStartTime")); - to.setRemainingDeferredStartTime(rs.getInt("remainingDeferredStartTime")); - - if (rs.next()) { - log.warn("More than a row matches token {}", rt); - } - close(rs); - close(query); - } catch (SQLException e) { - log.error("REQUEST SUMMARY DAO - find - {}", e.getMessage(), e); - } finally { - close(rs); - 
close(query); - } - return to; - } - - /** - * Method that purges expired requests: it only removes up to a fixed value of - * expired requests at a time. The value is configured and obtained from the - * configuration property getPurgeBatchSize. A List of Strings with the - * request tokens removed is returned. In order to completely remove all - * expired requests, simply keep invoking this method until an empty List is - * returned. This batch processing is needed because there could be millions - * of expired requests which are likely to result in out-of-memory problems. - * Notice that in case of errors only error messages get logged. An empty List - * is also returned. - */ - public List purgeExpiredRequests(long expiredRequestTime, int purgeSize) { - - PreparedStatement ps = null; - ResultSet rs = null; - List requestTokens = Lists.newArrayList(); - List ids = Lists.newArrayList(); - - if (!checkConnection()) { - log.error("REQUEST SUMMARY DAO - purgeExpiredRequests: unable to get a " - + "valid connection!"); - return requestTokens; - } - - try { - // start transaction - con.setAutoCommit(false); - String stmt = "SELECT ID, r_token FROM request_queue WHERE UNIX_TIMESTAMP(NOW()) - UNIX_TIMESTAMP(timeStamp) > ? AND status <> ? AND status <> ? LIMIT ?"; - ps = con.prepareStatement(stmt); - ps.setLong(1, expiredRequestTime); - ps.setInt(2, StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_REQUEST_QUEUED)); - ps.setInt(3, StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_REQUEST_INPROGRESS)); - ps.setInt(4, purgeSize); - logWarnings(con.getWarnings()); - log.trace("REQUEST SUMMARY DAO - purgeExpiredRequests - {}", ps); - - rs = ps.executeQuery(); - logWarnings(ps.getWarnings()); - - while (rs.next()) { - requestTokens.add(rs.getString("r_token")); - ids.add(new Long(rs.getLong("ID"))); - } - - close(rs); - close(ps); - - if (!ids.isEmpty()) { - // REMOVE BATCH OF EXPIRED REQUESTS! 
- stmt = "DELETE FROM request_queue WHERE ID in " + makeWhereString(ids); - - ps = con.prepareStatement(stmt); - logWarnings(con.getWarnings()); - log.trace("REQUEST SUMMARY DAO - purgeExpiredRequests - {}", stmt); - - int deleted = ps.executeUpdate(); - logWarnings(ps.getWarnings()); - if (deleted > 0) { - log.info("REQUEST SUMMARY DAO - purgeExpiredRequests - Deleted {} " - + "expired requests.", deleted); - } else { - log.trace("REQUEST SUMMARY DAO - purgeExpiredRequests - No deleted " - + "expired requests."); - } - - close(ps); - - stmt = "DELETE request_DirOption FROM request_DirOption " - + " LEFT JOIN request_Get ON request_DirOption.ID = request_Get.request_DirOptionID" - + " LEFT JOIN request_BoL ON request_DirOption.ID = request_BoL.request_DirOptionID " - + " LEFT JOIN request_Copy ON request_DirOption.ID = request_Copy.request_DirOptionID" - + " WHERE request_Copy.request_DirOptionID IS NULL AND" - + " request_Get.request_DirOptionID IS NULL AND" - + " request_BoL.request_DirOptionID IS NULL;"; - - ps = con.prepareStatement(stmt); - logWarnings(con.getWarnings()); - log.trace("REQUEST SUMMARY DAO - purgeExpiredRequests - {}", stmt); - deleted = ps.executeUpdate(); - logWarnings(ps.getWarnings()); - - if (deleted > 0) { - log.info("REQUEST SUMMARY DAO - purgeExpiredRequests - Deleted {} " - + "DirOption related to expired requests.", deleted); - } else { - log.trace("REQUEST SUMMARY DAO - purgeExpiredRequests - No Deleted " - + "DirOption related to expired requests."); - } - close(ps); - - } - // commit and finish transaction - con.commit(); - logWarnings(con.getWarnings()); - con.setAutoCommit(true); - logWarnings(con.getWarnings()); - } catch (SQLException e) { - log.error("REQUEST SUMMARY DAO - purgeExpiredRequests - Rolling back " - + "because of error: {}", e.getMessage(), e); - rollback(con); - } finally { - close(rs); - close(ps); - } - return requestTokens; - } - - /** - * Retrieve the total number of expired requests. 
- * - * @return - */ - public int getNumberExpired() { - - int rowCount = 0; - - if (!checkConnection()) { - log.error("REQUEST SUMMARY DAO - getNumberExpired: unable to get a " - + "valid connection!"); - return 0; - } - - PreparedStatement ps = null; - ResultSet rs = null; - - try { - // start transaction - con.setAutoCommit(false); - - String stmt = "SELECT count(*) FROM request_queue WHERE UNIX_TIMESTAMP(NOW()) - UNIX_TIMESTAMP(timeStamp) > ? AND status <> ? AND status <> ? "; - ps = con.prepareStatement(stmt); - ps.setLong(1, Configuration.getInstance().getExpiredRequestTime()); - ps.setInt(2, - StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_REQUEST_QUEUED)); - ps.setInt(3, StatusCodeConverter.getInstance() - .toDB(TStatusCode.SRM_REQUEST_INPROGRESS)); - - logWarnings(con.getWarnings()); - log.trace("REQUEST SUMMARY DAO - Number of expired requests: {}", ps); - rs = ps.executeQuery(); - logWarnings(ps.getWarnings()); - - // Get the number of rows from the result set - rs.next(); - rowCount = rs.getInt(1); - log.debug("Nr of expired requests is: {}", rowCount); - - close(rs); - close(ps); - } catch (SQLException e) { - log.error("REQUEST SUMMARY DAO - purgeExpiredRequests - Rolling back " - + "because of error: {}", e.getMessage(), e); - rollback(con); - } finally { - close(rs); - close(ps); - } - - return rowCount; - - } - - /** - * Private method that returns a String of all IDs retrieved by the last - * SELECT. - */ - private String makeWhereString(List rowids) { - - StringBuilder sb = new StringBuilder("("); - for (Iterator i = rowids.iterator(); i.hasNext();) { - sb.append(i.next()); - if (i.hasNext()) { - sb.append(","); - } - } - sb.append(")"); - return sb.toString(); - } - - /** - * Auxiliary method that sets up the connection to the DB, as well as the - * prepared statement. 
- */ - private boolean setUpConnection() { - - boolean response = false; - try { - Class.forName(driver); - con = DriverManager.getConnection(url, name, password); - logWarnings(con.getWarnings()); - response = con.isValid(0); - } catch (ClassNotFoundException | SQLException e) { - log.error("REQUEST SUMMARY DAO! Exception in setUpConnection! {}", e.getMessage(), e); - } - return response; - } - - /** - * Auxiliary method that tales down a connection to the DB. - */ - private void takeDownConnection() { - - if (con != null) { - try { - con.close(); - } catch (SQLException e) { - log.error("REQUEST SUMMARY DAO! Exception in takeDownConnection " - + "method: {}", e.getMessage(), e); - } - } - } - - /** - * Auxiliary method that checks if time for resetting the connection has come, - * and eventually takes it down and up back again. - */ - private boolean checkConnection() { - - boolean response = true; - if (reconnect) { - takeDownConnection(); - response = setUpConnection(); - if (response) { - reconnect = false; - } - } - return response; - } - - /** - * Auxiliary method used to close a Statement - */ - private void close(Statement stmt) { - - if (stmt != null) { - try { - stmt.close(); - } catch (Exception e) { - log.error("REQUEST SUMMARY DAO! Unable to close Statement {} - " - + "Error: {}", stmt.toString(), e.getMessage(), e); - } - } - } - - /** - * Auxiliary method used to close a ResultSet - */ - private void close(ResultSet rset) { - - if (rset != null) { - try { - rset.close(); - } catch (Exception e) { - log.error("REQUEST SUMMARY DAO! Unable to close ResultSet! Error: {}", - e.getMessage(), e); - } - } - } - - /** - * Auxiliary method used to roll back a transaction - */ - private void rollback(Connection con) { - - if (con != null) { - try { - con.rollback(); - logWarnings(con.getWarnings()); - log.error("PICKER2: roll back successful!"); - } catch (SQLException e2) { - log.error("PICKER2: roll back failed! 
{}", e2.getMessage(), e2); - } - } - } - - /** - * Private auxiliary method used to log SQLWarnings. - */ - private void logWarnings(SQLWarning warning) { - - if (warning != null) { - log.debug("REQUEST SUMMARY DAO: {}", warning); - while ((warning = warning.getNextWarning()) != null) { - log.debug("REQUEST SUMMARY DAO: {}", warning); - } - } - } - -} diff --git a/src/main/java/it/grid/storm/catalogs/RequestSummaryData.java b/src/main/java/it/grid/storm/catalogs/RequestSummaryData.java deleted file mode 100644 index 4921fe128..000000000 --- a/src/main/java/it/grid/storm/catalogs/RequestSummaryData.java +++ /dev/null @@ -1,540 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.catalogs; - -import it.grid.storm.srm.types.TLifeTimeInSeconds; -import it.grid.storm.srm.types.TRequestToken; -import it.grid.storm.srm.types.TRequestType; -import it.grid.storm.srm.types.TReturnStatus; -// import it.grid.storm.griduser.VomsGridUser; -import it.grid.storm.griduser.GridUserInterface; - -/** - * This class represents the SummaryData associated with the SRM request. It - * contains info about: Primary Key of request, TRequestType, TRequestToken, - * VomsGridUser. 
- * - * @author EGRID - ICTP Trieste - * @date March 18th, 2005 - * @version 4.0 - */ -public class RequestSummaryData { - - private TRequestType requestType = null; // request type of SRM request - private TRequestToken requestToken = null; // TRequestToken of SRM request - private GridUserInterface gu = null; // VomsGridUser that issued This request - private long id = -1; // long representing This object in persistence - - private String userToken = null; - private Integer retrytime = null; - private TLifeTimeInSeconds pinLifetime = null; - private String spaceToken = null; - private TReturnStatus status = null; - private String errstring = null; - private Integer remainingTotalTime = null; - private Integer nbreqfiles = null; - private Integer numOfCompleted = null; - private TLifeTimeInSeconds fileLifetime = null; - private Integer deferredStartTime = null; - private Integer numOfWaiting = null; - private Integer numOfFailed = null; - private Integer remainingDeferredStartTime = null; - - public RequestSummaryData(TRequestType rtype, TRequestToken rtoken, - GridUserInterface gu) throws InvalidRequestSummaryDataAttributesException { - - boolean ok = rtype != null && rtoken != null && gu != null; - if (!ok) - throw new InvalidRequestSummaryDataAttributesException(rtype, rtoken, gu); - this.requestType = rtype; - this.requestToken = rtoken; - this.gu = gu; - } - - /** - * Method that returns the type of SRM request - */ - public TRequestType requestType() { - - return requestType; - } - - /** - * Method that returns the SRM request TRequestToken - */ - public TRequestToken requestToken() { - - return requestToken; - } - - /** - * Method that returns the VomsGridUser that issued this request - */ - public GridUserInterface gridUser() { - - return gu; - } - - /** - * Method that returns a long corresponding to the identifier of This object - * in persistence. 
- */ - public long primaryKey() { - - return id; - } - - /** - * Method used to set the log corresponding to the identifier of This object - * in persistence. - */ - public void setPrimaryKey(long l) { - - this.id = l; - } - - /** - * @return the userToken - */ - public String getUserToken() { - - return userToken; - } - - /** - * @return the retrytime - */ - public Integer getRetrytime() { - - return retrytime; - } - - /** - * @return the pinLifetime - */ - public TLifeTimeInSeconds getPinLifetime() { - - return pinLifetime; - } - - /** - * @return the spaceToken - */ - public String getSpaceToken() { - - return spaceToken; - } - - /** - * @return the status - */ - public TReturnStatus getStatus() { - - return status; - } - - /** - * @return the errstring - */ - public String getErrstring() { - - return errstring; - } - - /** - * @return the remainingTotalTime - */ - public Integer getRemainingTotalTime() { - - return remainingTotalTime; - } - - /** - * @return the nbreqfiles - */ - public Integer getNbreqfiles() { - - return nbreqfiles; - } - - /** - * @return the numOfCompleted - */ - public Integer getNumOfCompleted() { - - return numOfCompleted; - } - - /** - * @return the fileLifetime - */ - public TLifeTimeInSeconds getFileLifetime() { - - return fileLifetime; - } - - /** - * @return the deferredStartTime - */ - public Integer getDeferredStartTime() { - - return deferredStartTime; - } - - /** - * @return the numOfWaiting - */ - public Integer getNumOfWaiting() { - - return numOfWaiting; - } - - /** - * @return the numOfFailed - */ - public Integer getNumOfFailed() { - - return numOfFailed; - } - - /** - * @return the remainingDeferredStartTime - */ - public Integer getRemainingDeferredStartTime() { - - return remainingDeferredStartTime; - } - - public void setUserToken(String userToken) { - - this.userToken = userToken; - } - - public void setRetrytime(Integer retrytime) { - - this.retrytime = retrytime; - - } - - public void 
setPinLifetime(TLifeTimeInSeconds pinLifetime) { - - this.pinLifetime = pinLifetime; - - } - - public void setSpaceToken(String spaceToken) { - - this.spaceToken = spaceToken; - - } - - public void setStatus(TReturnStatus status) { - - this.status = status; - - } - - public void setErrstring(String errstring) { - - this.errstring = errstring; - - } - - public void setRemainingTotalTime(Integer remainingTotalTime) { - - this.remainingTotalTime = remainingTotalTime; - - } - - public void setNbreqfiles(Integer nbreqfiles) { - - this.nbreqfiles = nbreqfiles; - - } - - public void setNumOfCompleted(Integer numOfCompleted) { - - this.numOfCompleted = numOfCompleted; - - } - - public void setFileLifetime(TLifeTimeInSeconds fileLifetime) { - - this.fileLifetime = fileLifetime; - - } - - public void setDeferredStartTime(Integer deferredStartTime) { - - this.deferredStartTime = deferredStartTime; - - } - - public void setNumOfWaiting(Integer numOfWaiting) { - - this.numOfWaiting = numOfWaiting; - - } - - public void setNumOfFailed(Integer numOfFailed) { - - this.numOfFailed = numOfFailed; - - } - - public void setRemainingDeferredStartTime(Integer remainingDeferredStartTime) { - - this.remainingDeferredStartTime = remainingDeferredStartTime; - - } - - /* - * (non-Javadoc) - * - * @see java.lang.Object#toString() - */ - @Override - public String toString() { - - StringBuilder builder = new StringBuilder(); - builder.append("RequestSummaryData [requestType="); - builder.append(requestType); - builder.append(", requestToken="); - builder.append(requestToken); - builder.append(", gu="); - builder.append(gu); - builder.append(", id="); - builder.append(id); - builder.append(", userToken="); - builder.append(userToken); - builder.append(", retrytime="); - builder.append(retrytime); - builder.append(", pinLifetime="); - builder.append(pinLifetime); - builder.append(", spaceToken="); - builder.append(spaceToken); - builder.append(", status="); - builder.append(status); - 
builder.append(", errstring="); - builder.append(errstring); - builder.append(", remainingTotalTime="); - builder.append(remainingTotalTime); - builder.append(", nbreqfiles="); - builder.append(nbreqfiles); - builder.append(", numOfCompleted="); - builder.append(numOfCompleted); - builder.append(", fileLifetime="); - builder.append(fileLifetime); - builder.append(", deferredStartTime="); - builder.append(deferredStartTime); - builder.append(", numOfWaiting="); - builder.append(numOfWaiting); - builder.append(", numOfFailed="); - builder.append(numOfFailed); - builder.append(", remainingDeferredStartTime="); - builder.append(remainingDeferredStartTime); - builder.append("]"); - return builder.toString(); - } - - /* - * (non-Javadoc) - * - * @see java.lang.Object#hashCode() - */ - @Override - public int hashCode() { - - final int prime = 31; - int result = 1; - result = prime * result - + ((deferredStartTime == null) ? 0 : deferredStartTime.hashCode()); - result = prime * result + ((errstring == null) ? 0 : errstring.hashCode()); - result = prime * result - + ((fileLifetime == null) ? 0 : fileLifetime.hashCode()); - result = prime * result + ((gu == null) ? 0 : gu.hashCode()); - result = prime * result + (int) (id ^ (id >>> 32)); - result = prime * result - + ((nbreqfiles == null) ? 0 : nbreqfiles.hashCode()); - result = prime * result - + ((numOfCompleted == null) ? 0 : numOfCompleted.hashCode()); - result = prime * result - + ((numOfFailed == null) ? 0 : numOfFailed.hashCode()); - result = prime * result - + ((numOfWaiting == null) ? 0 : numOfWaiting.hashCode()); - result = prime * result - + ((pinLifetime == null) ? 0 : pinLifetime.hashCode()); - result = prime - * result - + ((remainingDeferredStartTime == null) ? 0 : remainingDeferredStartTime - .hashCode()); - result = prime * result - + ((remainingTotalTime == null) ? 0 : remainingTotalTime.hashCode()); - result = prime * result - + ((requestToken == null) ? 
0 : requestToken.hashCode()); - result = prime * result - + ((requestType == null) ? 0 : requestType.hashCode()); - result = prime * result + ((retrytime == null) ? 0 : retrytime.hashCode()); - result = prime * result - + ((spaceToken == null) ? 0 : spaceToken.hashCode()); - result = prime * result + ((status == null) ? 0 : status.hashCode()); - result = prime * result + ((userToken == null) ? 0 : userToken.hashCode()); - return result; - } - - /* - * (non-Javadoc) - * - * @see java.lang.Object#equals(java.lang.Object) - */ - @Override - public boolean equals(Object obj) { - - if (this == obj) { - return true; - } - if (obj == null) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - RequestSummaryData other = (RequestSummaryData) obj; - if (deferredStartTime == null) { - if (other.deferredStartTime != null) { - return false; - } - } else if (!deferredStartTime.equals(other.deferredStartTime)) { - return false; - } - if (errstring == null) { - if (other.errstring != null) { - return false; - } - } else if (!errstring.equals(other.errstring)) { - return false; - } - if (fileLifetime == null) { - if (other.fileLifetime != null) { - return false; - } - } else if (!fileLifetime.equals(other.fileLifetime)) { - return false; - } - if (gu == null) { - if (other.gu != null) { - return false; - } - } else if (!gu.equals(other.gu)) { - return false; - } - if (id != other.id) { - return false; - } - if (nbreqfiles == null) { - if (other.nbreqfiles != null) { - return false; - } - } else if (!nbreqfiles.equals(other.nbreqfiles)) { - return false; - } - if (numOfCompleted == null) { - if (other.numOfCompleted != null) { - return false; - } - } else if (!numOfCompleted.equals(other.numOfCompleted)) { - return false; - } - if (numOfFailed == null) { - if (other.numOfFailed != null) { - return false; - } - } else if (!numOfFailed.equals(other.numOfFailed)) { - return false; - } - if (numOfWaiting == null) { - if (other.numOfWaiting != null) { - return 
false; - } - } else if (!numOfWaiting.equals(other.numOfWaiting)) { - return false; - } - if (pinLifetime == null) { - if (other.pinLifetime != null) { - return false; - } - } else if (!pinLifetime.equals(other.pinLifetime)) { - return false; - } - if (remainingDeferredStartTime == null) { - if (other.remainingDeferredStartTime != null) { - return false; - } - } else if (!remainingDeferredStartTime - .equals(other.remainingDeferredStartTime)) { - return false; - } - if (remainingTotalTime == null) { - if (other.remainingTotalTime != null) { - return false; - } - } else if (!remainingTotalTime.equals(other.remainingTotalTime)) { - return false; - } - if (requestToken == null) { - if (other.requestToken != null) { - return false; - } - } else if (!requestToken.equals(other.requestToken)) { - return false; - } - if (requestType != other.requestType) { - return false; - } - if (retrytime == null) { - if (other.retrytime != null) { - return false; - } - } else if (!retrytime.equals(other.retrytime)) { - return false; - } - if (spaceToken == null) { - if (other.spaceToken != null) { - return false; - } - } else if (!spaceToken.equals(other.spaceToken)) { - return false; - } - if (status == null) { - if (other.status != null) { - return false; - } - } else if (!status.equals(other.status)) { - return false; - } - if (userToken == null) { - if (other.userToken != null) { - return false; - } - } else if (!userToken.equals(other.userToken)) { - return false; - } - return true; - } - -} diff --git a/src/main/java/it/grid/storm/catalogs/RequestSummaryDataTO.java b/src/main/java/it/grid/storm/catalogs/RequestSummaryDataTO.java deleted file mode 100644 index 76dabb7bb..000000000 --- a/src/main/java/it/grid/storm/catalogs/RequestSummaryDataTO.java +++ /dev/null @@ -1,540 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.catalogs; - -import java.sql.Timestamp; - -/** - * Class that represents data of an asynchrnous Request, regardless of whether - * it is a Put, Get or Copy, in the Persistence Layer: this is all raw data - * referring to the request proper, that is, String and primitive types. - * - * @author EGRID ICTP - * @version 2.0 - * @date June 2005 - */ -public class RequestSummaryDataTO { - - public static final String PTG_REQUEST_TYPE = "PTG"; - public static final String PTP_REQUEST_TYPE = "PTP"; - public static final String BOL_REQUEST_TYPE = "BOL"; - public static final String COPY_REQUEST_TYPE = "COP"; - - private long id = -1; // id of request in persistence - private String requestType = ""; // request type - private String requestToken = ""; // request token - private String clientDN = ""; // DN that issued request - private String vomsAttributes = ""; // String containing all VOMS attributes - private Timestamp timestamp = null; - - private boolean empty = true; - private String userToken = null; - private Integer retrytime = null; - private Integer pinLifetime = null; - private String spaceToken = null; - private Integer status = null; - private String errstring = null; - private Integer remainingTotalTime = null; - private Integer nbreqfiles = null; - private Integer numOfCompleted = null; - private Integer fileLifetime = null; - private Integer deferredStartTime = null; - 
private Integer numOfWaiting = null; - private Integer numOfFailed = null; - private Integer remainingDeferredStartTime = null; - - public boolean isEmpty() { - - return empty; - } - - public long primaryKey() { - - return id; - } - - public void setPrimaryKey(long l) { - - empty = false; - id = l; - } - - public String requestType() { - - return requestType; - } - - public void setRequestType(String s) { - - empty = false; - requestType = s; - } - - public String requestToken() { - - return requestToken; - } - - public void setRequestToken(String s) { - - empty = false; - requestToken = s; - } - - public String clientDN() { - - return clientDN; - } - - public void setClientDN(String s) { - - empty = false; - clientDN = s; - } - - public String vomsAttributes() { - - return vomsAttributes; - } - - public void setVomsAttributes(String s) { - - empty = false; - vomsAttributes = s; - } - - public Timestamp timestamp() { - - return timestamp; - } - - public void setTimestamp(Timestamp timestamp) { - - empty = false; - this.timestamp = timestamp; - } - - /** - * @return the userToken - */ - public String getUserToken() { - - return userToken; - } - - /** - * @return the retrytime - */ - public Integer getRetrytime() { - - return retrytime; - } - - /** - * @return the pinLifetime - */ - public Integer getPinLifetime() { - - return pinLifetime; - } - - /** - * @return the spaceToken - */ - public String getSpaceToken() { - - return spaceToken; - } - - /** - * @return the status - */ - public Integer getStatus() { - - return status; - } - - /** - * @return the errstring - */ - public String getErrstring() { - - return errstring; - } - - /** - * @return the remainingTotalTime - */ - public Integer getRemainingTotalTime() { - - return remainingTotalTime; - } - - /** - * @return the nbreqfiles - */ - public Integer getNbreqfiles() { - - return nbreqfiles; - } - - /** - * @return the numOfCompleted - */ - public Integer getNumOfCompleted() { - - return numOfCompleted; - } - - 
/** - * @return the fileLifetime - */ - public Integer getFileLifetime() { - - return fileLifetime; - } - - /** - * @return the deferredStartTime - */ - public Integer getDeferredStartTime() { - - return deferredStartTime; - } - - /** - * @return the numOfWaiting - */ - public Integer getNumOfWaiting() { - - return numOfWaiting; - } - - /** - * @return the numOfFailed - */ - public Integer getNumOfFailed() { - - return numOfFailed; - } - - /** - * @return the remainingDeferredStartTime - */ - public Integer getRemainingDeferredStartTime() { - - return remainingDeferredStartTime; - } - - public void setUserToken(String userToken) { - - this.userToken = userToken; - } - - public void setRetrytime(Integer retrytime) { - - this.retrytime = retrytime; - - } - - public void setPinLifetime(Integer pinLifetime) { - - this.pinLifetime = pinLifetime; - - } - - public void setSpaceToken(String spaceToken) { - - this.spaceToken = spaceToken; - - } - - public void setStatus(Integer status) { - - this.status = status; - - } - - public void setErrstring(String errstring) { - - this.errstring = errstring; - - } - - public void setRemainingTotalTime(Integer remainingTotalTime) { - - this.remainingTotalTime = remainingTotalTime; - - } - - public void setNbreqfiles(Integer nbreqfiles) { - - this.nbreqfiles = nbreqfiles; - - } - - public void setNumOfCompleted(Integer numOfCompleted) { - - this.numOfCompleted = numOfCompleted; - - } - - public void setFileLifetime(Integer fileLifetime) { - - this.fileLifetime = fileLifetime; - - } - - public void setDeferredStartTime(Integer deferredStartTime) { - - this.deferredStartTime = deferredStartTime; - - } - - public void setNumOfWaiting(Integer numOfWaiting) { - - this.numOfWaiting = numOfWaiting; - - } - - public void setNumOfFailed(Integer numOfFailed) { - - this.numOfFailed = numOfFailed; - - } - - public void setRemainingDeferredStartTime(Integer remainingDeferredStartTime) { - - this.remainingDeferredStartTime = 
remainingDeferredStartTime; - - } - - /* - * (non-Javadoc) - * - * @see java.lang.Object#toString() - */ - @Override - public String toString() { - - StringBuilder builder = new StringBuilder(); - builder.append("RequestSummaryDataTO [id="); - builder.append(id); - builder.append(", requestType="); - builder.append(requestType); - builder.append(", requestToken="); - builder.append(requestToken); - builder.append(", clientDN="); - builder.append(clientDN); - builder.append(", vomsAttributes="); - builder.append(vomsAttributes); - builder.append(", timestamp="); - builder.append(timestamp); - builder.append(", empty="); - builder.append(empty); - builder.append(", userToken="); - builder.append(userToken); - builder.append(", retrytime="); - builder.append(retrytime); - builder.append(", pinLifetime="); - builder.append(pinLifetime); - builder.append(", spaceToken="); - builder.append(spaceToken); - builder.append(", status="); - builder.append(status); - builder.append(", errstring="); - builder.append(errstring); - builder.append(", remainingTotalTime="); - builder.append(remainingTotalTime); - builder.append(", nbreqfiles="); - builder.append(nbreqfiles); - builder.append(", numOfCompleted="); - builder.append(numOfCompleted); - builder.append(", fileLifetime="); - builder.append(fileLifetime); - builder.append(", deferredStartTime="); - builder.append(deferredStartTime); - builder.append(", numOfWaiting="); - builder.append(numOfWaiting); - builder.append(", numOfFailed="); - builder.append(numOfFailed); - builder.append(", remainingDeferredStartTime="); - builder.append(remainingDeferredStartTime); - builder.append("]"); - return builder.toString(); - } - - /* - * (non-Javadoc) - * - * @see java.lang.Object#hashCode() - */ - @Override - public int hashCode() { - - final int prime = 31; - int result = 1; - result = prime * result + ((clientDN == null) ? 
0 : clientDN.hashCode()); - result = prime * result - + (int) (deferredStartTime ^ (deferredStartTime >>> 32)); - result = prime * result + (empty ? 1231 : 1237); - result = prime * result + ((errstring == null) ? 0 : errstring.hashCode()); - result = prime * result + (int) (fileLifetime ^ (fileLifetime >>> 32)); - result = prime * result + (int) (id ^ (id >>> 32)); - result = prime * result + (int) (nbreqfiles ^ (nbreqfiles >>> 32)); - result = prime * result + (int) (numOfCompleted ^ (numOfCompleted >>> 32)); - result = prime * result + (int) (numOfFailed ^ (numOfFailed >>> 32)); - result = prime * result + (int) (numOfWaiting ^ (numOfWaiting >>> 32)); - result = prime * result + (int) (pinLifetime ^ (pinLifetime >>> 32)); - result = prime - * result - + (int) (remainingDeferredStartTime ^ (remainingDeferredStartTime >>> 32)); - result = prime * result - + (int) (remainingTotalTime ^ (remainingTotalTime >>> 32)); - result = prime * result - + ((requestToken == null) ? 0 : requestToken.hashCode()); - result = prime * result - + ((requestType == null) ? 0 : requestType.hashCode()); - result = prime * result + (int) (retrytime ^ (retrytime >>> 32)); - result = prime * result - + ((spaceToken == null) ? 0 : spaceToken.hashCode()); - result = prime * result + (int) (status ^ (status >>> 32)); - result = prime * result + ((timestamp == null) ? 0 : timestamp.hashCode()); - result = prime * result + ((userToken == null) ? 0 : userToken.hashCode()); - result = prime * result - + ((vomsAttributes == null) ? 
0 : vomsAttributes.hashCode()); - return result; - } - - /* - * (non-Javadoc) - * - * @see java.lang.Object#equals(java.lang.Object) - */ - @Override - public boolean equals(Object obj) { - - if (this == obj) { - return true; - } - if (obj == null) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - RequestSummaryDataTO other = (RequestSummaryDataTO) obj; - if (clientDN == null) { - if (other.clientDN != null) { - return false; - } - } else if (!clientDN.equals(other.clientDN)) { - return false; - } - if (deferredStartTime != other.deferredStartTime) { - return false; - } - if (empty != other.empty) { - return false; - } - if (errstring == null) { - if (other.errstring != null) { - return false; - } - } else if (!errstring.equals(other.errstring)) { - return false; - } - if (fileLifetime != other.fileLifetime) { - return false; - } - if (id != other.id) { - return false; - } - if (nbreqfiles != other.nbreqfiles) { - return false; - } - if (numOfCompleted != other.numOfCompleted) { - return false; - } - if (numOfFailed != other.numOfFailed) { - return false; - } - if (numOfWaiting != other.numOfWaiting) { - return false; - } - if (pinLifetime != other.pinLifetime) { - return false; - } - if (remainingDeferredStartTime != other.remainingDeferredStartTime) { - return false; - } - if (remainingTotalTime != other.remainingTotalTime) { - return false; - } - if (requestToken == null) { - if (other.requestToken != null) { - return false; - } - } else if (!requestToken.equals(other.requestToken)) { - return false; - } - if (requestType == null) { - if (other.requestType != null) { - return false; - } - } else if (!requestType.equals(other.requestType)) { - return false; - } - if (retrytime != other.retrytime) { - return false; - } - if (spaceToken == null) { - if (other.spaceToken != null) { - return false; - } - } else if (!spaceToken.equals(other.spaceToken)) { - return false; - } - if (status != other.status) { - return false; - } - if 
(timestamp == null) { - if (other.timestamp != null) { - return false; - } - } else if (!timestamp.equals(other.timestamp)) { - return false; - } - if (userToken == null) { - if (other.userToken != null) { - return false; - } - } else if (!userToken.equals(other.userToken)) { - return false; - } - if (vomsAttributes == null) { - if (other.vomsAttributes != null) { - return false; - } - } else if (!vomsAttributes.equals(other.vomsAttributes)) { - return false; - } - return true; - } - -} diff --git a/src/main/java/it/grid/storm/catalogs/ReservedSpaceCatalog.java b/src/main/java/it/grid/storm/catalogs/ReservedSpaceCatalog.java index 675dc182f..28c8ba81c 100644 --- a/src/main/java/it/grid/storm/catalogs/ReservedSpaceCatalog.java +++ b/src/main/java/it/grid/storm/catalogs/ReservedSpaceCatalog.java @@ -21,18 +21,6 @@ package it.grid.storm.catalogs; -import it.grid.storm.griduser.GridUserInterface; -import it.grid.storm.persistence.DAOFactory; -import it.grid.storm.persistence.PersistenceDirector; -import it.grid.storm.persistence.dao.StorageSpaceDAO; -import it.grid.storm.persistence.exceptions.DataAccessException; -import it.grid.storm.persistence.model.StorageSpaceTO; -import it.grid.storm.persistence.model.TransferObjectDecodingException; -import it.grid.storm.space.StorageSpaceData; -import it.grid.storm.srm.types.ArrayOfTSpaceToken; -import it.grid.storm.srm.types.InvalidTSpaceTokenAttributesException; -import it.grid.storm.srm.types.TSpaceToken; - import java.io.File; import java.util.Calendar; import java.util.Collection; @@ -40,7 +28,6 @@ import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; -import java.util.LinkedList; import java.util.List; import org.slf4j.Logger; @@ -48,664 +35,454 @@ import com.google.common.collect.Lists; +import it.grid.storm.griduser.GridUserInterface; +import it.grid.storm.persistence.dao.StorageSpaceDAO; +import it.grid.storm.persistence.exceptions.DataAccessException; +import 
it.grid.storm.persistence.impl.mysql.StorageSpaceDAOMySql; +import it.grid.storm.persistence.model.StorageSpaceTO; +import it.grid.storm.persistence.model.TransferObjectDecodingException; +import it.grid.storm.space.StorageSpaceData; +import it.grid.storm.srm.types.ArrayOfTSpaceToken; +import it.grid.storm.srm.types.InvalidTSpaceTokenAttributesException; +import it.grid.storm.srm.types.TSpaceToken; + /** * */ public class ReservedSpaceCatalog { - private static final Logger log = LoggerFactory - .getLogger(ReservedSpaceCatalog.class); - private static HashSet voSA_spaceTokenSet = new HashSet(); - private static HashMap voSA_UpdateTime = new HashMap(); - - private static final long NOT_INITIALIZED_SIZE_VALUE = -1L; - - private final DAOFactory daoFactory; - private StorageSpaceDAO ssDAO; - - /********************************************* - * STATIC METHODS - *********************************************/ - public static void addSpaceToken(TSpaceToken token) { - - voSA_spaceTokenSet.add(token); - voSA_UpdateTime.put(token, null); - } - - public static HashSet getTokenSet() { - - return voSA_spaceTokenSet; - } - - public static void clearTokenSet() { - - voSA_spaceTokenSet.clear(); - voSA_UpdateTime.clear(); - } - - public static void setUpdateTime(TSpaceToken token, Date updateTime) { - - if (voSA_UpdateTime.containsKey(token)) { - voSA_UpdateTime.put(token, updateTime); - } else { - log.warn("Failing while Trying to set update time in Catalog cache."); - } - } - - public static Date getUpdateTime(TSpaceToken token) { - - Date result = null; - if (voSA_UpdateTime.containsKey(token)) { - result = voSA_UpdateTime.get(token); - } else { - log.warn("Failing while Trying to set update time in Catalog cache."); - } - return result; - } - - /********************************************* - * CLASS METHODS - *********************************************/ - /** - * Default constructor - */ - public ReservedSpaceCatalog() { - - log.debug("Building Reserve Space Catalog..."); - 
// Binding to the persistence component - daoFactory = PersistenceDirector.getDAOFactory(); - } - - /** - * Basic method used to retrieve all the information about a StorageSpace - - * StorageSpace is selected by SpaceToken - * - * @param spaceToken - * TSpaceToken - * @return StorageSpaceData, null if no-one SS exists with the specified - * spaceToken - * @throws DataAccessException - */ - public StorageSpaceData getStorageSpace(TSpaceToken spaceToken) - throws TransferObjectDecodingException, DataAccessException { - - StorageSpaceData result = null; - ssDAO = daoFactory.getStorageSpaceDAO(); - log.debug("Storage Space DAO retrieved."); - StorageSpaceTO ssTO = ssDAO.getStorageSpaceByToken(spaceToken.getValue()); - log.debug("Storage Space retrieved by Token. "); - if (ssTO != null) { - try { - result = new StorageSpaceData(ssTO); - } catch (IllegalArgumentException e) { - log.error("Error building StorageSpaceData from StorageSpaceTO " - + "IllegalArgumentException: {}", e.getLocalizedMessage(), e); - throw new TransferObjectDecodingException( - "Unable to build StorageSpaceData from StorageSpaceTO"); - } - } else { - log.info("Unable to build StorageSpaceData. No StorageSpaceTO built " - + "from the DB"); - } - return result; - } - - /** - * Create a new StorageSpace entry into the DB. 
It is used for - STATIC Space - * Creation - DYNAMIC Space Reservation - * - * @param ssd - * @throws NoDataFoundException - * @throws InvalidRetrievedDataException - * @throws MultipleDataEntriesException - */ - public void addStorageSpace(StorageSpaceData ssd) throws DataAccessException { - - log.debug("ADD StorageSpace Start..."); - StorageSpaceTO ssTO = new StorageSpaceTO(ssd); - log.debug("Storage Space TO Created"); - ssTO.setUpdateTime(new Date()); - ssDAO = daoFactory.getStorageSpaceDAO(); - log.debug("Storage Space DAO retrieved."); - ssDAO.addStorageSpace(ssTO); - log.debug("StorageSpaceTO inserted in Persistence"); - } - - /** - * Update all the fields apart from the alias of a storage space row given the - * input StorageSpaceData - * - * @param ssd - * - * @throws DataAccessException - */ - public void updateStorageSpace(StorageSpaceData ssd) throws DataAccessException { - - updateStorageSpace(ssd, null); - } - - /** - * @param ssd - * @param updateTime - * - * @throws DataAccessException - */ - public void updateStorageSpace(StorageSpaceData ssd, Date updateTime) throws DataAccessException { - - ssDAO = daoFactory.getStorageSpaceDAO(); - log.debug("Storage Space DAO retrieved."); - - StorageSpaceTO ssTO = new StorageSpaceTO(ssd); - updateTime = updateTime == null ? 
new Date() : updateTime; - ssTO.setUpdateTime(updateTime); - - ssDAO.updateStorageSpace(ssTO); - log.debug("StorageSpaceTO updated in Persistence"); - } - - /** - * @param ssd - */ - public void updateStorageSpaceFreeSpace(StorageSpaceData ssd) - throws DataAccessException { - - ssDAO = daoFactory.getStorageSpaceDAO(); - log.debug("Storage Space DAO retrieved."); - StorageSpaceTO ssTO = new StorageSpaceTO(ssd); - log.debug("Storage Space TO Created"); - ssTO.setUpdateTime(new Date()); - ssDAO.updateStorageSpaceFreeSpace(ssTO); - log.debug("StorageSpaceTO updated in Persistence"); - - } - - /** - * @param ssd - * @throws NoDataFoundException - * @throws InvalidRetrievedDataException - * @throws MultipleDataEntriesException - */ - public void updateAllStorageSpace(StorageSpaceData ssd) - throws NoDataFoundException, InvalidRetrievedDataException, - MultipleDataEntriesException { - - updateAllStorageSpace(ssd, null); - } - - /** - * Update StorageSpace. This method is used to update the StorageSpace into - * the ReserveSpace Catalog. The update operation take place after a - * AbortRequest for a PrepareToPut operation done with the spaceToken.(With or - * without the size specified). 
- */ - - public void updateAllStorageSpace(StorageSpaceData ssd, Date updateTime) - throws NoDataFoundException, InvalidRetrievedDataException, - MultipleDataEntriesException { - - log.debug("UPDATE StorageSpace Start..."); - // Build StorageSpaceTO from SpaceData - StorageSpaceTO ssTO = new StorageSpaceTO(ssd); - log.debug("Storage Space TO Created"); - if (updateTime == null) { - // The update time of the information is now - ssTO.setUpdateTime(new Date()); - } else { - ssTO.setUpdateTime(updateTime); - } - - // Retrieve the Data Access Object from the factory - try { - ssDAO = daoFactory.getStorageSpaceDAO(); - log.debug("Storage Space DAO retrieved."); - } catch (DataAccessException daEx) { - log.error("Error while retrieving StorageSpaceDAO: {}", daEx.getMessage(), - daEx); - } - - // Add the row to the persistence.. - try { - ssDAO.updateAllStorageSpace(ssTO); - log.debug("StorageSpaceTO updated in Persistence"); - } catch (DataAccessException daEx) { - log.error("Error while inserting new row in StorageSpace: {}", - daEx.getMessage(), daEx); - } - } - - /** - * @param desc - * @return - */ - public StorageSpaceData getStorageSpaceByAlias(String desc) { - - StorageSpaceData result = null; // new StorageSpaceData(); - log.debug("Retrieve Storage Space start... "); - - // Retrieve the Data Access Object from the factory - try { - ssDAO = daoFactory.getStorageSpaceDAO(); - log.debug("Storage Space DAO retrieved."); - } catch (DataAccessException daEx) { - log.debug("Error while retrieving StorageSpaceDAO: {}", daEx.getMessage(), - daEx); - } - - // Get StorageSpaceTO form persistence - try { - Collection cl = ssDAO.getStorageSpaceByAliasOnly(desc); - if (cl != null && !cl.isEmpty()) { - log.debug("Storage Space retrieved by Token. 
"); - // Build the result - try { - result = new StorageSpaceData(cl.toArray(new StorageSpaceTO[0])[0]); - } catch (IllegalArgumentException e) { - log.error("unable to build StorageSpaceData from StorageSpaceTO " - + "IllegalArgumentException: {}", e.getMessage(), e); - } - } - } catch (DataAccessException daEx) { - log.debug("Error while retrieving StorageSpace: {}", daEx.getMessage(), - daEx); - } - - return result; - } - - /** - * Provides a list of storage spaces not initialized by comparing the used - * space stored against the well know not initialized value - * NOT_INITIALIZED_SIZE_VALUE - * - * @return SpaceData - */ - public List getStorageSpaceNotInitialized() { - - log.debug("Retrieve Storage Space not initialized start "); - List result = Lists.newLinkedList(); - // Retrieve the Data Access Object from the factory - try { - ssDAO = daoFactory.getStorageSpaceDAO(); - log.debug("Storage Space DAO retrieved."); - } catch (DataAccessException daEx) { - log.debug("Error while retrieving StorageSpaceDAO: {}", daEx.getMessage(), - daEx); - } - // Get StorageSpaceTO form persistence - try { - Collection storagesSpaceTOCollection = ssDAO - .getStorageSpaceByUnavailableUsedSpace(NOT_INITIALIZED_SIZE_VALUE); - log.debug("Storage Space retrieved by not initialized used space. "); - for (StorageSpaceTO storagesSpaceTO : storagesSpaceTOCollection) { - if (storagesSpaceTO != null) { - try { - result.add(new StorageSpaceData(storagesSpaceTO)); - } catch (IllegalArgumentException e) { - log.error("unable to build StorageSpaceData from StorageSpaceTO. 
" - + "IllegalArgumentException: {}", e.getMessage(), e); - } - } else { - log.warn("Received a collection of StorageSpaceTO containing null " - + "elements, skipping them"); - } - } - } catch (DataAccessException daEx) { - log.debug("Error while retrieving StorageSpace", daEx); - } - return result; - } - - /** - * Provides a list of storage spaces not updated since the provided timestamp - * - * @param lastUpdateTimestamp - * @return - */ - - public List getStorageSpaceByLastUpdate( - Date lastUpdateTimestamp) { - - log.debug("Retrieve Storage Space not initialized start "); - LinkedList result = new LinkedList(); - // Retrieve the Data Access Object from the factory - try { - ssDAO = daoFactory.getStorageSpaceDAO(); - log.debug("Storage Space DAO retrieved."); - } catch (DataAccessException daEx) { - log.debug("Error while retrieving StorageSpaceDAO: {}", daEx.getMessage(), - daEx); - } - // GetStorageSpaceTO form persistence - try { - Collection storagesSpaceTOCollection = ssDAO - .getStorageSpaceByPreviousLastUpdate(lastUpdateTimestamp); - log.debug("Storage Space retrieved by Token previous last update. 
"); - for (StorageSpaceTO storagesSpaceTO : storagesSpaceTOCollection) { - if (storagesSpaceTO != null) { - try { - result.add(new StorageSpaceData(storagesSpaceTO)); - } catch (IllegalArgumentException e) { - log.error("unable to build StorageSpaceData from StorageSpaceTO " - + "IllegalArgumentException: {}", e.getMessage(), e); - } - } else { - log.warn("Received a collection of StorageSpaceTO containing null " - + "elements, skipping them"); - } - } - } catch (DataAccessException daEx) { - log.debug("Error while retrieving StorageSpace: {}", daEx.getMessage(), - daEx); - } - return result; - } - - /** - * - * @param user - * VomsGridUser - * @param spaceAlias - * String - * @return ArrayOfTSpaceToken - */ - public ArrayOfTSpaceToken getSpaceTokens(GridUserInterface user, - String spaceAlias) { - - ArrayOfTSpaceToken result = new ArrayOfTSpaceToken(); - - log.debug("Retrieving space tokens..."); - - // Retrieve the Data Access Object from the factory - try { - ssDAO = daoFactory.getStorageSpaceDAO(); - log.debug("Storage Space DAO retrieved."); - } catch (DataAccessException daEx) { - log.error("Error while retrieving StorageSpaceDAO: {}", daEx.getMessage(), - daEx); - } - - // Get StorageSpaceTO form persistence - try { - - Collection listOfStorageSpace = ssDAO.getStorageSpaceByOwner(user, - spaceAlias); - - int nItems = listOfStorageSpace.size(); - log.debug("getSpaceTokens : Number of Storage spaces retrieved with " - + "Alias '{}': {}", spaceAlias, nItems); - Iterator j_ssTO = listOfStorageSpace.iterator(); - - while (j_ssTO.hasNext()) { - StorageSpaceTO ssTO = (StorageSpaceTO) j_ssTO.next(); - try { - TSpaceToken spaceToken = TSpaceToken.make(ssTO.getSpaceToken()); - result.addTSpaceToken(spaceToken); - } catch (InvalidTSpaceTokenAttributesException ex2) { - log.error("Retrieved invalid Space token from DB"); - } - } - } catch (DataAccessException daEx) { - log.error("Error while retrieving StorageSpace: {}", daEx.getMessage(), - daEx); - } catch (Exception 
e) { - log.error("Exception while retrieving Storage Space: {}", e.getMessage(), - e); - } - return result; - } - - /** - * This method is used for the VOspaceArea Check. - * - * @param spaceAlias - * @return - */ - - public ArrayOfTSpaceToken getSpaceTokensByAlias(String spaceAlias) { - - ArrayOfTSpaceToken result = new ArrayOfTSpaceToken(); - - log.debug("Retrieving space tokens..."); - - // Retrieve the Data Access Object from the factory - try { - ssDAO = daoFactory.getStorageSpaceDAO(); - log.debug("Storage Space DAO retrieved."); - } catch (DataAccessException daEx) { - log.error("Error while retrieving StorageSpaceDAO: {}", daEx.getMessage(), - daEx); - } - - // Get StorageSpaceTO form persistence - try { - Collection listOfStorageSpace = ssDAO - .getStorageSpaceByAliasOnly(spaceAlias); - - int nItems = listOfStorageSpace.size(); - log.debug("Number of Storage spaces retrieved: {}", nItems); - Iterator j_ssTO = listOfStorageSpace.iterator(); - - while (j_ssTO.hasNext()) { - StorageSpaceTO ssTO = (StorageSpaceTO) j_ssTO.next(); - try { - TSpaceToken spaceToken = TSpaceToken.make(ssTO.getSpaceToken()); - result.addTSpaceToken(spaceToken); - } catch (InvalidTSpaceTokenAttributesException ex2) { - log.error("Retrieved invalid Space token from DB"); - } - } - - } catch (DataAccessException daEx) { - log.error("Error while retrieving StorageSpace: {}", daEx.getMessage(), - daEx); - } catch (Exception e) { - log.error("Error getting data! Error: {}", e.getMessage(), e); - } - return result; - } - - /** - * This method is used for the VOspaceArea Check. 
- * - * @param VOname - * @return - */ - - public ArrayOfTSpaceToken getSpaceTokensBySpaceType(String stype) { - - ArrayOfTSpaceToken result = new ArrayOfTSpaceToken(); - - log.debug("Retrieving space tokens..."); - - // Retrieve the Data Access Object from the factory - try { - ssDAO = daoFactory.getStorageSpaceDAO(); - log.debug("Storage Space DAO retrieved."); - } catch (DataAccessException daEx) { - log.error("Error while retrieving StorageSpaceDAO: {}", daEx.getMessage(), - daEx); - } - - // Get StorageSpaceTO form persistence - try { - Collection listOfStorageSpace = ssDAO.getStorageSpaceBySpaceType(stype); - - int nItems = listOfStorageSpace.size(); - log.debug("Number of Storage spaces retrieved: {}", nItems); - Iterator j_ssTO = listOfStorageSpace.iterator(); - - while (j_ssTO.hasNext()) { - StorageSpaceTO ssTO = (StorageSpaceTO) j_ssTO.next(); - try { - TSpaceToken spaceToken = TSpaceToken.make(ssTO.getSpaceToken()); - result.addTSpaceToken(spaceToken); - } catch (InvalidTSpaceTokenAttributesException ex2) { - log.error("Retrieved invalid Space token from DB"); - } - } - - } catch (DataAccessException daEx) { - log.error("Error while retrieving StorageSpace: {}", daEx.getMessage(), daEx); - } catch (Exception e) { - log.error("Generic Error while retrieving StorageSpace: {}", e.getMessage(), e); - } - return result; - } - - // ************************ CHECH BELOW METHODS *************************** - - /** - * - * @param user - * GridUserInterface - * @param spaceToken - * TSpaceToken - * @return boolean - */ - public boolean release(GridUserInterface user, final TSpaceToken spaceToken) { - - log.debug("Delete storage spaceToken info from persistence: {}", spaceToken); - - // Retrieve the Data Access Object from the factory - try { - ssDAO = daoFactory.getStorageSpaceDAO(); - log.debug("Storage Space DAO retrieved."); - } catch (DataAccessException daEx) { - log.error("Error while retrieving StorageSpaceDAO: {}", - daEx.getMessage(), daEx); - } - boolean 
rowRemoved = true; - // Delete the row from persistence. - try { - ssDAO.removeStorageSpace(user, spaceToken.getValue()); - log.debug("spaceToken removed from DB."); - } catch (DataAccessException daEx) { - log.error("spaceToken not found in the DB: {}", spaceToken.getValue()); - rowRemoved = false; - } - return rowRemoved; - } - - /** - * Method that purges the catalog, removing expired space reservation. The - * spacefile with lifetime expired are removed from the file systems. - * - */ - public void purge() { - - log.debug("Space Garbage Collector start!"); - Calendar rightNow = Calendar.getInstance(); - - // Retrieve the Data Access Object from the factory - try { - ssDAO = daoFactory.getStorageSpaceDAO(); - log.debug("Storage Space DAO retrieved."); - } catch (DataAccessException daEx) { - log.error("Error while retrieving StorageSpaceDAO: {}", daEx.getMessage(), - daEx); - } - // Get the Collection of Space Resrvation Expired - Collection expiredSpaceTO; - try { - expiredSpaceTO = ssDAO.getExpired(rightNow.getTimeInMillis() / 1000); - } catch (DataAccessException e) { - // No space expired FOUND - log.debug("Space Garbage Collector: no space expired found."); - return; - } - - // For each entry expired - // 1) Delete the related space file - // 2) Remove the entry from the DB - - StorageSpaceTO spaceTO = null; - log.debug("Space Garbage Collector: Number of SpaceFile to remove {}.", - expiredSpaceTO.size()); - - for (Iterator i = expiredSpaceTO.iterator(); i.hasNext();) { - spaceTO = (StorageSpaceTO) i.next(); - // Deleteing space File - String spaceFileName = spaceTO.getSpaceFile(); - File sfile = new File(spaceFileName); - log.debug("Space Garbage Collector: SpaceFile to remove {}.", spaceFileName); - - if (sfile.delete()) { - log.debug("Space Garbage Collector: SpaceFile {} removed.", spaceFileName); - } else { - log.warn("Space Garbage Collector: problem removing {}", spaceFileName); - } - - // Removing space entry from the DB - try { - 
ssDAO.removeStorageSpace(spaceTO.getSpaceToken()); - } catch (DataAccessException e) { - log.warn("Space Garbage Collector: error removing space entry from catalog."); - } - - } - - } - - public boolean increaseUsedSpace(String spaceToken, Long usedSpaceToAdd) { - - log.debug("Increase {} the used space of storage spaceToken: {}", - usedSpaceToAdd, spaceToken); - - try { - ssDAO = daoFactory.getStorageSpaceDAO(); - log.debug("Storage Space DAO retrieved."); - } catch (DataAccessException daEx) { - log.error("Error while retrieving StorageSpaceDAO: {}", - daEx.getMessage(), daEx); - return false; - } - int n = 0; - try { - n = ssDAO.increaseUsedSpace(spaceToken, usedSpaceToAdd); - } catch (DataAccessException daEx) { - log.error( - "Error during the increase of used space for spaceToken {}: {}", - spaceToken, daEx.getMessage()); - return false; - } - if (n == 0) { - log.warn( - "No errors caught but it seems no used space updates done on space token {}", - spaceToken); - } - log.debug("{} increaseUsedSpace += {}", spaceToken, usedSpaceToAdd); - return n > 0; - } - - public boolean decreaseUsedSpace(String spaceToken, Long usedSpaceToRemove) { - - log.debug("Decrease {} the used space of storage spaceToken: {}", - usedSpaceToRemove, spaceToken); - - try { - ssDAO = daoFactory.getStorageSpaceDAO(); - log.debug("Storage Space DAO retrieved."); - } catch (DataAccessException daEx) { - log.error("Error while retrieving StorageSpaceDAO: {}", - daEx.getMessage(), daEx); - return false; - } - int n = 0; - try { - n = ssDAO.decreaseUsedSpace(spaceToken, usedSpaceToRemove); - } catch (DataAccessException daEx) { - log.error( - "Error during the decrease of used space for spaceToken {}: {}", - spaceToken, daEx.getMessage()); - return false; - } - if (n == 0) { - log.warn( - "No errors caught but it seems no used space updates done on space token {}", - spaceToken); - } - log.debug("{} decreaseUsedSpace -= {}", spaceToken, usedSpaceToRemove); - return n > 0; - } + private 
static final Logger log = LoggerFactory.getLogger(ReservedSpaceCatalog.class); + + private static HashSet voSA_spaceTokenSet = new HashSet(); + private static HashMap voSA_UpdateTime = new HashMap(); + + private static final long NOT_INITIALIZED_SIZE_VALUE = -1L; + + private static ReservedSpaceCatalog instance; + + public static synchronized ReservedSpaceCatalog getInstance() { + if (instance == null) { + instance = new ReservedSpaceCatalog(); + } + return instance; + } + + private StorageSpaceDAO ssDAO; + + private ReservedSpaceCatalog() { + + log.debug("Building Reserve Space Catalog..."); + ssDAO = StorageSpaceDAOMySql.getInstance(); + } + + /********************************************* + * STATIC METHODS + *********************************************/ + public static void addSpaceToken(TSpaceToken token) { + + voSA_spaceTokenSet.add(token); + voSA_UpdateTime.put(token, null); + } + + public static HashSet getTokenSet() { + + return voSA_spaceTokenSet; + } + + public static void clearTokenSet() { + + voSA_spaceTokenSet.clear(); + voSA_UpdateTime.clear(); + } + + /** + * Basic method used to retrieve all the information about a StorageSpace - StorageSpace is + * selected by SpaceToken + * + * @param spaceToken TSpaceToken + * @return StorageSpaceData, null if no-one SS exists with the specified spaceToken + * @throws DataAccessException + */ + public StorageSpaceData getStorageSpace(TSpaceToken spaceToken) + throws TransferObjectDecodingException, DataAccessException { + + StorageSpaceData result = null; + ssDAO = StorageSpaceDAOMySql.getInstance(); + log.debug("Storage Space DAO retrieved."); + StorageSpaceTO ssTO = ssDAO.getStorageSpaceByToken(spaceToken.getValue()); + log.debug("Storage Space retrieved by Token. 
"); + if (ssTO != null) { + try { + result = new StorageSpaceData(ssTO); + } catch (IllegalArgumentException e) { + log.error( + "Error building StorageSpaceData from StorageSpaceTO " + "IllegalArgumentException: {}", + e.getLocalizedMessage(), e); + throw new TransferObjectDecodingException( + "Unable to build StorageSpaceData from StorageSpaceTO"); + } + } else { + log.info("Unable to build StorageSpaceData. No StorageSpaceTO built " + "from the DB"); + } + return result; + } + + /** + * Create a new StorageSpace entry into the DB. It is used for - STATIC Space Creation - DYNAMIC + * Space Reservation + * + * @param ssd + * @throws NoDataFoundException + * @throws InvalidRetrievedDataException + * @throws MultipleDataEntriesException + */ + public void addStorageSpace(StorageSpaceData ssd) throws DataAccessException { + + log.debug("ADD StorageSpace Start..."); + StorageSpaceTO ssTO = new StorageSpaceTO(ssd); + log.debug("Storage Space TO Created"); + ssTO.setUpdateTime(new Date()); + log.debug("Storage Space DAO retrieved."); + ssDAO.addStorageSpace(ssTO); + log.debug("StorageSpaceTO inserted in Persistence"); + } + + /** + * Update all the fields apart from the alias of a storage space row given the input + * StorageSpaceData + * + * @param ssd + * + * @throws DataAccessException + */ + public void updateStorageSpace(StorageSpaceData ssd) throws DataAccessException { + + updateStorageSpace(ssd, null); + } + + /** + * @param ssd + * @param updateTime + * + * @throws DataAccessException + */ + public void updateStorageSpace(StorageSpaceData ssd, Date updateTime) throws DataAccessException { + + log.debug("Storage Space DAO retrieved."); + + StorageSpaceTO ssTO = new StorageSpaceTO(ssd); + updateTime = updateTime == null ? 
new Date() : updateTime; + ssTO.setUpdateTime(updateTime); + + ssDAO.updateStorageSpace(ssTO); + log.debug("StorageSpaceTO updated in Persistence"); + } + + /** + * @param ssd + */ + public void updateStorageSpaceFreeSpace(StorageSpaceData ssd) throws DataAccessException { + + log.debug("Storage Space DAO retrieved."); + StorageSpaceTO ssTO = new StorageSpaceTO(ssd); + log.debug("Storage Space TO Created"); + ssTO.setUpdateTime(new Date()); + ssDAO.updateStorageSpaceFreeSpace(ssTO); + log.debug("StorageSpaceTO updated in Persistence"); + + } + + /** + * @param ssd + * @throws NoDataFoundException + * @throws InvalidRetrievedDataException + * @throws MultipleDataEntriesException + */ + public void updateAllStorageSpace(StorageSpaceData ssd) { + + updateAllStorageSpace(ssd, null); + } + + /** + * Update StorageSpace. This method is used to update the StorageSpace into the ReserveSpace + * Catalog. The update operation take place after a AbortRequest for a PrepareToPut operation done + * with the spaceToken.(With or without the size specified). + */ + + public void updateAllStorageSpace(StorageSpaceData ssd, Date updateTime) { + + log.debug("UPDATE StorageSpace Start..."); + // Build StorageSpaceTO from SpaceData + StorageSpaceTO ssTO = new StorageSpaceTO(ssd); + log.debug("Storage Space TO Created"); + if (updateTime == null) { + // The update time of the information is now + ssTO.setUpdateTime(new Date()); + } else { + ssTO.setUpdateTime(updateTime); + } + + // Add the row to the persistence.. + try { + ssDAO.updateAllStorageSpace(ssTO); + log.debug("StorageSpaceTO updated in Persistence"); + } catch (DataAccessException daEx) { + log.error("Error while inserting new row in StorageSpace: {}", daEx.getMessage(), daEx); + } + } + + /** + * @param desc + * @return + */ + public StorageSpaceData getStorageSpaceByAlias(String desc) { + + StorageSpaceData result = null; // new StorageSpaceData(); + log.debug("Retrieve Storage Space start... 
"); + + // Get StorageSpaceTO form persistence + try { + Collection cl = ssDAO.getStorageSpaceByAliasOnly(desc); + if (cl != null && !cl.isEmpty()) { + log.debug("Storage Space retrieved by Token. "); + // Build the result + try { + result = new StorageSpaceData(cl.toArray(new StorageSpaceTO[0])[0]); + } catch (IllegalArgumentException e) { + log.error("unable to build StorageSpaceData from StorageSpaceTO " + + "IllegalArgumentException: {}", e.getMessage(), e); + } + } + } catch (DataAccessException daEx) { + log.debug("Error while retrieving StorageSpace: {}", daEx.getMessage(), daEx); + } + + return result; + } + + /** + * Provides a list of storage spaces not initialized by comparing the used space stored against + * the well know not initialized value NOT_INITIALIZED_SIZE_VALUE + * + * @return SpaceData + */ + public List getStorageSpaceNotInitialized() { + + log.debug("Retrieve Storage Space not initialized start "); + List result = Lists.newLinkedList(); + + // Get StorageSpaceTO form persistence + try { + Collection storagesSpaceTOCollection = + ssDAO.getStorageSpaceByUnavailableUsedSpace(NOT_INITIALIZED_SIZE_VALUE); + log.debug("Storage Space retrieved by not initialized used space. "); + for (StorageSpaceTO storagesSpaceTO : storagesSpaceTOCollection) { + if (storagesSpaceTO != null) { + try { + result.add(new StorageSpaceData(storagesSpaceTO)); + } catch (IllegalArgumentException e) { + log.error("unable to build StorageSpaceData from StorageSpaceTO. 
" + + "IllegalArgumentException: {}", e.getMessage(), e); + } + } else { + log.warn("Received a collection of StorageSpaceTO containing null " + + "elements, skipping them"); + } + } + } catch (DataAccessException daEx) { + log.debug("Error while retrieving StorageSpace", daEx); + } + return result; + } + + /** + * + * @param user VomsGridUser + * @param spaceAlias String + * @return ArrayOfTSpaceToken + */ + public ArrayOfTSpaceToken getSpaceTokens(GridUserInterface user, String spaceAlias) { + + ArrayOfTSpaceToken result = new ArrayOfTSpaceToken(); + + log.debug("Retrieving space tokens..."); + + try { + + Collection listOfStorageSpace = ssDAO.getStorageSpaceByOwner(user, spaceAlias); + int nItems = listOfStorageSpace.size(); + log.debug("getSpaceTokens : Number of Storage spaces retrieved with " + "Alias '{}': {}", + spaceAlias, nItems); + Iterator j_ssTO = listOfStorageSpace.iterator(); + + while (j_ssTO.hasNext()) { + StorageSpaceTO ssTO = (StorageSpaceTO) j_ssTO.next(); + try { + TSpaceToken spaceToken = TSpaceToken.make(ssTO.getSpaceToken()); + result.addTSpaceToken(spaceToken); + } catch (InvalidTSpaceTokenAttributesException ex2) { + log.error("Retrieved invalid Space token from DB"); + } + } + + } catch (DataAccessException daEx) { + log.error("Error while retrieving StorageSpace: {}", daEx.getMessage(), daEx); + } catch (Exception e) { + log.error("Exception while retrieving Storage Space: {}", e.getMessage(), e); + } + return result; + } + + /** + * This method is used for the VOspaceArea Check. 
+ * + * @param spaceAlias + * @return + */ + + public ArrayOfTSpaceToken getSpaceTokensByAlias(String spaceAlias) { + + ArrayOfTSpaceToken result = new ArrayOfTSpaceToken(); + + log.debug("Retrieving space tokens..."); + + try { + + Collection listOfStorageSpace = ssDAO.getStorageSpaceByAliasOnly(spaceAlias); + int nItems = listOfStorageSpace.size(); + log.debug("Number of Storage spaces retrieved: {}", nItems); + Iterator j_ssTO = listOfStorageSpace.iterator(); + + while (j_ssTO.hasNext()) { + StorageSpaceTO ssTO = (StorageSpaceTO) j_ssTO.next(); + try { + TSpaceToken spaceToken = TSpaceToken.make(ssTO.getSpaceToken()); + result.addTSpaceToken(spaceToken); + } catch (InvalidTSpaceTokenAttributesException ex2) { + log.error("Retrieved invalid Space token from DB"); + } + } + + } catch (DataAccessException daEx) { + log.error("Error while retrieving StorageSpace: {}", daEx.getMessage(), daEx); + } catch (Exception e) { + log.error("Error getting data! Error: {}", e.getMessage(), e); + } + return result; + } + + // ************************ CHECH BELOW METHODS *************************** + + /** + * + * @param user GridUserInterface + * @param spaceToken TSpaceToken + * @return boolean + */ + public boolean release(GridUserInterface user, final TSpaceToken spaceToken) { + + log.debug("Delete storage spaceToken info from persistence: {}", spaceToken); + + boolean rowRemoved = true; + // Delete the row from persistence. + try { + ssDAO.removeStorageSpace(user, spaceToken.getValue()); + log.debug("spaceToken removed from DB."); + } catch (DataAccessException daEx) { + log.error("spaceToken not found in the DB: {}", spaceToken.getValue()); + rowRemoved = false; + } + return rowRemoved; + } + + /** + * Method that purges the catalog, removing expired space reservation. The spacefile with lifetime + * expired are removed from the file systems. 
+ * + */ + public void purge() { + + log.debug("Space Garbage Collector start!"); + Calendar rightNow = Calendar.getInstance(); + + // Get the Collection of Space Reservation Expired + Collection expiredSpaceTO; + try { + expiredSpaceTO = ssDAO.getExpired(rightNow.getTimeInMillis() / 1000); + } catch (DataAccessException e) { + // No space expired FOUND + log.debug("Space Garbage Collector: no space expired found."); + return; + } + + // For each entry expired + // 1) Delete the related space file + // 2) Remove the entry from the DB + + StorageSpaceTO spaceTO = null; + log.debug("Space Garbage Collector: Number of SpaceFile to remove {}.", expiredSpaceTO.size()); + + for (Iterator i = expiredSpaceTO.iterator(); i.hasNext();) { + spaceTO = (StorageSpaceTO) i.next(); + // Deleting space File + String spaceFileName = spaceTO.getSpaceFile(); + File sfile = new File(spaceFileName); + log.debug("Space Garbage Collector: SpaceFile to remove {}.", spaceFileName); + + if (sfile.delete()) { + log.debug("Space Garbage Collector: SpaceFile {} removed.", spaceFileName); + } else { + log.warn("Space Garbage Collector: problem removing {}", spaceFileName); + } + + // Removing space entry from the DB + try { + ssDAO.removeStorageSpace(spaceTO.getSpaceToken()); + } catch (DataAccessException e) { + log.warn("Space Garbage Collector: error removing space entry from catalog."); + } + } + } + + public boolean increaseUsedSpace(String spaceToken, Long usedSpaceToAdd) { + + log.debug("Increase {} the used space of storage spaceToken: {}", usedSpaceToAdd, spaceToken); + + int n = 0; + try { + n = ssDAO.increaseUsedSpace(spaceToken, usedSpaceToAdd); + } catch (DataAccessException daEx) { + log.error("Error during the increase of used space for spaceToken {}: {}", spaceToken, + daEx.getMessage()); + return false; + } + if (n == 0) { + log.warn("No errors caught but it seems no used space updates done on space token {}", + spaceToken); + } + log.debug("{} increaseUsedSpace += {}", 
spaceToken, usedSpaceToAdd); + return n > 0; + } + + public boolean decreaseUsedSpace(String spaceToken, Long usedSpaceToRemove) { + + log.debug("Decrease {} the used space of storage spaceToken: {}", usedSpaceToRemove, + spaceToken); + + int n = 0; + try { + n = ssDAO.decreaseUsedSpace(spaceToken, usedSpaceToRemove); + } catch (DataAccessException daEx) { + log.error("Error during the decrease of used space for spaceToken {}: {}", spaceToken, + daEx.getMessage()); + return false; + } + if (n == 0) { + log.warn("No errors caught but it seems no used space updates done on space token {}", + spaceToken); + } + log.debug("{} decreaseUsedSpace -= {}", spaceToken, usedSpaceToRemove); + return n > 0; + } } diff --git a/src/main/java/it/grid/storm/catalogs/SizeInBytesIntConverter.java b/src/main/java/it/grid/storm/catalogs/SizeInBytesIntConverter.java deleted file mode 100644 index bc48611b3..000000000 --- a/src/main/java/it/grid/storm/catalogs/SizeInBytesIntConverter.java +++ /dev/null @@ -1,71 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ - -package it.grid.storm.catalogs; - -import it.grid.storm.srm.types.TSizeInBytes; - -/** - * Class that handles DB representation of a TSizeInBytes, in particular it - * takes care of the NULL logic of the DB: 0/null are used to mean an empty - * field, whereas StoRM Object model uses the type TSizeInBytes.makeEmpty(); - * moreover StoRM does accept 0 as a valid TSizeInBytes, so it _is_ important to - * use this converter! - * - * @author EGRID ICTP - * @version 2.0 - * @date July 2005 - */ -public class SizeInBytesIntConverter { - - private static SizeInBytesIntConverter stc = new SizeInBytesIntConverter(); - - private SizeInBytesIntConverter() { - - } - - /** - * Method that returns the only instance of SizeInBytesIntConverter - */ - public static SizeInBytesIntConverter getInstance() { - - return stc; - } - - /** - * Method that transaltes the Empty TSizeInBytes into the empty representation - * of DB which is 0. Any other int is left as is. - */ - public long toDB(long s) { - - if (s == TSizeInBytes.makeEmpty().value()) - return 0; - return s; - } - - /** - * Method that returns the int as is, except if it is 0 which DB interprests - * as empty field: in that case it then returns the Empty TSizeInBytes int - * representation. - */ - public long toStoRM(long s) { - - if (s == 0) - return TSizeInBytes.makeEmpty().value(); - return s; - } -} diff --git a/src/main/java/it/grid/storm/catalogs/SpaceTokenStringConverter.java b/src/main/java/it/grid/storm/catalogs/SpaceTokenStringConverter.java deleted file mode 100644 index 75c79230f..000000000 --- a/src/main/java/it/grid/storm/catalogs/SpaceTokenStringConverter.java +++ /dev/null @@ -1,70 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. 
You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.catalogs; - -import it.grid.storm.srm.types.TSpaceToken; - -/** - * Class that handles DPM DB representation of a SpaceToken, in particular it - * takes care of the NULL/EMPTY logic of DPM. In particular DPM uses the empty - * string "" as meaning the absence of a value for the field, wheras StoRM - * accepts it as a valis String with which to create a TSpaceToken; moreover - * StoRM uses an Empty TSpaceToken type. - * - * @author EGRID ICTP - * @version 1.0 - * @date June 2005 - */ -class SpaceTokenStringConverter { - - private static SpaceTokenStringConverter stc = new SpaceTokenStringConverter(); - - private SpaceTokenStringConverter() { - - } - - /** - * Method that returns the only instance od SpaceTokenConverter - */ - public static SpaceTokenStringConverter getInstance() { - - return stc; - } - - /** - * Method that translates StoRM Empty TSpaceToken String representation into - * DPM empty representation; all other Strings are left as are. - */ - public String toDB(String s) { - - if (s.equals(TSpaceToken.makeEmpty().toString())) - return ""; - return s; - } - - /** - * Method that translates DPM String representing an Empty TSpaceToken into - * StoRM representation; any other String is left as is. 
- */ - public String toStoRM(String s) { - - if ((s == null) || (s.equals(""))) - return TSpaceToken.makeEmpty().toString(); - return s; - } -} diff --git a/src/main/java/it/grid/storm/catalogs/StatusCodeConverter.java b/src/main/java/it/grid/storm/catalogs/StatusCodeConverter.java deleted file mode 100644 index 4e260d76d..000000000 --- a/src/main/java/it/grid/storm/catalogs/StatusCodeConverter.java +++ /dev/null @@ -1,116 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.catalogs; - -import java.util.Map; -import java.util.HashMap; -import java.util.Iterator; -import it.grid.storm.srm.types.TStatusCode; - -/** - * Package private auxiliary class used to convert between DB raw data and StoRM - * object model representation of StatusCode. 
- * - * @author: EGRID ICTP - * @version: 2.0 - * @date: June 2005 - */ -public class StatusCodeConverter { - - private Map DBtoSTORM = new HashMap(); - private Map STORMtoDB = new HashMap(); - - private static StatusCodeConverter c = new StatusCodeConverter(); - - private StatusCodeConverter() { - - DBtoSTORM.put(new Integer(0), TStatusCode.SRM_SUCCESS); - DBtoSTORM.put(new Integer(1), TStatusCode.SRM_FAILURE); - DBtoSTORM.put(new Integer(2), TStatusCode.SRM_AUTHENTICATION_FAILURE); - DBtoSTORM.put(new Integer(3), TStatusCode.SRM_AUTHORIZATION_FAILURE); - DBtoSTORM.put(new Integer(4), TStatusCode.SRM_INVALID_REQUEST); - DBtoSTORM.put(new Integer(5), TStatusCode.SRM_INVALID_PATH); - DBtoSTORM.put(new Integer(6), TStatusCode.SRM_FILE_LIFETIME_EXPIRED); - DBtoSTORM.put(new Integer(7), TStatusCode.SRM_SPACE_LIFETIME_EXPIRED); - DBtoSTORM.put(new Integer(8), TStatusCode.SRM_EXCEED_ALLOCATION); - DBtoSTORM.put(new Integer(9), TStatusCode.SRM_NO_USER_SPACE); - DBtoSTORM.put(new Integer(10), TStatusCode.SRM_NO_FREE_SPACE); - DBtoSTORM.put(new Integer(11), TStatusCode.SRM_DUPLICATION_ERROR); - DBtoSTORM.put(new Integer(12), TStatusCode.SRM_NON_EMPTY_DIRECTORY); - DBtoSTORM.put(new Integer(13), TStatusCode.SRM_TOO_MANY_RESULTS); - DBtoSTORM.put(new Integer(14), TStatusCode.SRM_INTERNAL_ERROR); - DBtoSTORM.put(new Integer(15), TStatusCode.SRM_FATAL_INTERNAL_ERROR); - DBtoSTORM.put(new Integer(16), TStatusCode.SRM_NOT_SUPPORTED); - DBtoSTORM.put(new Integer(17), TStatusCode.SRM_REQUEST_QUEUED); - DBtoSTORM.put(new Integer(18), TStatusCode.SRM_REQUEST_INPROGRESS); - DBtoSTORM.put(new Integer(19), TStatusCode.SRM_REQUEST_SUSPENDED); - DBtoSTORM.put(new Integer(20), TStatusCode.SRM_ABORTED); - DBtoSTORM.put(new Integer(21), TStatusCode.SRM_RELEASED); - DBtoSTORM.put(new Integer(22), TStatusCode.SRM_FILE_PINNED); - DBtoSTORM.put(new Integer(23), TStatusCode.SRM_FILE_IN_CACHE); - DBtoSTORM.put(new Integer(24), TStatusCode.SRM_SPACE_AVAILABLE); - DBtoSTORM.put(new Integer(25), 
TStatusCode.SRM_LOWER_SPACE_GRANTED); - DBtoSTORM.put(new Integer(26), TStatusCode.SRM_DONE); - DBtoSTORM.put(new Integer(27), TStatusCode.SRM_PARTIAL_SUCCESS); - DBtoSTORM.put(new Integer(28), TStatusCode.SRM_REQUEST_TIMED_OUT); - DBtoSTORM.put(new Integer(29), TStatusCode.SRM_LAST_COPY); - DBtoSTORM.put(new Integer(30), TStatusCode.SRM_FILE_BUSY); - DBtoSTORM.put(new Integer(31), TStatusCode.SRM_FILE_LOST); - DBtoSTORM.put(new Integer(32), TStatusCode.SRM_FILE_UNAVAILABLE); - DBtoSTORM.put(new Integer(33), TStatusCode.SRM_CUSTOM_STATUS); - - Object aux; - for (Iterator i = DBtoSTORM.keySet().iterator(); i.hasNext();) { - aux = i.next(); - STORMtoDB.put(DBtoSTORM.get(aux), aux); - } - } - - /** - * Method that returns the only instance of StatusCodeConverter. - */ - public static StatusCodeConverter getInstance() { - - return c; - } - - /** - * Method that returns the int used in the DB to represent the given - * TStatusCode. -1 is returned if no match is found. - */ - public int toDB(TStatusCode sc) { - - Integer aux = (Integer) STORMtoDB.get(sc); - if (aux == null) - return -1; - return aux.intValue(); - } - - /** - * Method that returns the TStatusCode used by StoRM to represent the supplied - * int representation of the DB. TStatusCode.EMPTY is returned if no StoRM - * type is found. 
- */ - public TStatusCode toSTORM(int n) { - - TStatusCode aux = DBtoSTORM.get(new Integer(n)); - if (aux == null) - return TStatusCode.EMPTY; - return aux; - } -} diff --git a/src/main/java/it/grid/storm/catalogs/StoRMDataSource.java b/src/main/java/it/grid/storm/catalogs/StoRMDataSource.java deleted file mode 100644 index 01fa46777..000000000 --- a/src/main/java/it/grid/storm/catalogs/StoRMDataSource.java +++ /dev/null @@ -1,177 +0,0 @@ -package it.grid.storm.catalogs; - -import it.grid.storm.config.Configuration; - -import java.sql.Connection; -import java.sql.SQLException; - -import javax.sql.DataSource; - -import org.apache.commons.dbcp.BasicDataSource; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - - -public class StoRMDataSource { - - public static final Logger log = LoggerFactory - .getLogger(StoRMDataSource.class); - - public static class Builder{ - - private static final String VALIDATION_QUERY = "select 1 from dual"; - - private String driver; - private String url; - - private String username; - private String password; - - private int maxPooledConnections = 200; - private int initialPoolSize = 10; - - private BasicDataSource ds; - - public Builder() { - } - - public Builder driver(String driver){ - this.driver = driver; - return this; - } - - public Builder url(String url){ - this.url = url; - return this; - } - - public Builder username(String username){ - this.username = username; - return this; - } - - public Builder password(String password){ - this.password = password; - return this; - } - - public Builder maxPooledConnections(int maxPool){ - if (maxPool < 1){ - throw new IllegalArgumentException("maxPooledConnections must be >= 1"); - } - this.maxPooledConnections = maxPool; - return this; - } - - public Builder initialPoolSize(int initialSize){ - if (initialSize <= 0){ - throw new IllegalArgumentException("initialSize must be >= 0"); - } - this.initialPoolSize = initialSize; - return this; - } - - private void sanityChecks(){ - if 
((username == null) || (username.isEmpty())) - throw new IllegalArgumentException("null or empty username"); - - if ((driver == null) || (driver.isEmpty())) - throw new IllegalArgumentException("null or empty driver"); - - if ((url == null) || (url.isEmpty())) - throw new IllegalArgumentException("null or empty url"); - - if ((password == null) || (password.isEmpty())) - throw new IllegalArgumentException("null or empty password"); - } - - private void logConfiguration(){ - if (log.isDebugEnabled()){ - log.debug("driver: {}", driver); - log.debug("url: {}", url); - log.debug("username: {}", username); - log.debug("password: {}", password); - log.debug("initialPoolSize: {}", initialPoolSize); - log.debug("maxPooledConnections: {}", maxPooledConnections); - } - } - public StoRMDataSource build(){ - sanityChecks(); - logConfiguration(); - ds = new BasicDataSource(); - ds.setDriverClassName(driver); - ds.setUrl(url); - ds.setUsername(username); - ds.setPassword(password); - ds.setInitialSize(initialPoolSize); - ds.setMaxActive(maxPooledConnections); - ds.setValidationQuery(VALIDATION_QUERY); - ds.setTestWhileIdle(true); - ds.setPoolPreparedStatements(true); - ds.setMaxOpenPreparedStatements(200); - return new StoRMDataSource(this); - } - - } - - private StoRMDataSource(Builder b) { - this.dataSource = b.ds; - } - - private BasicDataSource dataSource; - - - /** - * @return the dataSource - */ - public DataSource getDataSource() { - return dataSource; - } - - - /** - * @throws SQLException - * @see org.apache.commons.dbcp.BasicDataSource#close() - */ - public void close() throws SQLException { - dataSource.close(); - } - - - - /** - * @return - * @throws SQLException - * @see org.apache.commons.dbcp.BasicDataSource#getConnection() - */ - public Connection getConnection() throws SQLException { - return dataSource.getConnection(); - } - - private static volatile StoRMDataSource instance = null; - - public static synchronized StoRMDataSource getInstance(){ - return 
instance; - } - - public static synchronized void init(){ - if (instance != null){ - log.warn("Called init on already initialized Storm data source."); - log.warn("The datasource will be closed and re-initialized."); - try { - instance.close(); - } catch (SQLException e) { - log.error("Error closing storm data source: {}", e.getMessage(), e); - } - } - - log.info("Initializing StoRM datasource"); - Configuration conf = Configuration.getInstance(); - instance = new StoRMDataSource.Builder() - .driver(conf.getDBDriver()) - .url(conf.getDBURL()) - .username(conf.getDBUserName()) - .password(conf.getDBPassword()) - .build(); - } -} diff --git a/src/main/java/it/grid/storm/catalogs/SurlRequestData.java b/src/main/java/it/grid/storm/catalogs/SurlRequestData.java deleted file mode 100644 index f56079a43..000000000 --- a/src/main/java/it/grid/storm/catalogs/SurlRequestData.java +++ /dev/null @@ -1,269 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ - -package it.grid.storm.catalogs; - -import java.util.Map; - -import it.grid.storm.srm.types.TReturnStatus; -import it.grid.storm.srm.types.TSURL; -import it.grid.storm.srm.types.TStatusCode; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * @author Michele Dibenedetto - * - */ -public abstract class SurlRequestData implements RequestData { - - private static final Logger log = LoggerFactory - .getLogger(SurlRequestData.class); - - protected TSURL SURL; - protected TReturnStatus status; - - public SurlRequestData(TSURL toSURL, TReturnStatus status) - throws InvalidSurlRequestDataAttributesException { - - if (toSURL == null || status == null || status.getStatusCode() == null) { - throw new InvalidSurlRequestDataAttributesException(toSURL, status); - } - this.SURL = toSURL; - this.status = status; - } - - /** - * Method that returns the TURL for this chunk of the srm request. - */ - @Override - public final TSURL getSURL() { - - return SURL; - } - - /** - * Method that returns the status for this chunk of the srm request. - */ - @Override - public final TReturnStatus getStatus() { - - return status; - } - - /** - * Method used to set the Status associated to this chunk. If status is null, - * then nothing gets set! - */ - public void setStatus(TReturnStatus status) { - - if (status != null) { - this.status = status; - } - } - - protected void setStatus(TStatusCode statusCode, String explanation) { - - if (explanation == null) { - status = new TReturnStatus(statusCode); - } else { - status = new TReturnStatus(statusCode, explanation); - } - } - - /** - * Method that sets the status of this request to SRM_REQUEST_QUEUED; it needs - * the explanation String which describes the situation in greater detail; if - * a null is passed, then an empty String is used as explanation. 
- */ - @Override - public final void changeStatusSRM_REQUEST_QUEUED(String explanation) { - - setStatus(TStatusCode.SRM_REQUEST_QUEUED, explanation); - } - - /** - * Method that sets the status of this request to SRM_REQUEST_INPROGRESS; it - * needs the explanation String which describes the situation in greater - * detail; if a null is passed, then an empty String is used as explanation. - */ - @Override - public final void changeStatusSRM_REQUEST_INPROGRESS(String explanation) { - - setStatus(TStatusCode.SRM_REQUEST_INPROGRESS, explanation); - } - - /** - * Method that sets the status of this request to SRM_SUCCESS; it needs the - * explanation String which describes the situation in greater detail; if a - * null is passed, then an empty String is used as explanation. - */ - @Override - public final void changeStatusSRM_SUCCESS(String explanation) { - - setStatus(TStatusCode.SRM_SUCCESS, explanation); - } - - /** - * Method that sets the status of this request to SRM_INTERNAL_ERROR; it needs - * the explanation String which describes the situation in greater detail; if - * a null is passed, then an empty String is used as explanation. - */ - @Override - public final void changeStatusSRM_INTERNAL_ERROR(String explanation) { - - setStatus(TStatusCode.SRM_INTERNAL_ERROR, explanation); - } - - /** - * Method that sets the status of this request to SRM_INVALID_REQUEST; it - * needs the explanation String which describes the situation in greater - * detail; if a null is passed, then an empty String is used as explanation. - */ - @Override - public final void changeStatusSRM_INVALID_REQUEST(String explanation) { - - setStatus(TStatusCode.SRM_INVALID_REQUEST, explanation); - } - - /** - * Method that sets the status of this request to SRM_AUTHORIZATION_FAILURE; - * it needs the explanation String which describes the situation in greater - * detail; if a null is passed, then an empty String is used as explanation. 
- */ - @Override - public final void changeStatusSRM_AUTHORIZATION_FAILURE(String explanation) { - - setStatus(TStatusCode.SRM_AUTHORIZATION_FAILURE, explanation); - } - - /** - * Method that sets the status of this request to SRM_ABORTED; it needs the - * explanation String which describes the situation in greater detail; if a - * null is passed, then an empty String is used as explanation. - */ - @Override - public final void changeStatusSRM_ABORTED(String explanation) { - - setStatus(TStatusCode.SRM_ABORTED, explanation); - } - - @Override - public final void changeStatusSRM_FILE_BUSY(String explanation) { - - setStatus(TStatusCode.SRM_FILE_BUSY, explanation); - } - - @Override - public final void changeStatusSRM_INVALID_PATH(String explanation) { - - setStatus(TStatusCode.SRM_INVALID_PATH, explanation); - } - - @Override - public final void changeStatusSRM_NOT_SUPPORTED(String explanation) { - - setStatus(TStatusCode.SRM_NOT_SUPPORTED, explanation); - } - - @Override - public final void changeStatusSRM_FAILURE(String explanation) { - - setStatus(TStatusCode.SRM_FAILURE, explanation); - } - - @Override - public final void changeStatusSRM_SPACE_LIFETIME_EXPIRED(String explanation) { - - setStatus(TStatusCode.SRM_SPACE_LIFETIME_EXPIRED, explanation); - } - - @Override - public String display(Map map) { - - // nonsense method - return ""; - } - - /* - * (non-Javadoc) - * - * @see java.lang.Object#hashCode() - */ - @Override - public int hashCode() { - - final int prime = 31; - int result = 1; - result = prime * result + ((SURL == null) ? 0 : SURL.hashCode()); - result = prime * result + ((status == null) ? 
0 : status.hashCode()); - return result; - } - - /* - * (non-Javadoc) - * - * @see java.lang.Object#equals(java.lang.Object) - */ - @Override - public boolean equals(Object obj) { - - if (this == obj) { - return true; - } - if (obj == null) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - SurlRequestData other = (SurlRequestData) obj; - if (SURL == null) { - if (other.SURL != null) { - return false; - } - } else if (!SURL.equals(other.SURL)) { - return false; - } - if (status == null) { - if (other.status != null) { - return false; - } - } else if (!status.equals(other.status)) { - return false; - } - return true; - } - - /* - * (non-Javadoc) - * - * @see java.lang.Object#toString() - */ - @Override - public String toString() { - - StringBuilder builder = new StringBuilder(); - builder.append("SurlRequestData [SURL="); - builder.append(SURL); - builder.append(", status="); - builder.append(status); - builder.append("]"); - return builder.toString(); - } -} diff --git a/src/main/java/it/grid/storm/catalogs/TURLConverter.java b/src/main/java/it/grid/storm/catalogs/TURLConverter.java deleted file mode 100644 index c20bece1f..000000000 --- a/src/main/java/it/grid/storm/catalogs/TURLConverter.java +++ /dev/null @@ -1,69 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ - -package it.grid.storm.catalogs; - -import it.grid.storm.srm.types.TTURL; - -/** - * Class that handles DPM DB representation of a TTURL, in particular it takes - * care of the NULL/EMPTY logic of DPM. Indeed DPM uses 0/null to mean an empty - * field, whereas StoRM uses the type TTURL.makeEmpty(); in particular StoRM - * converts an empty String or a null to an Empty TTURL! - * - * @author EGRID ICTP - * @version 1.0 - * @date March 2006 - */ -public class TURLConverter { - - private static TURLConverter stc = new TURLConverter(); // only instance - - private TURLConverter() { - - } - - /** - * Method that returns the only instance of SizeInBytesIntConverter - */ - public static TURLConverter getInstance() { - - return stc; - } - - /** - * Method that transaltes the Empty TTURL into the empty representation of DPM - * which is a null! Any other String is left as is. - */ - public String toDB(String s) { - - if (s.equals(TTURL.makeEmpty().toString())) - return null; - return s; - } - - /** - * Method that translates DPMs "" or null String as the Empty TTURL String - * representation. Any other String is left as is. - */ - public String toStoRM(String s) { - - if ((s == null) || (s.equals(""))) - return TTURL.makeEmpty().toString(); - return s; - } -} diff --git a/src/main/java/it/grid/storm/catalogs/TransferProtocolListConverter.java b/src/main/java/it/grid/storm/catalogs/TransferProtocolListConverter.java deleted file mode 100644 index 5eb9a5c97..000000000 --- a/src/main/java/it/grid/storm/catalogs/TransferProtocolListConverter.java +++ /dev/null @@ -1,67 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. 
You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.catalogs; - -import it.grid.storm.common.types.TURLPrefix; -import java.util.Iterator; -import java.util.List; -import java.util.ArrayList; -import it.grid.storm.namespace.model.Protocol; - -/** - * Package private auxiliary class used to convert between the DB raw data - * representation and StoRM s Object model list of transfer protocols. - * - */ - -class TransferProtocolListConverter { - - /** - * Method that returns a List of Uppercase Strings used in the DB to represent - * the given TURLPrefix. An empty List is returned in case the conversion does - * not succeed, a null TURLPrefix is supplied, or its size is 0. - */ - public static List toDB(TURLPrefix turlPrefix) { - - List result = new ArrayList(); - Protocol protocol; - for (Iterator it = turlPrefix.getDesiredProtocols().iterator(); it - .hasNext();) { - protocol = it.next(); - result.add(protocol.getSchema()); - } - return result; - } - - /** - * Method that returns a TURLPrefix of transfer protocol. If the translation - * cannot take place, a TURLPrefix of size 0 is returned. Likewise if a null - * List is supplied. 
- */ - public static TURLPrefix toSTORM(List listOfProtocol) { - - TURLPrefix turlPrefix = new TURLPrefix(); - Protocol protocol = null; - for (Iterator i = listOfProtocol.iterator(); i.hasNext();) { - protocol = Protocol.getProtocol(i.next()); - if (!(protocol.equals(Protocol.UNKNOWN))) - turlPrefix.addProtocol(protocol); - } - return turlPrefix; - } -} diff --git a/src/main/java/it/grid/storm/catalogs/VolatileAndJiTCatalog.java b/src/main/java/it/grid/storm/catalogs/VolatileAndJiTCatalog.java index 5f2ef76a1..1e686d36c 100644 --- a/src/main/java/it/grid/storm/catalogs/VolatileAndJiTCatalog.java +++ b/src/main/java/it/grid/storm/catalogs/VolatileAndJiTCatalog.java @@ -17,17 +17,6 @@ package it.grid.storm.catalogs; -import it.grid.storm.acl.AclManager; -import it.grid.storm.acl.AclManagerFS; -import it.grid.storm.common.types.PFN; -import it.grid.storm.common.types.TimeUnit; -import it.grid.storm.config.Configuration; -import it.grid.storm.filesystem.FilesystemPermission; -import it.grid.storm.filesystem.LocalFile; -import it.grid.storm.griduser.LocalUser; -import it.grid.storm.namespace.NamespaceDirector; -import it.grid.storm.srm.types.TLifeTimeInSeconds; - import java.util.ArrayList; import java.util.Calendar; import java.util.Collection; @@ -39,41 +28,48 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import it.grid.storm.acl.AclManager; +import it.grid.storm.acl.AclManagerFS; +import it.grid.storm.common.types.PFN; +import it.grid.storm.common.types.TimeUnit; +import it.grid.storm.config.Configuration; +import it.grid.storm.filesystem.FilesystemPermission; +import it.grid.storm.filesystem.LocalFile; +import it.grid.storm.griduser.LocalUser; +import it.grid.storm.namespace.NamespaceDirector; +import it.grid.storm.persistence.dao.VolatileAndJiTDAO; +import it.grid.storm.persistence.impl.mysql.VolatileAndJiTDAOMySql; +import it.grid.storm.persistence.model.JiTData; +import it.grid.storm.srm.types.TLifeTimeInSeconds; + /** - * This catalog holds all 
info needed to pin files for JiT ACL tracking, and - * for keeping track of Volatile files. pinLifetime is the time Jit ACLs will be - * in place: upon expiry ACLs are removed; fileLifetime is the time Volatile - * files will remain in the system: upon expiry those files are removed. In - * particular the srmPrepareToPut analyzes the request and if the specified file - * is set to Volatile, then it calls on the catalog to add the corresponding - * entry for the given fileLifetime. If StoRM is configured for JiT, another - * method is invoked to add an entry to keep track of the ACLs for the desired - * pinLifetime. For srmPrepareToGet, only if StoRM is configured for JiT ACLs - * then a method is invoked to add the corresponding entry for the given - * pinLifetime. Repeatedly putting the same Volatile file, will overwrite - * existing fileLifetime only if the overwrite option allows file overwriting. - * If JiT is enabled and it is a new user that is putting again the same file - * in, a new pinLifetime entry is added; but if it is the same user, the - * pinLifetime WILL be changed provided the new expiry exceeds the current one! - * Repeatedly invoking PtG on the same file behaves similarly: different users - * will have their own pinLifetime record, but the same user WILL change the - * pinLifetime provided the new expiry exceeds the current one! In case the - * pinLifetime exceeds the fileLifetime, the fileLifetime is used as ceiling. - * This may occur when a file is Put and defined Volatile, but with a - * pinLifetime that is longer than that of the pin. Or if _subsequent_ calls to - * PtG specify a pinLifetime that lasts longer. To be more precise, the - * pinLifetime gets recorded as requested, but upon expiry of the volatile entry - * any associated acl will get removed as well, regardless of the acl expiry. 
- * When lifetime expires: volatile files get erased from the system and their - * entries in the catalog are removed; tracked ACLs get removed from the files - * WITHOUT erasing the files, and their entries in the catalog are removed; - * finally for Volatile files with ACLs set up on them, the ACLs are removed AND - * the files are erased, also cleaning up the catalog. As a last note, the - * catalog checks periodically its entries for any expired ones, and then - * proceeds with purging; this frequency of cleaning is specified in a - * configuration parameter, and the net effect is that the pinning/volatile may - * actually last longer (but never less) because the self cleaning mechanism is - * active only at those predetermined times. + * This catalog holds all info needed to pin files for JiT ACL tracking, and for keeping track of + * Volatile files. pinLifetime is the time Jit ACLs will be in place: upon expiry ACLs are removed; + * fileLifetime is the time Volatile files will remain in the system: upon expiry those files are + * removed. In particular the srmPrepareToPut analyzes the request and if the specified file is set + * to Volatile, then it calls on the catalog to add the corresponding entry for the given + * fileLifetime. If StoRM is configured for JiT, another method is invoked to add an entry to keep + * track of the ACLs for the desired pinLifetime. For srmPrepareToGet, only if StoRM is configured + * for JiT ACLs then a method is invoked to add the corresponding entry for the given pinLifetime. + * Repeatedly putting the same Volatile file, will overwrite existing fileLifetime only if the + * overwrite option allows file overwriting. If JiT is enabled and it is a new user that is putting + * again the same file in, a new pinLifetime entry is added; but if it is the same user, the + * pinLifetime WILL be changed provided the new expiry exceeds the current one! 
Repeatedly invoking + * PtG on the same file behaves similarly: different users will have their own pinLifetime record, + * but the same user WILL change the pinLifetime provided the new expiry exceeds the current one! In + * case the pinLifetime exceeds the fileLifetime, the fileLifetime is used as ceiling. This may + * occur when a file is Put and defined Volatile, but with a pinLifetime that is longer than that of + * the pin. Or if _subsequent_ calls to PtG specify a pinLifetime that lasts longer. To be more + * precise, the pinLifetime gets recorded as requested, but upon expiry of the volatile entry any + * associated acl will get removed as well, regardless of the acl expiry. When lifetime expires: + * volatile files get erased from the system and their entries in the catalog are removed; tracked + * ACLs get removed from the files WITHOUT erasing the files, and their entries in the catalog are + * removed; finally for Volatile files with ACLs set up on them, the ACLs are removed AND the files + * are erased, also cleaning up the catalog. As a last note, the catalog checks periodically its + * entries for any expired ones, and then proceeds with purging; this frequency of cleaning is + * specified in a configuration parameter, and the net effect is that the pinning/volatile may + * actually last longer (but never less) because the self cleaning mechanism is active only at those + * predetermined times. * * @author EGRID - ICTP Trieste * @version 2.0 @@ -81,536 +77,485 @@ */ public class VolatileAndJiTCatalog { - private static final Logger log = LoggerFactory - .getLogger(VolatileAndJiTCatalog.class); - - /** only instance of Catalog! */ - private static final VolatileAndJiTCatalog cat = new VolatileAndJiTCatalog(); - /** only instance of DAO object! */ - private static final VolatileAndJiTDAO dao = VolatileAndJiTDAO.getInstance(); - /** Timer object in charge of cleaning periodically the Catalog! 
*/ - private final Timer cleaner = new Timer(); - /** Delay time before starting cleaning thread! Set to 1 minute */ - private final long delay = Configuration.getInstance() - .getCleaningInitialDelay() * 1000; - /** Period of execution of cleaning! Set to 1 hour */ - private final long period = Configuration.getInstance() - .getCleaningTimeInterval() * 1000; - /** fileLifetime to use if user specified a non-positive value */ - private final long defaultFileLifetime = Configuration.getInstance() - .getFileLifetimeDefault(); - /** Number of seconds to use as default if the supplied lifetime is zero! */ - private final long floor = Configuration.getInstance() - .getPinLifetimeDefault(); - /** - * Maximum number of seconds that an ACL can live: the life time requested by - * the user cannot be greater than this value! This ceiling is needed because - * of the cron job that removes pool account mappings: when the mapping is - * removed, there must NOT be ANY ACL for that pool-user left! - */ - private final long ceiling = Configuration.getInstance() - .getPinLifetimeMaximum(); - - /** - * Private constructor that starts the cleaning timer. - */ - private VolatileAndJiTCatalog() { - - TimerTask cleaningTask = new TimerTask() { - - @Override - public void run() { - - purge(); - } - }; - cleaner.scheduleAtFixedRate(cleaningTask, delay, period); - } - - /** - * Method that returns the only instance of PinnedFilesCatalog. - */ - public static VolatileAndJiTCatalog getInstance() { - - return cat; - } - - /** - * Checks whether the given file exists in the volatile table or not. - * - * @param filename - * @return true if there is antry for the given file in the - * volatilte table, false otherwise. - */ - synchronized public boolean exists(PFN pfn) { - - return dao.exists(pfn.getValue()); - } - - /** - * Method used to expire _all_ related entries in the JiT catalogue, that were - * setup during a PtG operation. 
The method is intended to be used by code - * handling srmAbort command. Notice that the Traverse on the parents is NOT - * removed! This is to accomodate for the use case of a user that has run many - * PtG on different SURLs but all contained in the same directory tree! In - * practice this method removes the R permission. If any entry does not exist, - * then nothing happens and a warning gets written in the logs; otherwise - * entries get their start time set to now, and the lifetime set to zero; in - * case more than one matching entry is found, a message gets written to the - * logs, and the updating continues anyway as explained. At this point, when - * the garbage collector wakes up the entries get cleanly handled (physical - * ACL is removed, catalog entry removed, etc.); or an earlier cleaning can be - * forced by invoking directly the purge mehod. The method returns FALSE in - * case an entry was not found or the supplied parameters were null, and TRUE - * otherwise. Yet keep in mind that it says nothing of whether the DB - * operation was successful or not. - */ - synchronized public boolean expireGetJiTs(PFN pfn, LocalUser localUser) { - - if (pfn != null && localUser != null) { - return expireJiT(pfn, localUser, FilesystemPermission.Read); - } - log.error("VolatileAndJiT CATALOG: programming bug! expireGetJiTs invoked " - + "on null attributes; pfn={} localUser={}", pfn, localUser); - return false; - } - - /** - * Method used to expire an entry in the JiT catalogue. The method is intended - * to be used by code handling srmAbort command. If the entry does not exist, - * then nothing happens and a warning gets written in the logs; otherwise the - * entry gets its start time set to now, and its lifetime set to zero; in case - * more than one matching entry is found, a message gets written to the logs, - * and the updating continues anyway as explained. 
At this point, when the - * garbage collector wakes up the entry is cleanly handled (physical ACL is - * removed, catalog entry removed, etc.); or an earlier cleaning can be forced - * by invoking directly the purge method. The method returns FALSE in case no - * entry was found or the supplied parameters were null, and TRUE otherwise. - * Yet keep in mind that is says nothing of whether the DB operation was - * successful or not. - */ - synchronized public boolean expireJiT(PFN pfn, LocalUser localUser, - FilesystemPermission acl) { - - if (pfn != null && localUser != null && acl != null) { - String fileName = pfn.getValue(); - int uid = localUser.getUid(); - int intacl = acl.getInt(); - // from the current time we remove 10 seconds because it was observed - // that when executing purge right after invoking this method, less - // than 1 second elapses, so no purging takes place at all since expiry - // is not yet reached! - // Seconds needed and not milliseconds! - long pinStart = (Calendar.getInstance().getTimeInMillis() / 1000) - 10; - long pinTime = 0; // set to zero the lifetime! - int n = dao.numberJiT(fileName, uid, intacl); - if (n == 0) { - log.warn("VolatileAndJiT CATALOG: expireJiT found no entry for ({}, {}, " - + "{})!", fileName, uid, intacl); - return false; - } - dao.forceUpdateJiT(fileName, uid, intacl, pinStart, pinTime); - if (n > 1) { - log.warn("VolatileAndJiT CATALOG: expireJiT found more than one entry " - + "for ({}, {}, {}); the catalogue could be corrupt!", fileName, uid, intacl); - } - return true; - } - log.error("VolatileAndJiT CATALOG: programming bug! expireJiT invoked on " - + "null attributes; pfn={} localUser={} acl={}", pfn, localUser, acl); - return false; - } - - /** - * Method used to expire _all_ related entries in the JiT catalogue, that were - * setup during a PtP operation. The method is intended to be used by code - * handling srmAbort command, and by srmPutDone. Notice that the Traverse on - * the parents is NOT removed! 
This is to accomodate for the use case of a - * user that has run many PtP on different SURLs but that are all contained in - * the same directory tree! In practice, this method removes R and W - * permissions. If any entry does not exist, then nothing happens and a - * warning gets written in the logs; otherwise entries get their start time - * set to now, and the lifetime set to zero; in case more than one matching - * entry is found, a message gets written to the logs, and the updating - * continues anyway as explained. At this point, when the garbage collector - * wakes up the entries get cleanly handled (physical ACL is removed, catalog - * entry removed, etc.); or an earlier cleaning can be forced by invoking - * directly the purge mehod. The method returns FALSE in case an entry was not - * found or the supplied parameters were null, and TRUE otherwise. Yet keep in - * mind that is says nothing of whether the DB operation was successful or - * not. - */ - synchronized public boolean expirePutJiTs(PFN pfn, LocalUser localUser) { - - if (pfn != null && localUser != null) { - return expireJiT(pfn, localUser, FilesystemPermission.Read) - && expireJiT(pfn, localUser, FilesystemPermission.Write); - } - - log.error("VolatileAndJiT CATALOG: programming bug! expirePutJiTs invoked " - + "on null attributes; pfn={} localUser={}", pfn, localUser); - return false; - } - - /** - * Method that purges the catalog, removing expired ACLs and deleting expired - * Volatile files. When Volatile entries expire, any realted JiT will - * automatically expire too, regardless of the specified pinLifetime: that is, - * fileLifetime wins over pinLifetime. WARNING! Notice that the catalogue DOES - * get cleaned up even if the physical removal of the ACL or erasing of the - * file fails. - */ - public synchronized void purge() { - - log.debug("VolatileAndJiT CATALOG! 
Executing purge!"); - Calendar rightNow = Calendar.getInstance(); - /** - * removes all expired entries from storm_pin and storm_track, returning two - * Collections: one with the PFN of Volatile files, and the other with PFN + - * GridUser couple of the entries that were just being tracked for the ACLs - * set up on them. - */ - Collection[] expired = dao.removeExpired(rightNow.getTimeInMillis() / 1000); - Collection expiredVolatile = expired[0]; - Collection expiredJiT = expired[1]; - if (expiredVolatile.size() == 0) { - log.debug("VolatileAndJiT CATALOG! No expired Volatile entries found."); - } else { - log.info("VolatileAndJiT CATALOG! Found and purged the following expired " - + "Volatile entries:\n {}", volatileString(expired[0])); - } - if (expiredJiT.size() == 0) { - log.debug("VolatileAndJiT CATALOG! No JiT entries found."); - } else { - log.info("VolatileAndJiT CATALOG! Found and purged the following expired " - + "JiT ACLs entries:\n {}", jitString(expired[1])); - } - // Remove ACLs - JiTData aux = null; - for (Iterator i = expiredJiT.iterator(); i.hasNext();) { - aux = (JiTData) i.next(); - int jitacl = aux.acl(); - String jitfile = aux.pfn(); - int jituid = aux.uid(); - int jitgid = aux.gid(); - try { - log.info("VolatileAndJiT CATALOG. Removing ACL {} on file {} for " - + "user {},{}", jitacl, jitfile, jituid, jitgid); - LocalFile auxFile = NamespaceDirector.getNamespace() - .resolveStoRIbyPFN(PFN.make(jitfile)).getLocalFile(); - LocalUser auxUser = new LocalUser(jituid, jitgid); - FilesystemPermission auxACL = new FilesystemPermission(jitacl); - - AclManager manager = AclManagerFS.getInstance(); - if (auxFile == null) { - log.warn("VolatileAndJiT CATALOG! Unable to setting up the ACL. " - + "LocalFile is null!"); - } else { - try { - manager.revokeUserPermission(auxFile, auxUser, auxACL); - } catch (IllegalArgumentException e) { - log.error("Unable to revoke user permissions on the file. 
" - + "IllegalArgumentException: {}", e.getMessage(), e); - } - } - } catch (Exception e) { - log.error("VolatileAndJiT CATALOG! Entry removed from Catalog, but " - + "physical ACL {} for user {}, could NOT be removed from {}", - jitacl, jituid, jitgid, jitfile); - log.error("VolatileAndJiT CATALOG! {}", e.getMessage(), e); - } - } - // Delete files - String auxPFN = null; - for (Iterator i = expiredVolatile.iterator(); i.hasNext();) { - auxPFN = (String) i.next(); - try { - log.info("VolatileAndJiT CATALOG. Deleting file {}", auxPFN); - LocalFile auxFile = NamespaceDirector.getNamespace() - .resolveStoRIbyPFN(PFN.make(auxPFN)).getLocalFile(); - boolean ok = auxFile.delete(); - if (!ok) { - throw new Exception("Java File deletion failed!"); - } - } catch (Exception e) { - log.error("VolatileAndJiT CATALOG! Entry removed from Catalog, but " - + "physical file {} could NOT be deleted!", auxPFN); - log.error("VolatileAndJiT CATALOG! {}", e.getMessage(), e); - } - } - } - - /** - * Method used upon expiry of SRM_SPACE_AVAILABLE to remove all JiT entries in - * the DB table, related to the given PFN; Notice that _no_ distinction is - * made aboutthe specific user! This is because upon expiry of - * SRM_SPACE_AVAILABLE the file gets erased, so all JiTs on that file are - * automatically erased. This implies that all catalogue entries get removed. - * If no entries are present nothing happens. - */ - public synchronized void removeAllJiTsOn(PFN pfn) { - - if (pfn != null) { - dao.removeAllJiTsOn(pfn.getValue()); - return; - } - log.error("VolatileAndJiT CATALOG: programming bug! removeAllJiTsOn " - + "invoked on null pfn!"); - } - - /** - * Method used to remove a Volatile entry that matches the supplied pfn, from - * the DB. If null is supplied, an error message gets logged and nothing - * happens. If PFN is not found, nothing happens and _no_ message gets logged. 
- */ - public synchronized void removeVolatile(PFN pfn) { - - if (pfn != null) { - dao.removeVolatile(pfn.getValue()); - return; - } - log.warn("VolatileAndJiT CATALOG: programming bug! removeVolatile invoked " - + "on null pfn!"); - } - - /** - * Method used to keep track of an ACL set up on a PFN; it needs the PFN, the - * LocalUser, the ACL and the desired pinLifeTime. If the 3-ple (PFN, ACL, - * LocalUser) is not present, it gets added; if it is already present, - * provided the new desired expiry occurs after the present one, it gets - * changed. If the supplied lifetime is zero, then a default value is used - * instead. If it is larger than a ceiling, that ceiling is used instead. The - * floor value in seconds can be set from the configuration file, with the - * property: pinLifetime.minimum While the ceiling value in seconds is set - * with: pinLifetime.maximum BEWARE: The intended use case is in both - * srmPrepareToGet and srmPrepareToPut, for the case of the _JiT_ security - * mechanism. The maximum is necessary because JiT ACLs cannot last longer - * than the amount of time the pool account is leased. Notice that for - * Volatile entries, a pinLifetime larger than the fileLifetime can be - * specified. However, when Volatile files expire any related JiTs - * automatically expire in anticipation! - */ - public synchronized void trackJiT(PFN pfn, LocalUser localUser, - FilesystemPermission acl, Calendar start, TLifeTimeInSeconds pinLifetime) { - - if (pfn != null && localUser != null && acl != null && start != null - && pinLifetime != null) { - - String fileName = pfn.getValue(); - int uid = localUser.getUid(); - int gid = localUser.getPrimaryGid(); - int intacl = acl.getInt(); - // seconds needed and not milliseconds! 
- long pinStart = start.getTimeInMillis() / 1000; - long pinTime = validatePinLifetime(pinLifetime.value()); - int n = dao.numberJiT(fileName, uid, intacl); - if (n == 0) { - dao.addJiT(fileName, uid, gid, intacl, pinStart, pinTime); - } else { - dao.updateJiT(fileName, uid, intacl, pinStart, pinTime); - if (n > 1) { - log.warn("VolatileAndJiT CATALOG: More than one entry found for " - + "({}, {}, {}); the catalogue could be corrupt!", fileName, uid, - intacl); - } - } - return; - } - log.error("VolatileAndJiT CATALOG: programming bug! TrackACL invoked on " - + "null attributes; pfn={} localUser={} acl={} start={} pinLifetime={}", - pfn, localUser, acl, start, pinLifetime); - } - - /** - * Method that adds an entry to the catalog that keeps track of Volatile - * files. The PFN and the fileLifetime are needed. If no entry corresponding - * to the given PFN is found, a new one gets recorded. If the PFN is already - * present, then provided the new expiry (obtained by adding together - * current-time and requested-lifetime) exceeds the expiry in the catalog, - * the entry is updated. Otherwise nothing takes place. If the supplied - * fileLifetime is zero, then a default value is used instead. This floor - * default value in seconds can be set from the configuration file, with the - * property: fileLifetime.default BEWARE: The intended use case for this - * method is during srmPrepareToPut. When files are uploaded into StoRM, they - * get specified as Volatile or Permanent. The PtP logic determines if the - * request is for a Volatile file and in that case it adds a new entry in the - * catalog. That is the purpose of this method. Any subsequent PtP call will - * just result in a modification of the expiry, provided the newer one lasts - * longer than the original one. Yet bear in mind that two or more PtP on the - * same file makes NO SENSE AT ALL! If any DB error occurs, then nothing gets - * added/updated and an error message gets logged. 
- */ - public synchronized void trackVolatile(PFN pfn, Calendar start, - TLifeTimeInSeconds fileLifetime) { - - if (pfn != null && fileLifetime != null && start != null) { - - String fileName = pfn.getValue(); - long fileTime = fileLifetime.value(); - if (fileTime <= 0) { - fileTime = defaultFileLifetime; - } - long fileStart = start.getTimeInMillis() / 1000; // seconds needed and not - // milliseconds! - int n = dao.numberVolatile(fileName); - if (n == -1) { - log.error("VolatileAndJiT CATALOG! DB problem does not allow to count " - + "number of Volatile entries for {}! Volatile entry NOT processed!", - pfn); - } else if (n == 0) { - dao.addVolatile(fileName, fileStart, fileTime); - } else { - dao.updateVolatile(fileName, fileStart, fileTime); - if (n > 1) { - log.warn("VolatileAndJiT CATALOG: More than one entry found for {}; " - + "the catalogue could be corrupt!", fileName); - } - } - return; - } - log.warn("VolatileAndJiT CATALOG: programming bug! volatileEntry invoked " - + "on null attributes; pfn={} start={} fileLifetime={}", pfn, start, - fileLifetime); - } - - public synchronized void setStartTime(PFN pfn, Calendar start) - throws Exception { - - if (pfn == null || start == null) { - log.warn("VolatileAndJiT CATALOG: programming bug! volatileEntry invoked " - + "on null attributes; pfn={} start={}", pfn, start); - return; - } - - String fileName = pfn.getValue(); - // seconds needed and not milliseconds! - long fileStart = start.getTimeInMillis() / 1000; - int n = dao.numberVolatile(fileName); - if (n == -1) { - log.error("VolatileAndJiT CATALOG! DB problem does not allow to count " - + "number of Volatile entries for {}! 
Volatile entry NOT processed!", - pfn); - return; - } - if (n == 0) { - throw new Exception("Unable to update row volatile for pfn \'" + pfn - + "\' , not on the database!"); - } - dao.updateVolatile(fileName, fileStart); - if (n > 1) { - log.warn("VolatileAndJiT CATALOG: More than one entry found for {}; " - + "the catalogue could be corrupt!", fileName); - } - } - - /** - * Method that returns a List whose first element is a Calendar with the - * starting date and time of the lifetime of the supplied PFN, and whose - * second element is the TLifeTime the system is keeping the PFN. If no entry - * is found for the given PFN, an empty List is returned. Likewise if any DB - * error occurs. In any case, proper error messages get logged. Moreover - * notice that if for any reason the value for the Lifetime read from the DB - * does not allow creation of a valid TLifeTimeInSeconds, an Empty one is - * returned. Error messages in logs warn of the situation. - */ - public synchronized List volatileInfoOn(PFN pfn) { - - ArrayList aux = new ArrayList(); - if (pfn == null) { - log.error("VolatileAndJiT CATALOG: programming bug! volatileInfoOn " - + "invoked on null PFN!"); - return aux; - } - Collection c = dao.volatileInfoOn(pfn.getValue()); - if (c.size() != 2) { - return aux; - } - Iterator i = c.iterator(); - // start time - long startInMillis = i.next().longValue() * 1000; - Calendar auxcal = Calendar.getInstance(); - auxcal.setTimeInMillis(startInMillis); - aux.add(auxcal); - // lifeTime - long lifetimeInSeconds = ((Long) i.next()).longValue(); - TLifeTimeInSeconds auxLifeTime = TLifeTimeInSeconds.makeEmpty(); - try { - auxLifeTime = TLifeTimeInSeconds - .make(lifetimeInSeconds, TimeUnit.SECONDS); - } catch (IllegalArgumentException e) { - log.error("VolatileAndJiT CATALOG: programming bug! Retrieved long does " - + "not allow TLifeTimeCreation! 
long is: {}; error is: {}", - lifetimeInSeconds, e.getMessage(), e); - } - aux.add(auxLifeTime); - return aux; - } - - /** - * Private method used to return a String representation of the expired - * entries Collection of JiTData. - */ - private String jitString(Collection c) { - - if (c == null) { - return ""; - } - StringBuilder sb = new StringBuilder(); - sb.append("file,acl,uid,gid\n"); - JiTData aux = null; - for (Iterator i = c.iterator(); i.hasNext();) { - aux = i.next(); - sb.append(aux.pfn()); - sb.append(","); - sb.append(aux.acl()); - sb.append(","); - sb.append(aux.uid()); - sb.append(","); - sb.append(aux.gid()); - if (i.hasNext()) { - sb.append("\n"); - } - } - return sb.toString(); - } - - /** - * Private method that makes sure that the lifeTime of the request: (1) It is - * not less than a predetermined value: this check is needed because clients - * may omit to supply a value and some default one must be used; moreover, it - * is feared that if the requested lifetime is very low, such as 0 or a few - * seconds, there could be strange problems in having a file written and - * erased immediately. (2) It is not larger than a given ceiling; this is - * necessary because in the JiT model, the underlying system may decide to - * remove the pool account mappings; it is paramount that no ACLs remain set - * up for the now un-associated pool account. - */ - private long validatePinLifetime(long lifetime) { - - long duration = lifetime < floor ? floor : lifetime; // adjust for lifetime - // set to zero! - duration = duration <= ceiling ? duration : ceiling; // make sure lifetime - // is not longer than - // the maximum set! - return duration; - } - - /** - * Private method used to return a String representation of the expired - * entries Collection of pfn Strings. 
- */ - private String volatileString(Collection c) { - - if (c == null) { - return ""; - } - StringBuilder sb = new StringBuilder(); - for (Iterator i = c.iterator(); i.hasNext();) { - sb.append(i.next()); - if (i.hasNext()) { - sb.append(","); - } - } - return sb.toString(); - } + private static final Logger log = LoggerFactory.getLogger(VolatileAndJiTCatalog.class); + + private static VolatileAndJiTCatalog instance; + + public static synchronized VolatileAndJiTCatalog getInstance() { + if (instance == null) { + instance = new VolatileAndJiTCatalog(); + } + return instance; + } + + private final VolatileAndJiTDAO dao; + + /** Timer object in charge of cleaning periodically the Catalog! */ + private final Timer cleaner = new Timer(); + /** Delay time before starting cleaning thread! Set to 1 minute */ + private final long delay = Configuration.getInstance().getCleaningInitialDelay() * 1000; + /** Period of execution of cleaning! Set to 1 hour */ + private final long period = Configuration.getInstance().getCleaningTimeInterval() * 1000; + /** fileLifetime to use if user specified a non-positive value */ + private final long defaultFileLifetime = Configuration.getInstance().getFileLifetimeDefault(); + /** Number of seconds to use as default if the supplied lifetime is zero! */ + private final long floor = Configuration.getInstance().getPinLifetimeDefault(); + /** + * Maximum number of seconds that an ACL can live: the life time requested by the user cannot be + * greater than this value! This ceiling is needed because of the cron job that removes pool + * account mappings: when the mapping is removed, there must NOT be ANY ACL for that pool-user + * left! + */ + private final long ceiling = Configuration.getInstance().getPinLifetimeMaximum(); + + /** + * Private constructor that starts the cleaning timer. 
+ */ + private VolatileAndJiTCatalog() { + + dao = VolatileAndJiTDAOMySql.getInstance(); + + TimerTask cleaningTask = new TimerTask() { + + @Override + public void run() { + + purge(); + } + }; + cleaner.scheduleAtFixedRate(cleaningTask, delay, period); + } + + /** + * Checks whether the given file exists in the volatile table or not. + * + * @param filename + * @return true if there is antry for the given file in the volatilte table, + * false otherwise. + */ + synchronized public boolean exists(PFN pfn) { + + return dao.exists(pfn.getValue()); + } + + /** + * Method used to expire _all_ related entries in the JiT catalogue, that were setup during a PtG + * operation. The method is intended to be used by code handling srmAbort command. Notice that the + * Traverse on the parents is NOT removed! This is to accomodate for the use case of a user that + * has run many PtG on different SURLs but all contained in the same directory tree! In practice + * this method removes the R permission. If any entry does not exist, then nothing happens and a + * warning gets written in the logs; otherwise entries get their start time set to now, and the + * lifetime set to zero; in case more than one matching entry is found, a message gets written to + * the logs, and the updating continues anyway as explained. At this point, when the garbage + * collector wakes up the entries get cleanly handled (physical ACL is removed, catalog entry + * removed, etc.); or an earlier cleaning can be forced by invoking directly the purge mehod. The + * method returns FALSE in case an entry was not found or the supplied parameters were null, and + * TRUE otherwise. Yet keep in mind that it says nothing of whether the DB operation was + * successful or not. + */ + synchronized public boolean expireGetJiTs(PFN pfn, LocalUser localUser) { + + if (pfn != null && localUser != null) { + return expireJiT(pfn, localUser, FilesystemPermission.Read); + } + log.error("VolatileAndJiT CATALOG: programming bug! 
expireGetJiTs invoked " + + "on null attributes; pfn={} localUser={}", pfn, localUser); + return false; + } + + /** + * Method used to expire an entry in the JiT catalogue. The method is intended to be used by code + * handling srmAbort command. If the entry does not exist, then nothing happens and a warning gets + * written in the logs; otherwise the entry gets its start time set to now, and its lifetime set + * to zero; in case more than one matching entry is found, a message gets written to the logs, and + * the updating continues anyway as explained. At this point, when the garbage collector wakes up + * the entry is cleanly handled (physical ACL is removed, catalog entry removed, etc.); or an + * earlier cleaning can be forced by invoking directly the purge method. The method returns FALSE + * in case no entry was found or the supplied parameters were null, and TRUE otherwise. Yet keep + * in mind that is says nothing of whether the DB operation was successful or not. + */ + synchronized public boolean expireJiT(PFN pfn, LocalUser localUser, FilesystemPermission acl) { + + if (pfn != null && localUser != null && acl != null) { + String fileName = pfn.getValue(); + int uid = localUser.getUid(); + int intacl = acl.getInt(); + // from the current time we remove 10 seconds because it was observed + // that when executing purge right after invoking this method, less + // than 1 second elapses, so no purging takes place at all since expiry + // is not yet reached! + // Seconds needed and not milliseconds! + long pinStart = (Calendar.getInstance().getTimeInMillis() / 1000) - 10; + long pinTime = 0; // set to zero the lifetime! 
+ int n = dao.numberJiT(fileName, uid, intacl); + if (n == 0) { + log.warn("VolatileAndJiT CATALOG: expireJiT found no entry for ({}, {}, " + "{})!", + fileName, uid, intacl); + return false; + } + dao.forceUpdateJiT(fileName, uid, intacl, pinStart, pinTime); + if (n > 1) { + log.warn("VolatileAndJiT CATALOG: expireJiT found more than one entry " + + "for ({}, {}, {}); the catalogue could be corrupt!", fileName, uid, intacl); + } + return true; + } + log.error("VolatileAndJiT CATALOG: programming bug! expireJiT invoked on " + + "null attributes; pfn={} localUser={} acl={}", pfn, localUser, acl); + return false; + } + + /** + * Method used to expire _all_ related entries in the JiT catalogue, that were setup during a PtP + * operation. The method is intended to be used by code handling srmAbort command, and by + * srmPutDone. Notice that the Traverse on the parents is NOT removed! This is to accomodate for + * the use case of a user that has run many PtP on different SURLs but that are all contained in + * the same directory tree! In practice, this method removes R and W permissions. If any entry + * does not exist, then nothing happens and a warning gets written in the logs; otherwise entries + * get their start time set to now, and the lifetime set to zero; in case more than one matching + * entry is found, a message gets written to the logs, and the updating continues anyway as + * explained. At this point, when the garbage collector wakes up the entries get cleanly handled + * (physical ACL is removed, catalog entry removed, etc.); or an earlier cleaning can be forced by + * invoking directly the purge mehod. The method returns FALSE in case an entry was not found or + * the supplied parameters were null, and TRUE otherwise. Yet keep in mind that is says nothing of + * whether the DB operation was successful or not. 
+ */ + synchronized public boolean expirePutJiTs(PFN pfn, LocalUser localUser) { + + if (pfn != null && localUser != null) { + return expireJiT(pfn, localUser, FilesystemPermission.Read) + && expireJiT(pfn, localUser, FilesystemPermission.Write); + } + + log.error("VolatileAndJiT CATALOG: programming bug! expirePutJiTs invoked " + + "on null attributes; pfn={} localUser={}", pfn, localUser); + return false; + } + + /** + * Method that purges the catalog, removing expired ACLs and deleting expired Volatile files. When + * Volatile entries expire, any realted JiT will automatically expire too, regardless of the + * specified pinLifetime: that is, fileLifetime wins over pinLifetime. WARNING! Notice that the + * catalogue DOES get cleaned up even if the physical removal of the ACL or erasing of the file + * fails. + */ + public synchronized void purge() { + + log.debug("VolatileAndJiT CATALOG! Executing purge!"); + Calendar rightNow = Calendar.getInstance(); + /** + * removes all expired entries from storm_pin and storm_track, returning two Collections: one + * with the PFN of Volatile files, and the other with PFN + GridUser couple of the entries that + * were just being tracked for the ACLs set up on them. + */ + Collection[] expired = dao.removeExpired(rightNow.getTimeInMillis() / 1000); + Collection expiredVolatile = expired[0]; + Collection expiredJiT = expired[1]; + if (expiredVolatile.size() == 0) { + log.debug("VolatileAndJiT CATALOG! No expired Volatile entries found."); + } else { + log.info("VolatileAndJiT CATALOG! Found and purged the following expired " + + "Volatile entries:\n {}", volatileString(expired[0])); + } + if (expiredJiT.size() == 0) { + log.debug("VolatileAndJiT CATALOG! No JiT entries found."); + } else { + log.info("VolatileAndJiT CATALOG! 
Found and purged the following expired " + + "JiT ACLs entries:\n {}", jitString(expired[1])); + } + // Remove ACLs + JiTData aux = null; + for (Iterator i = expiredJiT.iterator(); i.hasNext();) { + aux = (JiTData) i.next(); + int jitacl = aux.acl(); + String jitfile = aux.pfn(); + int jituid = aux.uid(); + int jitgid = aux.gid(); + try { + log.info("VolatileAndJiT CATALOG. Removing ACL {} on file {} for " + "user {},{}", jitacl, + jitfile, jituid, jitgid); + LocalFile auxFile = + NamespaceDirector.getNamespace().resolveStoRIbyPFN(PFN.make(jitfile)).getLocalFile(); + LocalUser auxUser = new LocalUser(jituid, jitgid); + FilesystemPermission auxACL = new FilesystemPermission(jitacl); + + AclManager manager = AclManagerFS.getInstance(); + if (auxFile == null) { + log.warn("VolatileAndJiT CATALOG! Unable to setting up the ACL. " + "LocalFile is null!"); + } else { + try { + manager.revokeUserPermission(auxFile, auxUser, auxACL); + } catch (IllegalArgumentException e) { + log.error( + "Unable to revoke user permissions on the file. " + "IllegalArgumentException: {}", + e.getMessage(), e); + } + } + } catch (Exception e) { + log.error( + "VolatileAndJiT CATALOG! Entry removed from Catalog, but " + + "physical ACL {} for user {}, could NOT be removed from {}", + jitacl, jituid, jitgid, jitfile); + log.error("VolatileAndJiT CATALOG! {}", e.getMessage(), e); + } + } + // Delete files + String auxPFN = null; + for (Iterator i = expiredVolatile.iterator(); i.hasNext();) { + auxPFN = (String) i.next(); + try { + log.info("VolatileAndJiT CATALOG. Deleting file {}", auxPFN); + LocalFile auxFile = + NamespaceDirector.getNamespace().resolveStoRIbyPFN(PFN.make(auxPFN)).getLocalFile(); + boolean ok = auxFile.delete(); + if (!ok) { + throw new Exception("Java File deletion failed!"); + } + } catch (Exception e) { + log.error("VolatileAndJiT CATALOG! Entry removed from Catalog, but " + + "physical file {} could NOT be deleted!", auxPFN); + log.error("VolatileAndJiT CATALOG! 
{}", e.getMessage(), e); + } + } + } + + /** + * Method used upon expiry of SRM_SPACE_AVAILABLE to remove all JiT entries in the DB table, + * related to the given PFN; Notice that _no_ distinction is made aboutthe specific user! This is + * because upon expiry of SRM_SPACE_AVAILABLE the file gets erased, so all JiTs on that file are + * automatically erased. This implies that all catalogue entries get removed. If no entries are + * present nothing happens. + */ + public synchronized void removeAllJiTsOn(PFN pfn) { + + if (pfn != null) { + dao.removeAllJiTsOn(pfn.getValue()); + return; + } + log.error("VolatileAndJiT CATALOG: programming bug! removeAllJiTsOn " + "invoked on null pfn!"); + } + + /** + * Method used to keep track of an ACL set up on a PFN; it needs the PFN, the LocalUser, the ACL + * and the desired pinLifeTime. If the 3-ple (PFN, ACL, LocalUser) is not present, it gets added; + * if it is already present, provided the new desired expiry occurs after the present one, it gets + * changed. If the supplied lifetime is zero, then a default value is used instead. If it is + * larger than a ceiling, that ceiling is used instead. The floor value in seconds can be set from + * the configuration file, with the property: pinLifetime.minimum While the ceiling value in + * seconds is set with: pinLifetime.maximum BEWARE: The intended use case is in both + * srmPrepareToGet and srmPrepareToPut, for the case of the _JiT_ security mechanism. The maximum + * is necessary because JiT ACLs cannot last longer than the amount of time the pool account is + * leased. Notice that for Volatile entries, a pinLifetime larger than the fileLifetime can be + * specified. However, when Volatile files expire any related JiTs automatically expire in + * anticipation! 
+ */ + public synchronized void trackJiT(PFN pfn, LocalUser localUser, FilesystemPermission acl, + Calendar start, TLifeTimeInSeconds pinLifetime) { + + if (pfn != null && localUser != null && acl != null && start != null && pinLifetime != null) { + + String fileName = pfn.getValue(); + int uid = localUser.getUid(); + int gid = localUser.getPrimaryGid(); + int intacl = acl.getInt(); + // seconds needed and not milliseconds! + long pinStart = start.getTimeInMillis() / 1000; + long pinTime = validatePinLifetime(pinLifetime.value()); + int n = dao.numberJiT(fileName, uid, intacl); + if (n == 0) { + dao.addJiT(fileName, uid, gid, intacl, pinStart, pinTime); + } else { + dao.updateJiT(fileName, uid, intacl, pinStart, pinTime); + if (n > 1) { + log.warn("VolatileAndJiT CATALOG: More than one entry found for " + + "({}, {}, {}); the catalogue could be corrupt!", fileName, uid, intacl); + } + } + return; + } + log.error( + "VolatileAndJiT CATALOG: programming bug! TrackACL invoked on " + + "null attributes; pfn={} localUser={} acl={} start={} pinLifetime={}", + pfn, localUser, acl, start, pinLifetime); + } + + /** + * Method that adds an entry to the catalog that keeps track of Volatile files. The PFN and the + * fileLifetime are needed. If no entry corresponding to the given PFN is found, a new one gets + * recorded. If the PFN is already present, then provided the new expiry (obtained by adding + * together current-time and requested-lifetime) exceeds the expiry in the catalog, the entry is + * updated. Otherwise nothing takes place. If the supplied fileLifetime is zero, then a default + * value is used instead. This floor default value in seconds can be set from the configuration + * file, with the property: fileLifetime.default BEWARE: The intended use case for this method is + * during srmPrepareToPut. When files are uploaded into StoRM, they get specified as Volatile or + * Permanent. 
The PtP logic determines if the request is for a Volatile file and in that case it + * adds a new entry in the catalog. That is the purpose of this method. Any subsequent PtP call + * will just result in a modification of the expiry, provided the newer one lasts longer than the + * original one. Yet bear in mind that two or more PtP on the same file makes NO SENSE AT ALL! If + * any DB error occurs, then nothing gets added/updated and an error message gets logged. + */ + public synchronized void trackVolatile(PFN pfn, Calendar start, TLifeTimeInSeconds fileLifetime) { + + if (pfn != null && fileLifetime != null && start != null) { + + String fileName = pfn.getValue(); + long fileTime = fileLifetime.value(); + if (fileTime <= 0) { + fileTime = defaultFileLifetime; + } + long fileStart = start.getTimeInMillis() / 1000; // seconds needed and not + // milliseconds! + int n = dao.numberVolatile(fileName); + if (n == -1) { + log.error("VolatileAndJiT CATALOG! DB problem does not allow to count " + + "number of Volatile entries for {}! Volatile entry NOT processed!", pfn); + } else if (n == 0) { + dao.addVolatile(fileName, fileStart, fileTime); + } else { + dao.updateVolatile(fileName, fileStart, fileTime); + if (n > 1) { + log.warn("VolatileAndJiT CATALOG: More than one entry found for {}; " + + "the catalogue could be corrupt!", fileName); + } + } + return; + } + log.warn("VolatileAndJiT CATALOG: programming bug! volatileEntry invoked " + + "on null attributes; pfn={} start={} fileLifetime={}", pfn, start, fileLifetime); + } + + public synchronized void setStartTime(PFN pfn, Calendar start) throws Exception { + + if (pfn == null || start == null) { + log.warn("VolatileAndJiT CATALOG: programming bug! volatileEntry invoked " + + "on null attributes; pfn={} start={}", pfn, start); + return; + } + + String fileName = pfn.getValue(); + // seconds needed and not milliseconds! 
+ long fileStart = start.getTimeInMillis() / 1000; + int n = dao.numberVolatile(fileName); + if (n == -1) { + log.error("VolatileAndJiT CATALOG! DB problem does not allow to count " + + "number of Volatile entries for {}! Volatile entry NOT processed!", pfn); + return; + } + if (n == 0) { + throw new Exception( + "Unable to update row volatile for pfn \'" + pfn + "\' , not on the database!"); + } + dao.updateVolatile(fileName, fileStart); + if (n > 1) { + log.warn("VolatileAndJiT CATALOG: More than one entry found for {}; " + + "the catalogue could be corrupt!", fileName); + } + } + + /** + * Method that returns a List whose first element is a Calendar with the starting date and time of + * the lifetime of the supplied PFN, and whose second element is the TLifeTime the system is + * keeping the PFN. If no entry is found for the given PFN, an empty List is returned. Likewise if + * any DB error occurs. In any case, proper error messages get logged. Moreover notice that if for + * any reason the value for the Lifetime read from the DB does not allow creation of a valid + * TLifeTimeInSeconds, an Empty one is returned. Error messages in logs warn of the situation. + */ + public synchronized List volatileInfoOn(PFN pfn) { + + ArrayList aux = new ArrayList(); + if (pfn == null) { + log + .error("VolatileAndJiT CATALOG: programming bug! 
volatileInfoOn " + "invoked on null PFN!"); + return aux; + } + Collection c = dao.volatileInfoOn(pfn.getValue()); + if (c.size() != 2) { + return aux; + } + Iterator i = c.iterator(); + // start time + long startInMillis = i.next().longValue() * 1000; + Calendar auxcal = Calendar.getInstance(); + auxcal.setTimeInMillis(startInMillis); + aux.add(auxcal); + // lifeTime + long lifetimeInSeconds = ((Long) i.next()).longValue(); + TLifeTimeInSeconds auxLifeTime = TLifeTimeInSeconds.makeEmpty(); + try { + auxLifeTime = TLifeTimeInSeconds.make(lifetimeInSeconds, TimeUnit.SECONDS); + } catch (IllegalArgumentException e) { + log.error( + "VolatileAndJiT CATALOG: programming bug! Retrieved long does " + + "not allow TLifeTimeCreation! long is: {}; error is: {}", + lifetimeInSeconds, e.getMessage(), e); + } + aux.add(auxLifeTime); + return aux; + } + + /** + * Private method used to return a String representation of the expired entries Collection of + * JiTData. + */ + private String jitString(Collection c) { + + if (c == null) { + return ""; + } + StringBuilder sb = new StringBuilder(); + sb.append("file,acl,uid,gid\n"); + JiTData aux = null; + for (Iterator i = c.iterator(); i.hasNext();) { + aux = i.next(); + sb.append(aux.pfn()); + sb.append(","); + sb.append(aux.acl()); + sb.append(","); + sb.append(aux.uid()); + sb.append(","); + sb.append(aux.gid()); + if (i.hasNext()) { + sb.append("\n"); + } + } + return sb.toString(); + } + + /** + * Private method that makes sure that the lifeTime of the request: (1) It is not less than a + * predetermined value: this check is needed because clients may omit to supply a value and some + * default one must be used; moreover, it is feared that if the requested lifetime is very low, + * such as 0 or a few seconds, there could be strange problems in having a file written and erased + * immediately. 
(2) It is not larger than a given ceiling; this is necessary because in the JiT + * model, the underlying system may decide to remove the pool account mappings; it is paramount + * that no ACLs remain set up for the now un-associated pool account. + */ + private long validatePinLifetime(long lifetime) { + + long duration = lifetime < floor ? floor : lifetime; // adjust for lifetime + // set to zero! + duration = duration <= ceiling ? duration : ceiling; // make sure lifetime + // is not longer than + // the maximum set! + return duration; + } + + /** + * Private method used to return a String representation of the expired entries Collection of pfn + * Strings. + */ + private String volatileString(Collection c) { + + if (c == null) { + return ""; + } + StringBuilder sb = new StringBuilder(); + for (Iterator i = c.iterator(); i.hasNext();) { + sb.append(i.next()); + if (i.hasNext()) { + sb.append(","); + } + } + return sb.toString(); + } } diff --git a/src/main/java/it/grid/storm/catalogs/VolatileAndJiTDAO.java b/src/main/java/it/grid/storm/catalogs/VolatileAndJiTDAO.java deleted file mode 100644 index 8a0c596fd..000000000 --- a/src/main/java/it/grid/storm/catalogs/VolatileAndJiTDAO.java +++ /dev/null @@ -1,889 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ - -package it.grid.storm.catalogs; - -import com.google.common.collect.Lists; - -import it.grid.storm.config.Configuration; - -import java.sql.Connection; -import java.sql.DriverManager; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.SQLWarning; -import java.sql.Statement; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Iterator; -import java.util.List; -import java.util.Timer; -import java.util.TimerTask; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * DAO class for VolatileAndJiTCatalog: it has been specifically designed for - * MySQL. - * - * @author EGRID ICTP - * @version 1.0 (based on old PinnedFilesDAO) - * @date November, 2006 - */ -public class VolatileAndJiTDAO { - - private static final Logger log = LoggerFactory.getLogger(VolatileAndJiTDAO.class); - - // The name of the class for the DB driver - private final String driver = Configuration.getInstance().getDBDriver(); - - // The URL of the DB - private final String url = Configuration.getInstance().getDBURL(); - - // The password for the DB - private final String password = Configuration.getInstance().getDBPassword(); - - // The name for the DB - private final String name = Configuration.getInstance().getDBUserName(); - - // Connection to DB - private Connection con = null; - - // instance of DAO - private static final VolatileAndJiTDAO dao = new VolatileAndJiTDAO(); - - // timer thread that will run a task to alert when reconnecting is necessary! - private Timer clock = null; - - // timer task that will update the boolean signaling that a reconnection is needed! 
- private TimerTask clockTask = null; - - // milliseconds that must pass before reconnecting to DB - private final long period = Configuration.getInstance().getDBReconnectPeriod() * 1000; - - // initial delay in milliseconds before starting timer - private final long delay = Configuration.getInstance().getDBReconnectDelay() * 1000; - - // boolean that tells whether reconnection is needed because of MySQL bug! - private boolean reconnect = false; - - private VolatileAndJiTDAO() { - - setUpConnection(); - clock = new Timer(); - clockTask = new TimerTask() { - - @Override - public void run() { - - reconnect = true; - } - }; // clock task - clock.scheduleAtFixedRate(clockTask, delay, period); - } - - /** - * Method that returns the only instance of VolatileAndJiTDAO. - */ - public static VolatileAndJiTDAO getInstance() { - - return dao; - } - - /** - * Method that inserts a new entry in the JiT table of the DB, consisting of - * the specified filename, the local user uid, the local user gid, the acl, - * the start time as expressed by UNIX epoch (seconds since 00:00:00 1 1 1970) - * and the number of seconds the jit must last. - * - * In the DB, the start time gets translated into DATE:TIME in order to make - * it more readable. pinLifetime remains in seconds. - */ - public void addJiT(String filename, int uid, int gid, int acl, long start, - long pinLifetime) { - - if (!checkConnection()) { - log - .error("VolatileAndJiTDAO. 
addJiT: unable to get a valid connection!"); - return; - } - String sql = "INSERT INTO jit(file,uid,gid,acl,start,pinLifetime) VALUES(?,?,?,?,FROM_UNIXTIME(?),?)"; - PreparedStatement stmt = null; - try { - stmt = con.prepareStatement(sql); - logWarnings(con.getWarnings()); - stmt.setString(1, filename); - logWarnings(stmt.getWarnings()); - stmt.setInt(2, uid); - logWarnings(stmt.getWarnings()); - stmt.setInt(3, gid); - logWarnings(stmt.getWarnings()); - stmt.setInt(4, acl); - logWarnings(stmt.getWarnings()); - stmt.setLong(5, start); - logWarnings(stmt.getWarnings()); - stmt.setLong(6, pinLifetime); - logWarnings(stmt.getWarnings()); - log.debug("VolatileAndJiTDAO. addJiT: {}", stmt); - stmt.execute(); - } catch (SQLException e) { - log.error("VolatileAndJiTDAO! Error in addJiT: {}", e.getMessage(), e); - } finally { - close(stmt); - } - } - - /** - * Method that inserts a new entry in the Volatile table of the DB, consisting - * of the specified filename, the start time as expressed by UNIX epoch - * (seconds since 00:00:00 1 1 1970), and the number of seconds the file must - * be kept for. - * - * In the DB, the start time gets translated into DATE:TIME in order to make - * it more readable. pinLifetime remains in seconds. - */ - public void addVolatile(String filename, long start, long fileLifetime) { - - if (!checkConnection()) { - log - .error("VolatileAndJiTDAO. addVolatile: unable to get a valid connection!"); - return; - } - String sql = "INSERT INTO volatile(file,start,fileLifetime) VALUES(?,FROM_UNIXTIME(?),?)"; - PreparedStatement stmt = null; - try { - stmt = con.prepareStatement(sql); - logWarnings(con.getWarnings()); - stmt.setString(1, filename); - logWarnings(stmt.getWarnings()); - stmt.setLong(2, start); - logWarnings(stmt.getWarnings()); - stmt.setLong(3, fileLifetime); - logWarnings(stmt.getWarnings()); - log.debug("VolatileAndJiTDAO. addVolatile: {}", stmt); - stmt.execute(); - } catch (SQLException e) { - log.error("VolatileAndJiTDAO! 
Error in addVolatile: {}", e.getMessage(), e); - } finally { - close(stmt); - } - } - - /** - * Checks whether the given file exists in the volatile table or not. - * - * @param filename - * @return true if there is antry for the given file in the - * volatilte table, false otherwise. - */ - public boolean exists(String filename) { - - if (!checkConnection()) { - log - .error("VolatileAndJiTDAO. exists: unable to get a valid connection!"); - return false; - } - String sql = "SELECT ID FROM volatile WHERE file=? LIMIT 1"; - PreparedStatement stmt = null; - ResultSet rs = null; - boolean result; - - try { - stmt = con.prepareStatement(sql); - logWarnings(con.getWarnings()); - - stmt.setString(1, filename); - logWarnings(stmt.getWarnings()); - - log.debug("VolatileAndJiTDAO - existsOnVolatile - {}", stmt); - - rs = stmt.executeQuery(); - logWarnings(stmt.getWarnings()); - - if (rs.next()) { - result = true; - } else { - result = false; - } - } catch (SQLException e) { - log.error("VolatileAndJiTDAO! Error in existsOnVolatile: {}", - e.getMessage(), e); - result = false; - } finally { - close(rs); - close(stmt); - } - return result; - } - - /** - * Method that updates an existing entry in the JiT table of the DB, - * consisting of the specified filename, the uid and gid of the local user, - * the acl, the start time as expressed by UNIX epoch (seconds since 00:00:00 - * 1 1 1970), and the number of seconds the jit must last. - * - * In the DB, the start time gets translated into DATE:TIME in order to make - * it more readable. pinLifetime remains in seconds. - * - * This method _forces_ the update regardless of the fact that the new expiry - * lasts less than the current one! This method is intended to be used by - * expireJiT. - * - * Only start and pinLifetime get updated, while filename, uid, gid and acl, - * are used as criteria to select records. 
- */ - public void forceUpdateJiT(String filename, int uid, int acl, long start, - long pinLifetime) { - - if (!checkConnection()) { - log - .error("VolatileAndJiTDAO. forceUpdateJiT: unable to get a valid connection!"); - return; - } - String sql = "UPDATE jit " + "SET start=FROM_UNIXTIME(?), pinLifetime=? " - + "WHERE file=? AND uid=? AND acl=?"; - PreparedStatement stmt = null; - try { - stmt = con.prepareStatement(sql); - logWarnings(con.getWarnings()); - stmt.setLong(1, start); - logWarnings(stmt.getWarnings()); - stmt.setLong(2, pinLifetime); - logWarnings(stmt.getWarnings()); - stmt.setString(3, filename); - logWarnings(stmt.getWarnings()); - stmt.setInt(4, uid); - logWarnings(stmt.getWarnings()); - stmt.setInt(5, acl); - logWarnings(stmt.getWarnings()); - log.debug("VolatileAndJiTDAO. forceUpdateJiT: {}", stmt); - int n = stmt.executeUpdate(); - log.debug("VolatileAndJiTDAO. {} jit entries forced updated.", n); - } catch (SQLException e) { - log.error("VolatileAndJiTDAO! Error in forceUpdateJiT: {}", - e.getMessage(), e); - } finally { - close(stmt); - } - } - - /** - * Method that returns the number of entries in the catalogue, matching the - * given filename, uid and acl. - * - * Notice that in general there should be either one or none, and more should - * be taken as indication of catalogue corruption. - * - * -1 is returned if there are problems with the DB. - */ - public int numberJiT(String filename, int uid, int acl) { - - if (!checkConnection()) { - log.error("VolatileAndJiTDAO. numberJiT: unable to get a valid connection!"); - return -1; - } - String sql = "SELECT COUNT(ID) FROM jit WHERE file=? AND uid=? 
AND acl=?"; - PreparedStatement stmt = null; - ResultSet rs = null; - try { - stmt = con.prepareStatement(sql); - logWarnings(con.getWarnings()); - stmt.setString(1, filename); - logWarnings(stmt.getWarnings()); - stmt.setInt(2, uid); - logWarnings(stmt.getWarnings()); - stmt.setInt(3, acl); - logWarnings(stmt.getWarnings()); - log.debug("VolatileAndJiTDAO. numberJiT: {}", stmt); - rs = stmt.executeQuery(); - logWarnings(stmt.getWarnings()); - int n = -1; - if (rs.next()) { - n = rs.getInt(1); - } else { - log.error("VolatileAndJiTDAO! Unexpected situation in numberJiT: " - + "result set empty!"); - } - close(rs); - close(stmt); - return n; - } catch (SQLException e) { - log.error("VolatileAndJiTDAO! Error in numberJiT: {}", e.getMessage(), e); - close(rs); - close(stmt); - return -1; - } - } - - /** - * Method that returns the number of Volatile entries in the catalogue, for - * the given filename. - * - * Notice that in general there should be either one or none, and more should - * be taken as indication of catalogue corruption. - * - * -1 is returned if there are problems with the DB. - */ - public int numberVolatile(String filename) { - - if (!checkConnection()) { - log - .error("VolatileAndJiTDAO. numberVolatile: unable to get a valid connection!"); - return -1; - } - String sql = "SELECT COUNT(ID) FROM volatile WHERE file=?"; - PreparedStatement stmt = null; - ResultSet rs = null; - try { - stmt = con.prepareStatement(sql); - logWarnings(con.getWarnings()); - stmt.setString(1, filename); - logWarnings(stmt.getWarnings()); - log.debug("VolatileAndJiTDAO. numberVolatile: {}", stmt); - rs = stmt.executeQuery(); - logWarnings(stmt.getWarnings()); - int n = -1; - if (rs.next()) { - n = rs.getInt(1); - } else { - log.error("VolatileAndJiTDAO! Unexpected situation in numberVolatile: " - + "result set empty!"); - } - close(rs); - close(stmt); - return n; - } catch (SQLException e) { - log.error("VolatileAndJiTDAO! 
Error in numberVolatile: {}", - e.getMessage(), e); - close(rs); - close(stmt); - return -1; - } - } - - /** - * Method that removes all entries in the JiT table of the DB, that match the - * specified filename. So this action takes place _regardless_ of the user - * that set up the ACL! - */ - public void removeAllJiTsOn(String filename) { - - if (!checkConnection()) { - log.error("VolatileAndJiTDAO. removeAllJiTsOn: unable to get a " - + "valid connection!"); - return; - } - String sql = "DELETE FROM jit WHERE file=?"; - PreparedStatement stmt = null; - try { - stmt = con.prepareStatement(sql); - logWarnings(con.getWarnings()); - stmt.setString(1, filename); - logWarnings(stmt.getWarnings()); - log.debug("VolatileAndJiTDAO. removeJiT: {}", stmt); - int n = stmt.executeUpdate(); - log.debug("VolatileAndJiTDAO. removeJiT: {} entries removed", n); - } catch (SQLException e) { - log.error("VolatileAndJiTDAO! Error in removeJiT: {}", e.getMessage(), e); - } finally { - close(stmt); - } - } - - /** - * Method used to remove all expired entries, both of pinned files and of jit - * ACLs. Also, when removing volatile entries, any jit entry that refers to - * those expired volatiles will also be removed. - * - * The method requires a long representing the time measured as UNIX EPOCH - * upon which to base the purging: entries are evaluated expired when compared - * to this date. - * - * The method returns an array of two Collections; Collection[0] contains - * expired volatile entries String PFNs, while Collection[1] contains - * JiTDataTO objects. Collection[1] also contains those entries that may not - * have expired yet, but since the respective Volatile is being removed they - * too must be removed automatically. - * - * WARNING! If any error occurs it gets logged, and an array of two empty - * Collection is returned. This operation is treated as a Transcation by the - * DB, so a Roll Back should return everything to its original state! 
- */ - public Collection[] removeExpired(long time) { - - if (!checkConnection()) { - log.error("VolatileAndJiTDAO. removeExpired: unable to get a valid connection!"); - // in case of any failure return an array of two empty Collection - return new Collection[] { new ArrayList(), new ArrayList() }; - } - - String vol = "SELECT ID,file FROM volatile WHERE (UNIX_TIMESTAMP(start)+fileLifetime 0) { - // there are expired volatile entries: adjust jit selection to include - // those SURLs too! - jit = jit + " OR file IN " + makeFileString(volat); - } - stmt = con.prepareStatement(jit); - logWarnings(con.getWarnings()); - stmt.setLong(1, time); - logWarnings(stmt.getWarnings()); - log.debug("VolatileAndJiTDAO. removeExpired: {}", stmt); - rs = stmt.executeQuery(); - logWarnings(stmt.getWarnings()); - Collection track = new ArrayList(); - Collection trackid = new ArrayList(); - JiTData aux = null; - while (rs.next()) { - trackid.add(new Long(rs.getLong("ID"))); - aux = new JiTData(rs.getString("file"), rs.getInt("acl"), - rs.getInt("uid"), rs.getInt("gid")); - track.add(aux); - } - int njit = trackid.size(); - close(rs); - close(stmt); - - // remove entries - Collection volcol = new ArrayList(); - Collection jitcol = new ArrayList(); - try { - con.setAutoCommit(false); // begin transaction! - logWarnings(con.getWarnings()); - // delete volatile - int deletedvol = 0; - if (nvolat > 0) { - delvol = delvol + makeIDString(volatid); - stmt = con.prepareStatement(delvol); - logWarnings(con.getWarnings()); - log.debug("VolatileAndJiTDAO. removeExpired: {}", stmt); - deletedvol = stmt.executeUpdate(); - logWarnings(stmt.getWarnings()); - close(stmt); - } - // delete jits - int deletedjit = 0; - if (njit > 0) { - deljit = deljit + makeIDString(trackid); - stmt = con.prepareStatement(deljit); - logWarnings(con.getWarnings()); - log.debug("VolatileAndJiTDAO. 
removeExpired: {}", stmt); - deletedjit = stmt.executeUpdate(); - logWarnings(stmt.getWarnings()); - close(stmt); - } - con.commit(); - logWarnings(con.getWarnings()); - con.setAutoCommit(true); // end transaction! - logWarnings(con.getWarnings()); - log.debug("VolatileAndJiTDAO. Removed {} volatile catalogue entries " - + "and {} jit catalogue entries.", deletedvol, deletedjit); - volcol = volat; - jitcol = track; - } catch (SQLException e) { - log.error("VolatileAndJiTDAO! Unable to complete removeExpired... " - + "rolling back! {}", e.getMessage(), e); - rollback(con); - close(stmt); - } - - // return collections - return new Collection[] { volcol, jitcol }; - } catch (SQLException e) { - close(rs); - close(stmt); - log.error("VolatileAndJiTDAO! Unable to complete removeExpired! {}", - e.getMessage(), e); - // in case of any failure return an array of two empty Collection - return new Collection[] { new ArrayList(), new ArrayList() }; - } - } - - /** - * Method that removes all entries in the Volatile table of the DB, that match - * the specified filename. - */ - public void removeVolatile(String filename) { - - if (!checkConnection()) { - log.error("VolatileAndJiTDAO. removeVolatile: unable to get a valid " - + "connection!"); - return; - } - String sql = "DELETE FROM volatile WHERE file=?"; - PreparedStatement stmt = null; - try { - stmt = con.prepareStatement(sql); - logWarnings(con.getWarnings()); - stmt.setString(1, filename); - logWarnings(stmt.getWarnings()); - log.debug("VolatileAndJiTDAO. removeVolatile: {}", stmt); - int n = stmt.executeUpdate(); - log.debug("VolatileAndJiTDAO. removeVolatile: {} entries removed.", n); - } catch (SQLException e) { - log.error("VolatileAndJiTDAO! 
Error in removeVolatile: {}", - e.getMessage(), e); - } finally { - close(stmt); - } - } - - /** - * Method that updates an existing entry in the JiT table of the DB, - * consisting of the specified filename, the uid and gid of the local user, - * the acl, the start time as expressed by UNIX epoch (seconds since 00:00:00 - * 1 1 1970), and the number of seconds the jit must last. - * - * In the DB, the start time gets translated into DATE:TIME in order to make - * it more readable. pinLifetime remains in seconds. - * - * Entries get updated only if the new expiry calculated by adding start and - * pinLifetime, is larger than the existing one. - * - * Only start and pinLifetime get updated, while filename, uid, gid and acl, - * are used as criteria to select records. - */ - public void updateJiT(String filename, int uid, int acl, long start, - long pinLifetime) { - - if (!checkConnection()) { - log.error("VolatileAndJiTDAO. updateJiT: unable to get a valid " - + "connection!"); - return; - } - String sql = "UPDATE jit " - + "SET start=FROM_UNIXTIME(?), pinLifetime=? " - + "WHERE file=? AND uid=? AND acl=? AND (UNIX_TIMESTAMP(start)+pinLifetime volatileInfoOn(String filename) { - - if (!checkConnection()) { - log.error("VolatileAndJiTDAO. volatileInfoOn: unable to get a valid connection!"); - return Lists.newArrayList(); - } - String sql = "SELECT UNIX_TIMESTAMP(start), fileLifetime FROM volatile WHERE file=?"; - PreparedStatement stmt = null; - ResultSet rs = null; - List aux = Lists.newArrayList(); - try { - stmt = con.prepareStatement(sql); - logWarnings(con.getWarnings()); - stmt.setString(1, filename); - logWarnings(stmt.getWarnings()); - log.debug("VolatileAndJiTDAO - infoOnVolatile - {}", stmt); - rs = stmt.executeQuery(); - logWarnings(stmt.getWarnings()); - if (rs.next()) { - aux.add(rs.getLong("UNIX_TIMESTAMP(start)")); - aux.add(rs.getLong("fileLifetime")); - } else { - log.debug("VolatileAndJiTDAO! 
infoOnVolatile did not find {}", filename); - } - } catch (SQLException e) { - log.error("VolatileAndJiTDAO! Error in infoOnVolatile: {}", - e.getMessage(), e); - } finally { - close(rs); - close(stmt); - } - return aux; - } - - /** - * Auxiliary method that checks if time for resetting the connection has come, - * and eventually takes it down and up back again. - */ - private boolean checkConnection() { - - boolean response = true; - if (reconnect) { - log.debug("VolatileAndJiTDAO: reconnecting to DB. "); - takeDownConnection(); - response = setUpConnection(); - if (response) { - reconnect = false; - } - } - return response; - } - - /** - * Auxiliary method that closes a ResultSet and handles all possible - * exceptions. - */ - private void close(ResultSet rset) { - - if (rset != null) { - try { - rset.close(); - } catch (Exception e) { - log.error("VolatileAndJiTDAO! Unable to close ResultSet - Error: {}", - e.getMessage(), e); - } - } - } - - /** - * Auxiliary method that closes a Statement and handles all possible - * exceptions. - */ - private void close(Statement stmt) { - - if (stmt != null) { - try { - stmt.close(); - } catch (Exception e) { - log.error("VolatileAndJiTDAO! Unable to close Statement {} - Error: {}", - stmt.toString(), e.getMessage(), e); - } - } - } - - /** - * Auxiliary method used to log warnings. - */ - private void logWarnings(SQLWarning warning) { - - if (warning != null) { - log.debug("VolatileAndJiTDAO: {}", warning); - while ((warning = warning.getNextWarning()) != null) { - log.debug("VolatileAndJiTDAO: {}", warning); - } - } - } - - /** - * Method that returns a String containing all Files. 
- */ - private String makeFileString(Collection files) { - - StringBuilder sb = new StringBuilder("("); - for (Iterator i = files.iterator(); i.hasNext();) { - sb.append("'"); - sb.append((String) i.next()); - sb.append("'"); - if (i.hasNext()) { - sb.append(","); - } - } - sb.append(")"); - return sb.toString(); - } - - /** - * Method that returns a String containing all IDs. - */ - private String makeIDString(Collection rowids) { - - StringBuilder sb = new StringBuilder("("); - for (Iterator i = rowids.iterator(); i.hasNext();) { - sb.append(i.next()); - if (i.hasNext()) { - sb.append(","); - } - } - sb.append(")"); - return sb.toString(); - } - - /** - * Auxiliary method used to roll back a transaction and handles all possible - * exceptions. - */ - private void rollback(Connection con) { - - if (con != null) { - try { - con.rollback(); - logWarnings(con.getWarnings()); - log.error("VolatileAndJiTDAO! Roll back successful!"); - } catch (SQLException e3) { - log.error("VolatileAndJiTDAO! Roll back failed! {}", e3.getMessage(), e3); - } - } - } - - /** - * Auxiliary method that sets up the connection to the DB. - */ - private boolean setUpConnection() { - - boolean response = false; - try { - Class.forName(driver); - con = DriverManager.getConnection(url, name, password); - response = con.isValid(0); - logWarnings(con.getWarnings()); - } catch (Exception e) { - log.error("VolatileAndJiTDAO! Exception in setUpconnection! {}", - e.getMessage(), e); - } - return response; - } - - /** - * Auxiliary method that takes down a connection to the DB. - */ - private void takeDownConnection() { - - if (con != null) { - try { - con.close(); - } catch (Exception e) { - log.error("VolatileAndJiTDAO! Exception in takeDownConnection! 
{}", - e.getMessage(), e); - } - } - } -} diff --git a/src/main/java/it/grid/storm/catalogs/executors/RequestFinalizerService.java b/src/main/java/it/grid/storm/catalogs/executors/RequestFinalizerService.java new file mode 100644 index 000000000..506003e09 --- /dev/null +++ b/src/main/java/it/grid/storm/catalogs/executors/RequestFinalizerService.java @@ -0,0 +1,46 @@ +package it.grid.storm.catalogs.executors; + +import static java.util.concurrent.TimeUnit.SECONDS; + +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; + +import it.grid.storm.catalogs.executors.threads.BoLFinalizer; +import it.grid.storm.catalogs.executors.threads.PtGFinalizer; +import it.grid.storm.catalogs.executors.threads.PtPFinalizer; +import it.grid.storm.config.Configuration; + +public class RequestFinalizerService { + + private final long delay; + private final long period; + + private ScheduledExecutorService executor; + private PtPFinalizer ptpTask; + private BoLFinalizer bolTask; + private PtGFinalizer ptgTask; + + public RequestFinalizerService(Configuration config) { + + delay = config.getTransitInitialDelay() * 1000L; + period = config.getTransitTimeInterval() * 1000L; + executor = Executors.newScheduledThreadPool(3); + ptpTask = new PtPFinalizer(config.getInProgressPutRequestExpirationTime()); + bolTask = new BoLFinalizer(); + ptgTask = new PtGFinalizer(); + + } + + public void start() { + + executor.scheduleAtFixedRate(ptpTask, delay, period, SECONDS); + executor.scheduleAtFixedRate(bolTask, delay, period, SECONDS); + executor.scheduleAtFixedRate(ptgTask, delay, period, SECONDS); + + } + + public void stop() { + + executor.shutdown(); + } +} diff --git a/src/main/java/it/grid/storm/catalogs/executors/threads/BoLFinalizer.java b/src/main/java/it/grid/storm/catalogs/executors/threads/BoLFinalizer.java new file mode 100644 index 000000000..3dd64a36f --- /dev/null +++ b/src/main/java/it/grid/storm/catalogs/executors/threads/BoLFinalizer.java @@ 
-0,0 +1,39 @@ +package it.grid.storm.catalogs.executors.threads; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import it.grid.storm.persistence.dao.BoLChunkDAO; +import it.grid.storm.persistence.impl.mysql.BoLChunkDAOMySql; + + +public class BoLFinalizer implements Runnable { + + private static final Logger log = LoggerFactory.getLogger(BoLFinalizer.class); + + private final BoLChunkDAO dao; + + public BoLFinalizer() { + + dao = BoLChunkDAOMySql.getInstance(); + } + + @Override + public void run() { + + log.debug("BoL finalizer started .."); + + try { + + int n = dao.releaseExpiredAndSuccessfulRequests(); + if (n > 0) { + log.info("Released {} expired and successful BoL requests", n); + } + + } catch (Exception e) { + + log.error("{}: {}", e.getClass(), e.getMessage(), e); + + } + } +} diff --git a/src/main/java/it/grid/storm/catalogs/executors/threads/PtGFinalizer.java b/src/main/java/it/grid/storm/catalogs/executors/threads/PtGFinalizer.java new file mode 100644 index 000000000..8d3026a09 --- /dev/null +++ b/src/main/java/it/grid/storm/catalogs/executors/threads/PtGFinalizer.java @@ -0,0 +1,46 @@ +package it.grid.storm.catalogs.executors.threads; + +import java.util.Collection; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import it.grid.storm.persistence.dao.PtGChunkDAO; +import it.grid.storm.persistence.impl.mysql.PtGChunkDAOMySql; +import it.grid.storm.srm.types.TSURL; + + +public class PtGFinalizer implements Runnable { + + private static final Logger log = LoggerFactory.getLogger(PtGFinalizer.class); + + private final PtGChunkDAO dao; + + public PtGFinalizer() { + + dao = PtGChunkDAOMySql.getInstance(); + } + + @Override + public void run() { + + log.debug("PtG finalizer started .."); + + try { + + Collection surls = dao.transitExpiredSRM_FILE_PINNED(); + + if (surls.size() > 0) { + log.info("Moved {} expired and successful PtG requests to SRM_FILE_PINNED", surls.size()); + log.debug("Released surls:"); + 
surls.forEach(surl -> { + log.debug("{}", surl); + }); + } + + } catch (Exception e) { + + log.error("{}: {}", e.getClass(), e.getMessage(), e); + } + } +} diff --git a/src/main/java/it/grid/storm/catalogs/executors/threads/PtPFinalizer.java b/src/main/java/it/grid/storm/catalogs/executors/threads/PtPFinalizer.java new file mode 100644 index 000000000..57ba6c64d --- /dev/null +++ b/src/main/java/it/grid/storm/catalogs/executors/threads/PtPFinalizer.java @@ -0,0 +1,86 @@ +package it.grid.storm.catalogs.executors.threads; + +import static it.grid.storm.srm.types.TStatusCode.SRM_FAILURE; + +import java.util.Map; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import it.grid.storm.persistence.dao.PtPChunkDAO; +import it.grid.storm.persistence.impl.mysql.PtPChunkDAOMySql; +import it.grid.storm.srm.types.InvalidTSURLAttributesException; +import it.grid.storm.srm.types.TSURL; +import it.grid.storm.synchcall.command.datatransfer.PutDoneCommand; +import it.grid.storm.synchcall.command.datatransfer.PutDoneCommandException; + + +public class PtPFinalizer implements Runnable { + + private static final Logger log = LoggerFactory.getLogger(PtPFinalizer.class); + + private static final String NAME = "Expired-PutRequests-Agent"; + + private long inProgressRequestsExpirationTime; + private final PtPChunkDAO dao; + + public PtPFinalizer(long inProgressRequestsExpirationTime) { + + this.inProgressRequestsExpirationTime = inProgressRequestsExpirationTime; + dao = PtPChunkDAOMySql.getInstance(); + log.info("{} created.", NAME); + } + + @Override + public void run() { + + log.debug("{} run.", NAME); + try { + + transitExpiredLifetimeRequests(); + transitExpiredInProgressRequests(); + + } catch (Exception e) { + + log.error("{}: {}", e.getClass(), e.getMessage(), e); + + } + } + + private void transitExpiredLifetimeRequests() { + + Map expiredRequests = dao.getExpiredSRM_SPACE_AVAILABLE(); + log.debug("{} lifetime-expired requests found ... 
", NAME, expiredRequests.size()); + + if (expiredRequests.isEmpty()) { + return; + } + + expiredRequests.entrySet().forEach(e -> executePutDone(e.getKey(), e.getValue())); + + int count = + dao.transitExpiredSRM_SPACE_AVAILABLEtoSRM_FILE_LIFETIME_EXPIRED(expiredRequests.keySet()); + log.info("{} updated expired put requests - {} db rows affected", NAME, count); + } + + private void executePutDone(Long id, String surl) { + + try { + + if (PutDoneCommand.executePutDone(TSURL.makeFromStringValidate(surl))) { + log.info("{} successfully executed a srmPutDone on surl {}", NAME, surl); + } + + } catch (InvalidTSURLAttributesException | PutDoneCommandException e) { + + log.error("{}. Unable to execute PutDone on request with id {} and surl {}: ", NAME, id, surl, + e.getMessage(), e); + } + } + + private void transitExpiredInProgressRequests() { + + int count = dao.transitLongTimeInProgressRequestsToStatus(inProgressRequestsExpirationTime, SRM_FAILURE, "Request timeout"); + log.debug("{} moved in-progress put requests to failure - {} db rows affected", NAME, count); + } +} diff --git a/src/main/java/it/grid/storm/catalogs/surl/SURLStatusManagerImpl.java b/src/main/java/it/grid/storm/catalogs/surl/SURLStatusManagerImpl.java index 76bcefb72..e21aec50e 100644 --- a/src/main/java/it/grid/storm/catalogs/surl/SURLStatusManagerImpl.java +++ b/src/main/java/it/grid/storm/catalogs/surl/SURLStatusManagerImpl.java @@ -2,12 +2,13 @@ import it.grid.storm.authz.AuthzException; import it.grid.storm.catalogs.BoLChunkCatalog; -import it.grid.storm.catalogs.CopyChunkCatalog; import it.grid.storm.catalogs.PtGChunkCatalog; import it.grid.storm.catalogs.PtPChunkCatalog; import it.grid.storm.catalogs.RequestSummaryCatalog; -import it.grid.storm.catalogs.RequestSummaryData; import it.grid.storm.griduser.GridUserInterface; +import it.grid.storm.persistence.dao.SURLStatusDAO; +import it.grid.storm.persistence.impl.mysql.SURLStatusDAOMySql; +import 
it.grid.storm.persistence.model.RequestSummaryData; import it.grid.storm.srm.types.TRequestToken; import it.grid.storm.srm.types.TReturnStatus; import it.grid.storm.srm.types.TSURL; @@ -24,7 +25,7 @@ public class SURLStatusManagerImpl implements SURLStatusManager { public boolean abortAllGetRequestsForSURL(GridUserInterface user, TSURL surl, String explanation) { - final SURLStatusDAO dao = new SURLStatusDAO(); + final SURLStatusDAO dao = SURLStatusDAOMySql.getInstance(); return dao.abortActivePtGsForSURL(user, surl, explanation); } @@ -33,7 +34,7 @@ public boolean abortAllGetRequestsForSURL(GridUserInterface user, TSURL surl, public boolean abortAllPutRequestsForSURL(GridUserInterface user, TSURL surl, String explanation) { - final SURLStatusDAO dao = new SURLStatusDAO(); + final SURLStatusDAO dao = SURLStatusDAOMySql.getInstance(); return dao.abortActivePtPsForSURL(user, surl, explanation); } @@ -61,11 +62,6 @@ public boolean abortRequest(GridUserInterface user, TRequestToken token, TStatusCode.SRM_REQUEST_QUEUED, TStatusCode.SRM_ABORTED, explanation); break; - case COPY: - CopyChunkCatalog.getInstance().updateFromPreviousStatus(token, - TStatusCode.SRM_REQUEST_QUEUED, TStatusCode.SRM_ABORTED, explanation); - break; - case EMPTY: break; @@ -137,7 +133,7 @@ public boolean failRequestForSURL(GridUserInterface user, public Map getPinnedSURLsForUser( GridUserInterface user, List surls) { - final SURLStatusDAO dao = new SURLStatusDAO(); + final SURLStatusDAO dao = SURLStatusDAOMySql.getInstance(); return dao.getPinnedSURLsForUser(user, surls); } @@ -145,7 +141,7 @@ public Map getPinnedSURLsForUser( public Map getPinnedSURLsForUser( GridUserInterface user, TRequestToken token, List surls) { - final SURLStatusDAO dao = new SURLStatusDAO(); + final SURLStatusDAO dao = SURLStatusDAOMySql.getInstance(); return dao.getPinnedSURLsForUser(user, token, surls); } @@ -154,7 +150,7 @@ public Map getPinnedSURLsForUser( public Map getSURLStatuses(GridUserInterface user, 
TRequestToken token) { - final SURLStatusDAO dao = new SURLStatusDAO(); + final SURLStatusDAO dao = SURLStatusDAOMySql.getInstance(); return dao.getSURLStatuses(token); } @@ -163,28 +159,28 @@ public Map getSURLStatuses(GridUserInterface user, TRequestToken token, List surls) { - final SURLStatusDAO dao = new SURLStatusDAO(); + final SURLStatusDAO dao = SURLStatusDAOMySql.getInstance(); return dao.getSURLStatuses(token, surls); } @Override public boolean isSURLBusy(TRequestToken requestTokenToExclude, TSURL surl) { - final SURLStatusDAO dao = new SURLStatusDAO(); + final SURLStatusDAO dao = SURLStatusDAOMySql.getInstance(); return dao.surlHasOngoingPtPs(surl, requestTokenToExclude); } @Override public boolean isSURLBusy(TSURL surl) { - final SURLStatusDAO dao = new SURLStatusDAO(); + final SURLStatusDAO dao = SURLStatusDAOMySql.getInstance(); return dao.surlHasOngoingPtPs(surl, null); } @Override public boolean isSURLPinned(TSURL surl) { - final SURLStatusDAO dao = new SURLStatusDAO(); + final SURLStatusDAO dao = SURLStatusDAOMySql.getInstance(); return dao.surlHasOngoingPtGs(surl); } @@ -212,7 +208,7 @@ private RequestSummaryData lookupRequest(TRequestToken token) { @Override public int markSURLsReadyForRead(TRequestToken token, List surls) { - final SURLStatusDAO dao = new SURLStatusDAO(); + final SURLStatusDAO dao = SURLStatusDAOMySql.getInstance(); return dao.markSURLsReadyForRead(token, surls); } @@ -220,7 +216,7 @@ public int markSURLsReadyForRead(TRequestToken token, List surls) { @Override public void releaseSURLs(GridUserInterface user, List surls) { - final SURLStatusDAO dao = new SURLStatusDAO(); + final SURLStatusDAO dao = SURLStatusDAOMySql.getInstance(); dao.releaseSURLs(user, surls); } @@ -228,7 +224,7 @@ public void releaseSURLs(GridUserInterface user, List surls) { @Override public void releaseSURLs(TRequestToken token, List surls) { - final SURLStatusDAO dao = new SURLStatusDAO(); + final SURLStatusDAO dao = SURLStatusDAOMySql.getInstance(); 
dao.releaseSURLs(token, surls); } diff --git a/src/main/java/it/grid/storm/catalogs/timertasks/ExpiredPutRequestsAgent.java b/src/main/java/it/grid/storm/catalogs/timertasks/ExpiredPutRequestsAgent.java deleted file mode 100644 index c205b6a46..000000000 --- a/src/main/java/it/grid/storm/catalogs/timertasks/ExpiredPutRequestsAgent.java +++ /dev/null @@ -1,93 +0,0 @@ -package it.grid.storm.catalogs.timertasks; - -import it.grid.storm.catalogs.PtPChunkDAO; -import it.grid.storm.srm.types.InvalidTSURLAttributesException; -import it.grid.storm.srm.types.TSURL; -import it.grid.storm.synchcall.command.datatransfer.PutDoneCommand; -import it.grid.storm.synchcall.command.datatransfer.PutDoneCommandException; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.List; -import java.util.Map; -import java.util.TimerTask; - - -public class ExpiredPutRequestsAgent extends TimerTask { - - private static final Logger log = LoggerFactory.getLogger(ExpiredPutRequestsAgent.class); - - private static final String NAME = "Expired-PutRequests-Agent"; - - private long inProgressRequestsExpirationTime; - - public ExpiredPutRequestsAgent(long inProgressRequestsExpirationTime) { - - this.inProgressRequestsExpirationTime = inProgressRequestsExpirationTime; - log.info("{} created.", NAME); - } - - @Override - public synchronized void run() { - - log.debug("{} run.", NAME); - try { - - transitExpiredLifetimeRequests(); - transitExpiredInProgressRequests(); - - } catch (Exception e) { - - log.error("{}: {}", e.getClass(), e.getMessage(), e); - - } - } - - private void transitExpiredLifetimeRequests() { - - PtPChunkDAO dao = PtPChunkDAO.getInstance(); - Map expiredRequests = dao.getExpiredSRM_SPACE_AVAILABLE(); - log.debug("{} lifetime-expired requests found ... 
", NAME, expiredRequests.size()); - - if (expiredRequests.isEmpty()) { - return; - } - - expiredRequests.entrySet().forEach(e -> executePutDone(e.getKey(), e.getValue())); - - int count = dao.transitExpiredSRM_SPACE_AVAILABLEtoSRM_FILE_LIFETIME_EXPIRED( - expiredRequests.keySet()); - log.info("{} updated expired put requests - {} db rows affected", NAME, count); - } - - private void executePutDone(Long id, String surl) { - - try { - - if (PutDoneCommand.executePutDone(TSURL.makeFromStringValidate(surl))) { - log.info("{} successfully executed a srmPutDone on surl {}", NAME, surl); - } - - } catch (InvalidTSURLAttributesException | PutDoneCommandException e) { - - log.error("{}. Unable to execute PutDone on request with id {} and surl {}: ", NAME, id, - surl, e.getMessage(), e); - } - } - - private void transitExpiredInProgressRequests() { - - PtPChunkDAO dao = PtPChunkDAO.getInstance(); - List expiredRequestsIds = - dao.getExpiredSRM_REQUEST_INPROGRESS(inProgressRequestsExpirationTime); - log.debug("{} expired in-progress requests found.", expiredRequestsIds.size()); - - if (expiredRequestsIds.isEmpty()) { - return; - } - - int count = dao.transitExpiredSRM_REQUEST_INPROGRESStoSRM_FAILURE(expiredRequestsIds); - log.info("{} moved in-progress put requests to failure - {} db rows affected", NAME, count); - } -} diff --git a/src/main/java/it/grid/storm/catalogs/timertasks/RequestsGarbageCollector.java b/src/main/java/it/grid/storm/catalogs/timertasks/RequestsGarbageCollector.java index 090fc0920..80bbd525d 100644 --- a/src/main/java/it/grid/storm/catalogs/timertasks/RequestsGarbageCollector.java +++ b/src/main/java/it/grid/storm/catalogs/timertasks/RequestsGarbageCollector.java @@ -6,168 +6,161 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import it.grid.storm.catalogs.BoLChunkCatalog; -import it.grid.storm.catalogs.PtGChunkCatalog; -import it.grid.storm.catalogs.RequestSummaryDAO; import it.grid.storm.config.Configuration; +import 
it.grid.storm.persistence.dao.RequestSummaryDAO; +import it.grid.storm.persistence.impl.mysql.RequestSummaryDAOMySql; import it.grid.storm.tape.recalltable.TapeRecallCatalog; public class RequestsGarbageCollector extends TimerTask { - private static final Logger log = LoggerFactory.getLogger(RequestsGarbageCollector.class); + private static final Logger log = LoggerFactory.getLogger(RequestsGarbageCollector.class); - private final Configuration config = Configuration.getInstance(); - private final RequestSummaryDAO dao = RequestSummaryDAO.getInstance(); - private final PtGChunkCatalog ptgCat = PtGChunkCatalog.getInstance(); - private final BoLChunkCatalog bolCat = BoLChunkCatalog.getInstance(); + private final Configuration config = Configuration.getInstance(); + private final RequestSummaryDAO dao = RequestSummaryDAOMySql.getInstance(); - private Timer handler; - private long delay; + private Timer handler; + private long delay; - public RequestsGarbageCollector(Timer handlerTimer, long delay) { + public RequestsGarbageCollector(Timer handlerTimer, long delay) { - this.delay = delay; - handler = handlerTimer; - } + this.delay = delay; + handler = handlerTimer; + } - @Override - public void run() { + @Override + public void run() { - try { + try { - TGarbageData gd = purgeExpiredRequests(); + TGarbageData gd = purgeExpiredRequests(); - if (gd.getTotalPurged() == 0) { + if (gd.getTotalPurged() == 0) { - log.trace("GARBAGE COLLECTOR didn't find completed requests older than {} seconds", - config.getExpiredRequestTime()); + log.trace("GARBAGE COLLECTOR didn't find completed requests older than {} seconds", + config.getExpiredRequestTime()); - } else { + } else { - log.info( - "GARBAGE COLLECTOR removed < {} > completed requests (< {} > recall) older than {} seconds", - gd.getTotalPurgedRequests(), gd.getTotalPurgedRecalls(), - config.getExpiredRequestTime()); + log.info( + "GARBAGE COLLECTOR removed < {} > completed requests (< {} > recall) older than {} seconds", + 
gd.getTotalPurgedRequests(), gd.getTotalPurgedRecalls(), + config.getExpiredRequestTime()); - } + } - long nextDelay = computeNextDelay(gd); + long nextDelay = computeNextDelay(gd); - if (nextDelay != delay) { + if (nextDelay != delay) { - log.info("GARBAGE COLLECTOR: tuning new interval to {} seconds", nextDelay / 1000); - delay = nextDelay; + log.info("GARBAGE COLLECTOR: tuning new interval to {} seconds", nextDelay / 1000); + delay = nextDelay; - } + } - } catch (Exception t) { + } catch (Exception t) { - /* useful to prevent unexpected exceptions that would kill the GC */ - log.error(t.getMessage(), t); + /* useful to prevent unexpected exceptions that would kill the GC */ + log.error(t.getMessage(), t); - } finally { + } finally { - reschedule(); - } - } + reschedule(); + } + } - /** - * Delete from database the completed requests older than a specified and configurable value. - * - * @return A TGarbageData object containing info about the deleted requests - */ - private TGarbageData purgeExpiredRequests() { + /** + * Delete from database the completed requests older than a specified and configurable value. 
+ * + * @return A TGarbageData object containing info about the deleted requests + */ + private TGarbageData purgeExpiredRequests() { - if (!enabled()) { - return TGarbageData.EMPTY; - } + if (!enabled()) { + return TGarbageData.EMPTY; + } - long expirationTime = config.getExpiredRequestTime(); - int purgeSize = config.getPurgeBatchSize(); + long expirationTime = config.getExpiredRequestTime(); + int purgeSize = config.getPurgeBatchSize(); - int nRequests = purgeExpiredRequests(expirationTime, purgeSize); - int nRecalls = purgeExpiredRecallRequests(expirationTime, purgeSize); + int nRequests = purgeExpiredRequests(expirationTime, purgeSize); + int nRecalls = purgeExpiredRecallRequests(expirationTime, purgeSize); - return new TGarbageData(nRequests, nRecalls); - } + return new TGarbageData(nRequests, nRecalls); + } - /** - * Check if Garbage Collector is enabled or not. - * - * @return If the purger is enabled. False otherwise. - */ - private boolean enabled() { + /** + * Check if Garbage Collector is enabled or not. + * + * @return If the purger is enabled. False otherwise. + */ + private boolean enabled() { - return config.getExpiredRequestPurging(); - } + return config.getExpiredRequestPurging(); + } - /** - * Method used to purge from db a bunch of completed requests, older than the - * specified @expiredRequestTime. - * - * @param purgeSize The maximum size of the bunch of expired requests that must be deleted - * @param expiredRequestTime The number of seconds after that a request can be considered - * expired - * @return The number of requests involved. - */ - private synchronized int purgeExpiredRequests(long expiredRequestTime, int purgeSize) { + /** + * Method used to purge from db a bunch of completed requests, older than the + * specified @expiredRequestTime. 
+ * + * @param purgeSize The maximum size of the bunch of expired requests that must be deleted + * @param expiredRequestTime The number of seconds after that a request can be considered expired + * @return The number of requests involved. + */ + private synchronized int purgeExpiredRequests(long expiredRequestTime, int purgeSize) { - ptgCat.transitExpiredSRM_FILE_PINNED(); - bolCat.transitExpiredSRM_SUCCESS(); + return dao.purgeExpiredRequests(expiredRequestTime, purgeSize).size(); - return dao.purgeExpiredRequests(expiredRequestTime, purgeSize).size(); + } - } + /** + * Method used to clear a bunch of completed recall requests from database. + * + * @param expirationTime The number of seconds that must pass before considering a request as + * expired + * @param purgeSize The maximum size of the bunch of expired requests that must be deleted + * @return The number of requests involved. + */ + private synchronized int purgeExpiredRecallRequests(long expirationTime, int purgeSize) { - /** - * Method used to clear a bunch of completed recall requests from database. - * - * @param expirationTime The number of seconds that must pass before considering a request as - * expired - * @param purgeSize The maximum size of the bunch of expired requests that must be deleted - * @return The number of requests involved. - */ - private synchronized int purgeExpiredRecallRequests(long expirationTime, int purgeSize) { + return TapeRecallCatalog.getInstance().purgeCatalog(expirationTime, purgeSize); + } - return new TapeRecallCatalog().purgeCatalog(expirationTime, purgeSize); - } + /** + * Compute a new delay. It will be decreased if the number of purged requests is equal to the + * purge.size value. Otherwise, it will be increased until default value. + * + * @return the computed next interval predicted from last removed requests info + */ + private long computeNextDelay(TGarbageData gd) { - /** - * Compute a new delay. 
It will be decreased if the number of purged requests is equal to the - * purge.size value. Otherwise, it will be increased until default value. - * - * @return the computed next interval predicted from last removed requests info - */ - private long computeNextDelay(TGarbageData gd) { + /* max delay from configuration in milliseconds */ + long maxDelay = config.getRequestPurgerPeriod() * 1000L; + /* min delay accepted in milliseconds */ + long minDelay = 10000L; - /* max delay from configuration in milliseconds */ - long maxDelay = config.getRequestPurgerPeriod() * 1000L; - /* min delay accepted in milliseconds */ - long minDelay = 10000L; + long nextDelay; - long nextDelay; + /* Check purged requests value */ + if (gd.getTotalPurged() >= config.getPurgeBatchSize()) { - /* Check purged requests value */ - if (gd.getTotalPurged() >= config.getPurgeBatchSize()) { + /* bunch size reached: decrease interval */ + nextDelay = Math.max(delay / 2, minDelay); - /* bunch size reached: decrease interval */ - nextDelay = Math.max(delay / 2, minDelay); + } else { - } else { + /* bunch size not reached: increase interval */ + nextDelay = Math.min(delay * 2, maxDelay); - /* bunch size not reached: increase interval */ - nextDelay = Math.min(delay * 2, maxDelay); + } - } + return nextDelay; + } - return nextDelay; - } - - /** - * Schedule another task after @delay milliseconds. - */ - private void reschedule() { - - handler.schedule(new RequestsGarbageCollector(handler, delay), delay); - } + /** + * Schedule another task after @delay milliseconds. 
+ */ + private void reschedule() { + + handler.schedule(new RequestsGarbageCollector(handler, delay), delay); + } } diff --git a/src/main/java/it/grid/storm/config/Configuration.java b/src/main/java/it/grid/storm/config/Configuration.java index bed292e98..207d26fa2 100644 --- a/src/main/java/it/grid/storm/config/Configuration.java +++ b/src/main/java/it/grid/storm/config/Configuration.java @@ -20,6 +20,7 @@ import static it.grid.storm.info.du.DiskUsageService.DEFAULT_INITIAL_DELAY; import static it.grid.storm.info.du.DiskUsageService.DEFAULT_TASKS_INTERVAL; import static it.grid.storm.info.du.DiskUsageService.DEFAULT_TASKS_PARALLEL; +import static java.lang.String.format; import static java.lang.System.getProperty; import java.io.File; @@ -32,8 +33,6 @@ import org.apache.commons.configuration.ConfigurationException; import org.apache.commons.lang.ArrayUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import com.google.common.collect.Lists; @@ -56,8 +55,6 @@ public class Configuration { "/etc/storm/backend-server/storm.properties"; public static final int DEFAULT_STORM_CONFIG_REFRESH_RATE = 0; - private static final Logger log = LoggerFactory.getLogger(Configuration.class); - private final ConfigReader cr; private static Configuration instance; @@ -72,14 +69,17 @@ public class Configuration { private static final String SERVICE_HOSTNAME_KEY = "storm.service.FE-public.hostname"; private static final String SERVICE_PORT_KEY = "storm.service.port"; private static final String LIST_OF_MACHINE_IPS_KEY = "storm.service.FE-list.IPs"; - private static final String DB_DRIVER_KEY = "storm.service.request-db.dbms-vendor"; - private static final String DB_URL_1KEY = "storm.service.request-db.protocol"; - private static final String DB_URL_2KEY = "storm.service.request-db.host"; - private static final String DB_URL_3KEY = "storm.service.request-db.db-name"; - private static final String DB_USER_NAME_KEY = "storm.service.request-db.username"; - private static 
final String DB_PASSWORD_KEY = "storm.service.request-db.passwd"; - private static final String DB_RECONNECT_PERIOD_KEY = "asynch.db.ReconnectPeriod"; - private static final String DB_RECONNECT_DELAY_KEY = "asynch.db.DelayPeriod"; + + private static final String DB_HOSTNAME_KEY = "persistence.db.hostname"; + private static final String DB_USERNAME_KEY = "persistence.db.username"; + private static final String DB_PASSWORD_KEY = "persistence.db.password"; + + private static final String DB_POOL_SIZE_KEY = "persistence.db.pool.size"; + private static final String DB_POOL_MIN_IDLE_KEY = "persistence.db.pool.min_idle"; + private static final String DB_POOL_MAX_WAIT_MILLIS_KEY = "persistence.db.pool.max_wait_millis"; + private static final String DB_POOL_TEST_ON_BORROW_KEY = "persistence.db.pool.test_on_borrow"; + private static final String DB_POOL_TEST_WHILE_IDLE_KEY = "persistence.db.pool.test_while_idle"; + private static final String CLEANING_INITIAL_DELAY_KEY = "gc.pinnedfiles.cleaning.delay"; private static final String CLEANING_TIME_INTERVAL_KEY = "gc.pinnedfiles.cleaning.interval"; private static final String FILE_DEFAULT_SIZE_KEY = "fileSize.default"; @@ -95,20 +95,6 @@ public class Configuration { private static final String XMLRPC_MAX_QUEUE_SIZE_KEY = "synchcall.xmlrpc.max_queue_size"; private static final String LIST_OF_DEFAULT_SPACE_TOKEN_KEY = "storm.service.defaultSpaceTokens"; private static final String COMMAND_SERVER_BINDING_PORT_KEY = "storm.commandserver.port"; - private static final String BE_PERSISTENCE_DB_VENDOR_KEY = "persistence.internal-db.dbms-vendor"; - private static final String BE_PERSISTENCE_DBMS_URL_1KEY = "persistence.internal-db.host"; - private static final String BE_PERSISTENCE_DBMS_URL_2KEY = "" + DB_URL_2KEY; - private static final String BE_PERSISTENCE_DB_NAME_KEY = "persistence.internal-db.db-name"; - private static final String BE_PERSISTENCEDB_USER_NAME_1KEY = "persistence.internal-db.username"; - private static final String 
BE_PERSISTENCEDB_USER_NAME_2KEY = "" + DB_USER_NAME_KEY; - private static final String BE_PERSISTENCE_DB_PASSWORD_1KEY = "persistence.internal-db.passwd"; - private static final String BE_PERSISTENCE_DB_PASSWORD_2KEY = "" + DB_PASSWORD_KEY; - private static final String BE_PERSISTENCE_POOL_DB_KEY = - "persistence.internal-db.connection-pool"; - private static final String BE_PERSISTENCE_POOL_DB_MAX_ACTIVE_KEY = - "persistence.internal-db.connection-pool.maxActive"; - private static final String BE_PERSISTENCE_POOL_DB_MAX_WAIT_KEY = - "persistence.internal-db.connection-pool.maxWait"; private static final String XMLRPC_SERVER_PORT_KEY = "synchcall.xmlrpc.unsecureServerPort"; private static final String LS_MAX_NUMBER_OF_ENTRY_KEY = "synchcall.directoryManager.maxLsEntry"; private static final String LS_ALL_LEVEL_RECURSIVE_KEY = @@ -316,66 +302,34 @@ public List getListOfMachineIPs() { } /** - * Method used by all DAO Objects to get the DataBase Driver. If no value is found in the - * configuration medium, then the default value is returned instead. - * key="asynch.picker.db.driver"; default value="com.mysql.jdbc.Driver"; + * Method used by all DAO Objects to get the DataBase Driver. + * Deprecated property: "storm.service.request-db.dbms-vendor" + * @return "com.mysql.jdbc.Driver" */ public String getDBDriver() { - if (!cr.getConfiguration().containsKey(DB_DRIVER_KEY)) { - return "com.mysql.jdbc.Driver"; - } - String vendor = cr.getConfiguration().getString(DB_DRIVER_KEY); - String driver = ""; - if ("mysql".equalsIgnoreCase(vendor)) { - driver = "com.mysql.jdbc.Driver"; - } else { - log.error("CONFIG ERROR 'RDBMS Vendor ('{}') unknown.'", vendor); - } - return driver; + return "com.mysql.jdbc.Driver"; } /** - * Method used by all DAO Objects to get DB URL. If no value is found in the configuration medium, - * then the default value is returned instead. 
key1="asynch.picker.db.protocol"; default - * value="jdbc:mysql://"; key2="asynch.picker.db.host"; default value="localhost"; - * key3="asynch.picker.db.name"; default value="storm_db"; The returned value is made up of the - * above default values and whatever is read from the configuration medium, combined in the - * following way: protocol + host + "/" + name + * Get storm_db URL. */ - public String getDBURL() { + public String getStormDbURL() { - String prefix = ""; - String host = ""; - String name = ""; - // get prefix... - if (!cr.getConfiguration().containsKey(DB_URL_1KEY)) { - // use default - prefix = "jdbc:mysql://"; - } else { - // load from external source - prefix = cr.getConfiguration().getString(DB_URL_1KEY); - } - // get host... - if (!cr.getConfiguration().containsKey(DB_URL_2KEY)) { - // use default - host = "localhost"; - } else { - // load from external source - host = cr.getConfiguration().getString(DB_URL_2KEY); - } - // get db name... - if (!cr.getConfiguration().containsKey(DB_URL_3KEY)) { - // use default - name = "storm_db"; - } else { - // load from external source - name = cr.getConfiguration().getString(DB_URL_3KEY); - } - // return value... - return prefix + host + "/" + name; + String host = cr.getConfiguration().getString(DB_HOSTNAME_KEY, "localhost"); + return format("jdbc:mysql://%s/storm_db", host); + } + + /** + * Get storm_be_ISAM URL. + */ + public String getStormBeIsamURL() { + + String host = cr.getConfiguration().getString(DB_HOSTNAME_KEY, "localhost"); + return format("jdbc:mysql://%s/storm_be_ISAM", host); } + /** * Method used by all DAO Objects to get the DB username. If no value is found in the * configuration medium, then the default value is returned instead. 
Default value = "storm"; key @@ -383,12 +337,12 @@ public String getDBURL() { */ public String getDBUserName() { - return cr.getConfiguration().getString(DB_USER_NAME_KEY, "storm"); + return cr.getConfiguration().getString(DB_USERNAME_KEY, "storm"); } /** * Method used by all DAO Objects to get the DB password. If no value is found in the - * configuration medium, then the default value is returned instead. Deafult value = "storm"; key + * configuration medium, then the default value is returned instead. Default value = "storm"; key * searched in medium = "asynch.picker.db.passwd". */ public String getDBPassword() { @@ -400,35 +354,6 @@ public String getDBPassword() { * END definition of MANDATORY PROPERTIES */ - /** - * Method used by all DAOs to establish the reconnection period in _seconds_: after such period - * the DB connection will be closed and re-opened. Beware that after such time expires, the - * connection is _not_ automatically closed and reopened; rather, it acts as a flag that is - * considered by the main code and when the most appropriate time comes, the connection is closed - * and reopened. This is because of MySQL bug that does not allow a connection to remain open for - * an arbitrary amount of time! Else an Unexpected EOF Exception gets thrown by the JDBC driver! - * If no value is found in the configuration medium, then the default value is returned instead. - * key="asynch.db.ReconnectPeriod"; default value=18000; Keep in mind that 18000 seconds = 5 - * hours. - */ - public long getDBReconnectPeriod() { - - return cr.getConfiguration().getLong(DB_RECONNECT_PERIOD_KEY, 18000); - } - - /** - * Method used by all DAOs to establish the reconnection delay in _seconds_: when StoRM is first - * launched it will wait for this amount of time before starting the timer. This is because of - * MySQL bug that does not allow a connection to remain open for an arbitrary amount of time! Else - * an Unexpected EOF Exception gets thrown by the JDBC driver! 
If no value is found in the - * configuration medium, then the default value is returned instead. - * key="asynch.db.ReconnectDelay"; default value=30; - */ - public long getDBReconnectDelay() { - - return cr.getConfiguration().getLong(DB_RECONNECT_DELAY_KEY, 30); - } - /** * Method used by PinnedFilesCatalog to get the initial delay in _seconds_ before starting the * cleaning thread. If no value is found in the configuration medium, then the default value is @@ -587,111 +512,54 @@ public int getCommandServerBindingPort() { } /** - * Method used in Persistence Component It returns the DB vendor name. If no value is found in the - * configuration medium, then the default value is returned instead. key="persistence.db.vendor"; - * default value="mysql"; - */ - public String getBEPersistenceDBVendor() { - - return cr.getConfiguration().getString(BE_PERSISTENCE_DB_VENDOR_KEY, "mysql"); - } - - /** - * Method used in Persistence Component: it returns the host where the DB resides. If no value is - * found in the configuration medium, then the default value is returned instead. - * key="persistence.db.host"; default value="localhost"; - */ - public String getBEPersistenceDBMSUrl() { - - if (cr.getConfiguration().containsKey(BE_PERSISTENCE_DBMS_URL_1KEY)) { - return cr.getConfiguration().getString(BE_PERSISTENCE_DBMS_URL_1KEY); - } - - if (cr.getConfiguration().containsKey(BE_PERSISTENCE_DBMS_URL_2KEY)) { - return cr.getConfiguration().getString(BE_PERSISTENCE_DBMS_URL_2KEY); - } - return "localhost"; - } - - /** - * Method used in Persistence Component it returns the name of the DB to use. If no value is found - * in the configuration medium, then the default value is returned instead. - * key="persistence.db.name"; default value="storm_be_ISAM"; + * Method used in Persistence Component It returns the DB vendor name. 
+ * Deprecated property: "persistence.internal-db.dbms-vendor"; + * @return "mysql"; */ - public String getBEPersistenceDBName() { + public String getDbmsVendor() { - return cr.getConfiguration().getString(BE_PERSISTENCE_DB_NAME_KEY, "storm_be_ISAM"); + return "mysql"; } /** - * Method used in Persistence Component it returns the name of the DB user that must be used. If - * no value is found in the configuration medium, then the default value is returned instead. - * key="persistence.db.username"; default value="storm"; + * Sets the maximum total number of idle and borrows connections that can be active at the same time. Use a negative + * value for no limit. */ - public String getBEPersistenceDBUserName() { - - if (cr.getConfiguration().containsKey(BE_PERSISTENCEDB_USER_NAME_1KEY)) { - return cr.getConfiguration().getString(BE_PERSISTENCEDB_USER_NAME_1KEY); - } - - if (cr.getConfiguration().containsKey(BE_PERSISTENCEDB_USER_NAME_2KEY)) { - return cr.getConfiguration().getString(BE_PERSISTENCEDB_USER_NAME_2KEY); - } + public int getDbPoolSize() { - return "storm"; + return cr.getConfiguration().getInt(DB_POOL_SIZE_KEY, -1); } /** - * Method used in Persistence Component it returns the password for the DB user that must be used. - * If no value is found in the configuration medium, then the default value is returned instead. - * key="persistence.db.passwd"; default value="storm"; + * Sets the minimum number of idle connections in the pool. 
*/ - public String getBEPersistenceDBPassword() { - - if (cr.getConfiguration().containsKey(BE_PERSISTENCE_DB_PASSWORD_1KEY)) { - return cr.getConfiguration().getString(BE_PERSISTENCE_DB_PASSWORD_1KEY); - } - - if (cr.getConfiguration().containsKey(BE_PERSISTENCE_DB_PASSWORD_2KEY)) { - return cr.getConfiguration().getString(BE_PERSISTENCE_DB_PASSWORD_2KEY); - } + public int getDbPoolMinIdle() { - return "storm"; + return cr.getConfiguration().getInt(DB_POOL_MIN_IDLE_KEY, 10); } /** - * Method used in Persistence Component it returns a boolean indicating whether to use connection - * pooling or not. If no value is found in the configuration medium, then the default value is - * returned instead. key="persistence.db.pool"; default value=false; + * Sets the MaxWaitMillis property. Use -1 to make the pool wait indefinitely. */ - public boolean getBEPersistencePoolDB() { + public int getDbPoolMaxWaitMillis() { - return cr.getConfiguration().getBoolean(BE_PERSISTENCE_POOL_DB_KEY, false); + return cr.getConfiguration().getInt(DB_POOL_MAX_WAIT_MILLIS_KEY, 5000); } /** - * Method used in Persistence Component it returns an int indicating the maximum number of active - * connections in the connection pool. It is the maximum number of active connections that can be - * allocated from this pool at the same time... 0 (zero) for no limit. If no value is found in the - * configuration medium, then the default value is returned instead. - * key="persistence.db.pool.maxActive"; default value=10; + * This property determines whether or not the pool will validate objects before they are borrowed from the pool. 
*/ - public int getBEPersistencePoolDBMaxActive() { + public boolean isDbPoolTestOnBorrow() { - return cr.getConfiguration().getInt(BE_PERSISTENCE_POOL_DB_MAX_ACTIVE_KEY, 10); + return cr.getConfiguration().getBoolean(DB_POOL_TEST_ON_BORROW_KEY, true); } /** - * Method used in Persistence Component it returns an int indicating the maximum waiting time in - * _milliseconds_ for the connection in the pool. It represents the time that the pool will wait - * (when there are no available connections) for a connection to be returned before throwing an - * exception... a value of -1 to wait indefinitely. If no value is found in the configuration - * medium, then the default value is returned instead. key="persistence.db.pool.maxWait"; default - * value=50; + * This property determines whether or not the idle object evictor will validate connections. */ - public int getBEPersistencePoolDBMaxWait() { + public boolean isDbPoolTestWhileIdle() { - return cr.getConfiguration().getInt(BE_PERSISTENCE_POOL_DB_MAX_WAIT_KEY, 50); + return cr.getConfiguration().getBoolean(DB_POOL_TEST_WHILE_IDLE_KEY, true); } /** diff --git a/src/main/java/it/grid/storm/info/SpaceInfoManager.java b/src/main/java/it/grid/storm/info/SpaceInfoManager.java index 8b88113a2..dff3fb9e3 100644 --- a/src/main/java/it/grid/storm/info/SpaceInfoManager.java +++ b/src/main/java/it/grid/storm/info/SpaceInfoManager.java @@ -52,7 +52,7 @@ public class SpaceInfoManager { private static final Logger log = LoggerFactory.getLogger(SpaceInfoManager.class); // Reference to the Catalog - private final ReservedSpaceCatalog spaceCatalog = new ReservedSpaceCatalog(); + private final ReservedSpaceCatalog spaceCatalog = ReservedSpaceCatalog.getInstance(); // Reference to the NamespaceDirector private final NamespaceInterface namespace = NamespaceDirector.getNamespace(); diff --git a/src/main/java/it/grid/storm/info/du/DiskUsageService.java b/src/main/java/it/grid/storm/info/du/DiskUsageService.java index edced7c6d..b47642b5a 
100644 --- a/src/main/java/it/grid/storm/info/du/DiskUsageService.java +++ b/src/main/java/it/grid/storm/info/du/DiskUsageService.java @@ -71,37 +71,16 @@ public static DiskUsageService getSingleThreadScheduledService(List vfss, - int poolSize) { - - return new DiskUsageService(vfss, Executors.newScheduledThreadPool(poolSize)); - } - public static DiskUsageService getScheduledThreadPoolService(List vfss) { return new DiskUsageService(vfss, Executors.newScheduledThreadPool(vfss.size())); } - public static DiskUsageService getScheduledThreadPoolService(int poolSize) { - - return getScheduledThreadPoolService(Lists.newArrayList(), poolSize); - } - public List getMonitoredSAs() { return monitoredSAs; } - public void addMonitoredSA(VirtualFSInterface vfs) { - - monitoredSAs.add(vfs); - } - public synchronized int start() { if (running) { diff --git a/src/main/java/it/grid/storm/info/du/DiskUsageTask.java b/src/main/java/it/grid/storm/info/du/DiskUsageTask.java index 1b2e0c173..a96d2a57b 100644 --- a/src/main/java/it/grid/storm/info/du/DiskUsageTask.java +++ b/src/main/java/it/grid/storm/info/du/DiskUsageTask.java @@ -21,7 +21,7 @@ public class DiskUsageTask implements Runnable { private static final Logger log = LoggerFactory.getLogger(DiskUsageTask.class); - private final ReservedSpaceCatalog spaceCatalog = new ReservedSpaceCatalog(); + private final ReservedSpaceCatalog spaceCatalog = ReservedSpaceCatalog.getInstance(); private VirtualFSInterface vfs; public DiskUsageTask(VirtualFSInterface vfs) { diff --git a/src/main/java/it/grid/storm/info/model/SpaceStatusSummary.java b/src/main/java/it/grid/storm/info/model/SpaceStatusSummary.java index 9e6d825a2..6333f76ae 100644 --- a/src/main/java/it/grid/storm/info/model/SpaceStatusSummary.java +++ b/src/main/java/it/grid/storm/info/model/SpaceStatusSummary.java @@ -43,7 +43,7 @@ public class SpaceStatusSummary { // published by DIP SETTED TO ZERO BECAUSE CURRENTLY RETURN FAKE VALUES // For now do not consider the reserved 
space, a better management is needed - private static final ReservedSpaceCatalog catalog = new ReservedSpaceCatalog(); + private static final ReservedSpaceCatalog catalog = ReservedSpaceCatalog.getInstance(); private static final Logger log = LoggerFactory .getLogger(SpaceStatusSummary.class); diff --git a/src/main/java/it/grid/storm/info/remote/resources/SpaceStatusResource.java b/src/main/java/it/grid/storm/info/remote/resources/SpaceStatusResource.java index 8a364733e..348b4a1fb 100644 --- a/src/main/java/it/grid/storm/info/remote/resources/SpaceStatusResource.java +++ b/src/main/java/it/grid/storm/info/remote/resources/SpaceStatusResource.java @@ -46,7 +46,7 @@ public class SpaceStatusResource { private static final Logger log = LoggerFactory.getLogger(SpaceStatusResource.class); - private static final ReservedSpaceCatalog catalog = new ReservedSpaceCatalog(); + private static final ReservedSpaceCatalog catalog = ReservedSpaceCatalog.getInstance(); @GET @Produces("application/json") diff --git a/src/main/java/it/grid/storm/metrics/StormMetricsReporter.java b/src/main/java/it/grid/storm/metrics/StormMetricsReporter.java index e5d9d6228..4fc40a5d1 100644 --- a/src/main/java/it/grid/storm/metrics/StormMetricsReporter.java +++ b/src/main/java/it/grid/storm/metrics/StormMetricsReporter.java @@ -1,7 +1,9 @@ package it.grid.storm.metrics; +import java.util.Map; import java.util.SortedMap; import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -18,6 +20,8 @@ import it.grid.storm.common.OperationType; import it.grid.storm.filesystem.MetricsFilesystemAdapter.FilesystemMetric; +import it.grid.storm.persistence.pool.StormBeIsamConnectionPool; +import it.grid.storm.persistence.pool.StormDbConnectionPool; public class StormMetricsReporter extends ScheduledReporter { @@ -110,6 +114,18 @@ public void report(SortedMap gauges, SortedMap c reportJettyHandlerMetrics("xmlrpc-handler", meters); 
reportJettyHandlerMetrics("rest-handler", meters); + + reportDbPoolMetrics("storm-db", StormDbConnectionPool.getInstance().getMetrics()); + reportDbPoolMetrics("storm-be-isam", StormBeIsamConnectionPool.getInstance().getMetrics()); + } + + private void reportDbPoolMetrics(String tpName, Map metrics) { + + String result = metrics.entrySet() + .stream() + .map(e -> e.getKey() + "=" + e.getValue()) + .collect(Collectors.joining(", ")); + LOG.info("{} [{}]", tpName, result); } private void reportMetric(String name, Timer timer) { @@ -134,7 +150,8 @@ private void reportThreadPoolMetrics(String tpName, SortedMap gau int jobs = getIntValue(gauges.get(tpName + ".jobs")); double utilizationMax = round2dec(getDoubleValue(gauges.get(tpName + ".utilization-max"))); - LOG.info("{} [active-threads={}, idle-threads={}, jobs={}, utilization-max={}, percent-idle={}]", + LOG.info( + "{} [active-threads={}, idle-threads={}, jobs={}, utilization-max={}, percent-idle={}]", tpName, activeThreads, idleThreads, jobs, utilizationMax, percentIdle); } @@ -151,8 +168,7 @@ private void reportJettyHandlerMetrics(String handlerName, SortedMapCatalog interfaces. - * - * @author Riccardo Zappi - riccardo.zappi AT cnaf.infn.it - * @version $Id: DAOFactory.java,v 1.3 2005/10/22 15:09:40 rzappi Exp $ - */ -public interface DAOFactory { - - /** - * Returns an implementation of StorageSpaceCatalog, specific to a particular - * datastore. 
- * - * @throws DataAccessException - * @return StorageSpaceDAO - */ - public StorageSpaceDAO getStorageSpaceDAO() throws DataAccessException; - - public TapeRecallDAO getTapeRecallDAO(); - - public TapeRecallDAO getTapeRecallDAO(boolean test) - throws DataAccessException; - - public PtGChunkDAO getPtGChunkDAO() throws DataAccessException; - - public PtPChunkDAO getPtPChunkDAO() throws DataAccessException; - - public StorageAreaDAO getStorageAreaDAO() throws DataAccessException; - - public RequestSummaryDAO getRequestSummaryDAO() throws DataAccessException; - -} diff --git a/src/main/java/it/grid/storm/persistence/DataSourceConnectionFactory.java b/src/main/java/it/grid/storm/persistence/DataSourceConnectionFactory.java deleted file mode 100644 index 76ba4cfa9..000000000 --- a/src/main/java/it/grid/storm/persistence/DataSourceConnectionFactory.java +++ /dev/null @@ -1,29 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ - -package it.grid.storm.persistence; - -import java.sql.Connection; -import it.grid.storm.persistence.exceptions.PersistenceException; - -public interface DataSourceConnectionFactory { - - public Connection borrowConnection() throws PersistenceException; - - public void giveBackConnection(Connection con) throws PersistenceException; - -} diff --git a/src/main/java/it/grid/storm/persistence/MySqlDAOFactory.java b/src/main/java/it/grid/storm/persistence/MySqlDAOFactory.java deleted file mode 100644 index d2d366b86..000000000 --- a/src/main/java/it/grid/storm/persistence/MySqlDAOFactory.java +++ /dev/null @@ -1,190 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ - -package it.grid.storm.persistence; - -import it.grid.storm.config.Configuration; -import it.grid.storm.persistence.dao.PtGChunkDAO; -import it.grid.storm.persistence.dao.PtPChunkDAO; -import it.grid.storm.persistence.dao.RequestSummaryDAO; -import it.grid.storm.persistence.dao.StorageAreaDAO; -import it.grid.storm.persistence.dao.StorageSpaceDAO; -import it.grid.storm.persistence.dao.TapeRecallDAO; -import it.grid.storm.persistence.exceptions.DataAccessException; -import it.grid.storm.persistence.exceptions.PersistenceException; -import it.grid.storm.persistence.impl.mysql.StorageSpaceDAOMySql; -import it.grid.storm.persistence.impl.mysql.TapeRecallDAOMySql; -import it.grid.storm.persistence.util.db.DBConnection; -import it.grid.storm.persistence.util.db.DBConnectionPool; -import it.grid.storm.persistence.util.db.DataBaseStrategy; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class MySqlDAOFactory implements DAOFactory { - - public static final String factoryName = "JDBC - MySQL DAO Factory"; - - private static final Logger log = LoggerFactory - .getLogger(MySqlDAOFactory.class); - - private static final DataBaseStrategy datasource = DataBaseStrategy.MYSQL; - private static DataSourceConnectionFactory connFactory = null; - private static MySqlDAOFactory factory = new MySqlDAOFactory(); - - static { - MySqlDAOFactory.initializeDataSource(); - } - - /** - * - */ - private MySqlDAOFactory() { - log.info("DAO factory: {}", MySqlDAOFactory.factoryName); - } - - public static MySqlDAOFactory getInstance() { - - return MySqlDAOFactory.factory; - } - - private static void initializeDataSource() { - - Configuration config = Configuration.getInstance(); - - datasource.setDbUrl(config.getBEPersistenceDBMSUrl()); - datasource.setDbName(config.getBEPersistenceDBName()); - datasource.setDbUsr(config.getBEPersistenceDBUserName()); - datasource.setDbPwd(config.getBEPersistenceDBPassword()); - - boolean pool = config.getBEPersistencePoolDB(); - if 
(pool) { - int maxActive = config.getBEPersistencePoolDBMaxActive(); - int maxWait = config.getBEPersistencePoolDBMaxWait(); - try { - DBConnectionPool.initPool(MySqlDAOFactory.datasource, maxActive, - maxWait); - } catch (PersistenceException e) { - log.error(e.getMessage(), e); - } - MySqlDAOFactory.connFactory = DBConnectionPool.getPoolInstance(); - } else { - try { - MySqlDAOFactory.connFactory = new DBConnection( - MySqlDAOFactory.datasource); - } catch (PersistenceException e) { - log.error(e.getMessage(), e); - } - } - } - - /** - * Returns an implementation of StorageSpaceCatalog, specific to a particular - * datastore. - * - * @throws DataAccessException - * @return StorageSpaceDAO - * @todo Implement this it.grid.storm.persistence.DAOFactory method - */ - public StorageSpaceDAO getStorageSpaceDAO() throws DataAccessException { - - return new StorageSpaceDAOMySql(); - } - - /** - * Returns an implementation of TapeRecallCatalog, specific to a particular - * datastore. - * - * @throws DataAccessException - * @return TapeReallDAO - * @todo Implement this it.grid.storm.persistence.DAOFactory method - */ - public TapeRecallDAO getTapeRecallDAO() { - - return new TapeRecallDAOMySql(); - } - - /** - * @return String - */ - @Override - public String toString() { - - return MySqlDAOFactory.factoryName; - } - - - /** - * getPtGChunkDAO - * - * @return PtGChunkDAO - * @throws DataAccessException - * @todo Implement this it.grid.storm.persistence.DAOFactory method - */ - public PtGChunkDAO getPtGChunkDAO() throws DataAccessException { - - return null; - } - - /** - * getPtPChunkDAO - * - * @return PtPChunkDAO - * @throws DataAccessException - * @todo Implement this it.grid.storm.persistence.DAOFactory method - */ - public PtPChunkDAO getPtPChunkDAO() throws DataAccessException { - - return null; - } - - /** - * getRequestSummaryDAO - * - * @return RequestSummaryDAO - * @throws DataAccessException - * @todo Implement this it.grid.storm.persistence.DAOFactory method - 
*/ - public RequestSummaryDAO getRequestSummaryDAO() throws DataAccessException { - - return null; - } - - /** - * getStorageAreaDAO - * - * @return StorageAreaDAO - * @throws DataAccessException - * @todo Implement this it.grid.storm.persistence.DAOFactory method - */ - public StorageAreaDAO getStorageAreaDAO() throws DataAccessException { - - return null; - } - - /* - * (non-Javadoc) - * - * @see it.grid.storm.persistence.DAOFactory#getTapeRecallDAO(boolean) - */ - public TapeRecallDAO getTapeRecallDAO(boolean test) - throws DataAccessException { - - return new TapeRecallDAOMySql(); - } - -} diff --git a/src/main/java/it/grid/storm/persistence/PersistenceDirector.java b/src/main/java/it/grid/storm/persistence/PersistenceDirector.java deleted file mode 100644 index 9b262615b..000000000 --- a/src/main/java/it/grid/storm/persistence/PersistenceDirector.java +++ /dev/null @@ -1,113 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ - -package it.grid.storm.persistence; - -import it.grid.storm.config.Configuration; -import it.grid.storm.persistence.exceptions.PersistenceException; -import it.grid.storm.persistence.util.db.DBConnection; -import it.grid.storm.persistence.util.db.DBConnectionPool; -import it.grid.storm.persistence.util.db.DataBaseStrategy; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class PersistenceDirector { - - private static final Logger log = LoggerFactory.getLogger("persistence"); - private static Configuration config = Configuration.getInstance(); - private static String dbVendor; - private static DataBaseStrategy dbMan; - private static DAOFactory daoFactory; - private static DataSourceConnectionFactory connFactory; - - static { - log.trace("Initializing Persistence Director..."); - dbMan = initializeDataBase(); - daoFactory = initializeFactory(); - connFactory = connectToDateSource(); - } - - private static DataBaseStrategy initializeDataBase() { - - dbVendor = config.getBEPersistenceDBVendor(); - log.debug("DBMS Vendor = {}",dbVendor); - log.debug("DBMS URL = {}", config.getBEPersistenceDBMSUrl()); - return DataBaseStrategy.getInstance(dbVendor); - } - - private static DAOFactory initializeFactory() { - - if (dbVendor.equalsIgnoreCase("MySql")) { - return MySqlDAOFactory.getInstance(); - } - - log.error("Unknown datastore id: {}", dbVendor); - throw new IllegalArgumentException("Unknown datastore identifier: " - +dbVendor); - } - - private static DataSourceConnectionFactory connectToDateSource() { - - DataSourceConnectionFactory result = null; - boolean poolMode = config.getBEPersistencePoolDB(); - int maxActive = config.getBEPersistencePoolDBMaxActive(); - int maxWait = config.getBEPersistencePoolDBMaxWait(); - - log.debug("Datasource connection string = {}", dbMan.getConnectionString()); - - log.debug("Pool mode = {}", poolMode); - log.debug("Pool Max Active = {}", maxActive); - log.debug("Pool Max Wait = {}", maxWait); - - if 
(poolMode) { - try { - DBConnectionPool.initPool(dbMan, maxActive, maxWait); - result = DBConnectionPool.getPoolInstance(); - } catch (PersistenceException e) { - log.error(e.getMessage(), e); - } - } else { - try { - result = new DBConnection(dbMan); - } catch (PersistenceException e) { - log.error(e.getMessage(), e); - } - } - return result; - } - - public static DAOFactory getDAOFactory() { - return daoFactory; - } - - public static DataBaseStrategy getDataBase() { - - return dbMan; - } - - public static DataSourceConnectionFactory getConnectionFactory() { - - return connFactory; - } - - public static Logger getLogger() { - - return log; - } - -} diff --git a/src/main/java/it/grid/storm/persistence/converter/DirOptionConverter.java b/src/main/java/it/grid/storm/persistence/converter/DirOptionConverter.java new file mode 100644 index 000000000..c0ae08ef8 --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/converter/DirOptionConverter.java @@ -0,0 +1,70 @@ +/* + * + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package it.grid.storm.persistence.converter; + +/** + * Package private class that translates between DPM flag for TDirOption and StoRM TDirOption + * proper. 
+ * + * In particular DPM uses the int 1 to denote a recursive call, yet it fails to distinguish between + * a chosen recursion level; in other words there is no way that DPM specifies the number of levels + * to recurse: so either you recurse till the end or nothing. + * + * @author EGRID - ICTP Trieste + * @version 1.0 + * @date August, 2005 + */ +class DirOptionConverter { + + static private DirOptionConverter converter; + + private DirOptionConverter() { + + } + + static public DirOptionConverter getInstance() { + + if (converter == null) + converter = new DirOptionConverter(); + return converter; + } + + /** + * Method that translates the int used by DPM as flag for TDirOption, into a boolean for + * isDirOption. + * + * 1 causes true to be returned; any other value returns 0. + */ + public boolean toSTORM(int n) { + + return (n == 1); + } + + /** + * Method used to translate the boolean isDirOption into an int used by DPM to express the same + * thing. + * + * true gets translated into 1; false into 0. + */ + public int toDPM(boolean isDirOption) { + + if (isDirOption) + return 1; + return 0; + } +} diff --git a/src/main/java/it/grid/storm/persistence/converter/FileLifetimeConverter.java b/src/main/java/it/grid/storm/persistence/converter/FileLifetimeConverter.java new file mode 100644 index 000000000..81af8575e --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/converter/FileLifetimeConverter.java @@ -0,0 +1,72 @@ +/* + * + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package it.grid.storm.persistence.converter; + +import it.grid.storm.srm.types.TLifeTimeInSeconds; +import it.grid.storm.config.Configuration; + +/** + * Class that handles DB representation of a pinLifetime as expressed by a TLifetimeInSeconds + * objects; in particular it takes care of protocol specification: + * + * 0/null/negative are translated as default StoRM configurable values. StoRMs Empty + * TLifeTimeInSeconds is translated as 0. + * + * @author EGRID ICTP + * @version 1.0 + * @date March 2007 + */ +public class FileLifetimeConverter { + + private static FileLifetimeConverter stc = new FileLifetimeConverter(); + + private FileLifetimeConverter() { + + } + + /** + * Method that returns the only instance of SizeInBytesIntConverter + */ + public static FileLifetimeConverter getInstance() { + + return stc; + } + + /** + * Method that translates the Empty TLifeTimeInSeconds into the empty representation of DB which + * is 0. Any other value is left as is. + */ + public int toDB(long l) { + + if (l == TLifeTimeInSeconds.makeEmpty().value()) + return 0; + return new Long(l).intValue(); + } + + /** + * Method that returns the long corresponding to the int value in the DB, except if it is 0, NULL + * or negative; a configurable default value is returned instead, corresponding to the + * getFileLifetimeDefault() Configuration class method. 
+ */ + public long toStoRM(int s) { + + if (s <= 0) + return Configuration.getInstance().getFileLifetimeDefault(); + return new Integer(s).longValue(); + } +} diff --git a/src/main/java/it/grid/storm/persistence/converter/FileStorageTypeConverter.java b/src/main/java/it/grid/storm/persistence/converter/FileStorageTypeConverter.java new file mode 100644 index 000000000..14d93c48b --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/converter/FileStorageTypeConverter.java @@ -0,0 +1,102 @@ +/* + * + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package it.grid.storm.persistence.converter; + +import java.util.Map; +import java.util.HashMap; +import java.util.Iterator; +import it.grid.storm.srm.types.TFileStorageType; +import it.grid.storm.config.Configuration; + +/** + * Package private auxiliary class used to convert between DB raw data and StoRM object model + * representation of TFileStorageType. 
+ * + * @author: EGRID ICTP + * @version: 2.0 + * @date: June 2005 + */ +public class FileStorageTypeConverter { + + private Map DBtoSTORM = new HashMap(); + private Map STORMtoDB = new HashMap(); + + private static FileStorageTypeConverter c = new FileStorageTypeConverter(); + + /** + * Private constructor that fills in the conversion tables; + * + * V - VOLATILE P - PERMANENT D - DURABLE + */ + private FileStorageTypeConverter() { + + DBtoSTORM.put("V", TFileStorageType.VOLATILE); + DBtoSTORM.put("P", TFileStorageType.PERMANENT); + DBtoSTORM.put("D", TFileStorageType.DURABLE); + String aux; + for (Iterator i = DBtoSTORM.keySet().iterator(); i.hasNext();) { + aux = i.next(); + STORMtoDB.put(DBtoSTORM.get(aux), aux); + } + } + + /** + * Method that returns the only instance of FileStorageTypeConverter. + */ + public static FileStorageTypeConverter getInstance() { + + return c; + } + + /** + * Method that returns the String used in the DB to represent the given TFileStorageType. The + * empty String "" is returned if no match is found. + */ + public String toDB(TFileStorageType fst) { + + String aux = (String) STORMtoDB.get(fst); + if (aux == null) + return ""; + return aux; + } + + /** + * Method that returns the TFileStorageType used by StoRM to represent the supplied String + * representation in the DB. A configured default TFileStorageType is returned in case no + * corresponding StoRM type is found. TFileStorageType.EMPTY is returned if there are + * configuration errors. + */ + public TFileStorageType toSTORM(String s) { + + TFileStorageType aux = DBtoSTORM.get(s); + if (aux == null) + // This case is that the String s is different from V,P or D. + aux = DBtoSTORM.get(Configuration.getInstance().getDefaultFileStorageType()); + if (aux == null) + // This case should never happen, but in case we prefer ponder PERMANENT. 
+ return TFileStorageType.EMPTY; + else + return aux; + } + + public String toString() { + + return "FileStorageTypeConverter.\nDBtoSTORM map:" + DBtoSTORM + "\nSTORMtoDB map:" + STORMtoDB; + } + +} diff --git a/src/main/java/it/grid/storm/persistence/converter/OverwriteModeConverter.java b/src/main/java/it/grid/storm/persistence/converter/OverwriteModeConverter.java new file mode 100644 index 000000000..6e87b8674 --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/converter/OverwriteModeConverter.java @@ -0,0 +1,103 @@ +/* + * + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package it.grid.storm.persistence.converter; + +import java.util.Map; + +import com.google.common.collect.Maps; + +import static it.grid.storm.srm.types.TOverwriteMode.ALWAYS; +import static it.grid.storm.srm.types.TOverwriteMode.NEVER; +import static it.grid.storm.srm.types.TOverwriteMode.WHENFILESAREDIFFERENT; + +import it.grid.storm.srm.types.TOverwriteMode; +import it.grid.storm.config.Configuration; + +/** + * Package private auxiliary class used to convert between DB and StoRM object model representation + * of TOverwriteMode. 
+ * + * @author: EGRID ICTP + * @version: 2.0 + * @date: June 2005 + */ +public class OverwriteModeConverter { + + private Map DBtoSTORM = Maps.newHashMap(); + private Map STORMtoDB = Maps.newHashMap(); + + private static OverwriteModeConverter c = new OverwriteModeConverter(); + + /** + * Private constructor that fills in the conversion table; in particular, DB uses String values to + * represent TOverwriteMode: + * + * N NEVER A ALWAYS D WHENFILESAREDIFFERENT + */ + private OverwriteModeConverter() { + + DBtoSTORM.put("N", NEVER); + DBtoSTORM.put("A", ALWAYS); + DBtoSTORM.put("D", WHENFILESAREDIFFERENT); + STORMtoDB.put(NEVER, "N"); + STORMtoDB.put(ALWAYS, "A"); + STORMtoDB.put(WHENFILESAREDIFFERENT, "D"); + } + + /** + * Method that returns the only instance of OverwriteModeConverter. + */ + public static OverwriteModeConverter getInstance() { + + return c; + } + + /** + * Method that returns the int used by DPM to represent the given TOverwriteMode. "" is returned + * if no match is found. + */ + public String toDB(TOverwriteMode om) { + + String aux = (String) STORMtoDB.get(om); + if (aux == null) + return ""; + return aux; + } + + /** + * Method that returns the TOverwriteMode used by StoRM to represent the supplied String + * representation of DPM. A configured default TOverwriteMode is returned in case no corresponding + * StoRM type is found. TOverwriteMode.EMPTY is returned if there are configuration errors. 
+ */ + public TOverwriteMode toSTORM(String s) { + + TOverwriteMode aux = (TOverwriteMode) DBtoSTORM.get(s); + if (aux == null) + aux = (TOverwriteMode) DBtoSTORM.get(Configuration.getInstance().getDefaultOverwriteMode()); + if (aux == null) + return TOverwriteMode.EMPTY; + else + return aux; + } + + public String toString() { + + return "OverWriteModeConverter.\nDBtoSTORM map:" + DBtoSTORM + "\nSTORMtoDB map:" + STORMtoDB; + } + +} diff --git a/src/main/java/it/grid/storm/persistence/converter/PinLifetimeConverter.java b/src/main/java/it/grid/storm/persistence/converter/PinLifetimeConverter.java new file mode 100644 index 000000000..92a7865f0 --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/converter/PinLifetimeConverter.java @@ -0,0 +1,87 @@ +/* + * + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package it.grid.storm.persistence.converter; + +import it.grid.storm.srm.types.TLifeTimeInSeconds; +import it.grid.storm.config.Configuration; + +/** + * Class that handles DB representation of a TLifetimeInSeconds, in particular it takes care of + * protocol specification: + * + * 0/null/negative are translated as default StoRM configurable values. StoRMs Empty + * TLifeTimeInSeconds is translated as 0. 
+ * + * @author EGRID ICTP + * @version 1.0 + * @date March 2007 + */ +public class PinLifetimeConverter { + + private static PinLifetimeConverter stc = new PinLifetimeConverter(); + + private PinLifetimeConverter() { + + } + + /** + * Method that returns the only instance of SizeInBytesIntConverter + */ + public static PinLifetimeConverter getInstance() { + + return stc; + } + + /** + * Method that translates the Empty TLifeTimeInSeconds into the empty representation of DB which + * is 0. Any other value is left as is. + */ + public int toDB(long l) { + + if (l == TLifeTimeInSeconds.makeEmpty().value()) + return 0; + return new Long(l).intValue(); + } + + /** + * Method that returns the long corresponding to the int value in the DB, except if it is 0, NULL + * or negative; a configurable default value is returned instead, corresponding to the + * getPinLifetimeMinimum() Configuration class method. + */ + public long toStoRM(int s) { + + if (s == 0) { + return Configuration.getInstance().getPinLifetimeDefault(); + } else if (s < 0) { + // The default is used also as a Minimum + return Configuration.getInstance().getPinLifetimeDefault(); + } + return new Integer(s).longValue(); + } + + public long toStoRM(long s) { + + if (s == 0) { + return Configuration.getInstance().getPinLifetimeDefault(); + } else if (s < 0) { + // The default is used also as a Minimum + return Configuration.getInstance().getPinLifetimeDefault(); + } + return s; + } +} diff --git a/src/main/java/it/grid/storm/catalogs/RequestTypeConverter.java b/src/main/java/it/grid/storm/persistence/converter/RequestTypeConverter.java similarity index 85% rename from src/main/java/it/grid/storm/catalogs/RequestTypeConverter.java rename to src/main/java/it/grid/storm/persistence/converter/RequestTypeConverter.java index 2396ef2f1..e92af6d63 100644 --- a/src/main/java/it/grid/storm/catalogs/RequestTypeConverter.java +++ b/src/main/java/it/grid/storm/persistence/converter/RequestTypeConverter.java @@ -15,12 
+15,12 @@ * the License. */ -package it.grid.storm.catalogs; +package it.grid.storm.persistence.converter; -import static it.grid.storm.catalogs.RequestSummaryDataTO.BOL_REQUEST_TYPE; -import static it.grid.storm.catalogs.RequestSummaryDataTO.COPY_REQUEST_TYPE; -import static it.grid.storm.catalogs.RequestSummaryDataTO.PTG_REQUEST_TYPE; -import static it.grid.storm.catalogs.RequestSummaryDataTO.PTP_REQUEST_TYPE; +import static it.grid.storm.persistence.model.RequestSummaryDataTO.BOL_REQUEST_TYPE; +import static it.grid.storm.persistence.model.RequestSummaryDataTO.COPY_REQUEST_TYPE; +import static it.grid.storm.persistence.model.RequestSummaryDataTO.PTG_REQUEST_TYPE; +import static it.grid.storm.persistence.model.RequestSummaryDataTO.PTP_REQUEST_TYPE; import static it.grid.storm.srm.types.TRequestType.BRING_ON_LINE; import static it.grid.storm.srm.types.TRequestType.COPY; import static it.grid.storm.srm.types.TRequestType.EMPTY; @@ -37,7 +37,7 @@ * Package private auxiliary class used to convert between DB and StoRM object model representation * of the request type. */ -class RequestTypeConverter { +public class RequestTypeConverter { private Map dbToStorm = Maps.newHashMap(); private Map stormToDb = Maps.newHashMap(); diff --git a/src/main/java/it/grid/storm/persistence/converter/SizeInBytesIntConverter.java b/src/main/java/it/grid/storm/persistence/converter/SizeInBytesIntConverter.java new file mode 100644 index 000000000..090c397cd --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/converter/SizeInBytesIntConverter.java @@ -0,0 +1,69 @@ +/* + * + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. 
You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package it.grid.storm.persistence.converter; + +import it.grid.storm.srm.types.TSizeInBytes; + +/** + * Class that handles DB representation of a TSizeInBytes, in particular it takes care of the NULL + * logic of the DB: 0/null are used to mean an empty field, whereas StoRM Object model uses the type + * TSizeInBytes.makeEmpty(); moreover StoRM does accept 0 as a valid TSizeInBytes, so it _is_ + * important to use this converter! + * + * @author EGRID ICTP + * @version 2.0 + * @date July 2005 + */ +public class SizeInBytesIntConverter { + + private static SizeInBytesIntConverter stc = new SizeInBytesIntConverter(); + + private SizeInBytesIntConverter() { + + } + + /** + * Method that returns the only instance of SizeInBytesIntConverter + */ + public static SizeInBytesIntConverter getInstance() { + + return stc; + } + + /** + * Method that translates the Empty TSizeInBytes into the empty representation of DB which is 0. + * Any other int is left as is. + */ + public long toDB(long s) { + + if (s == TSizeInBytes.makeEmpty().value()) + return 0; + return s; + } + + /** + * Method that returns the int as is, except if it is 0 which DB interprets as empty field: in + * that case it then returns the Empty TSizeInBytes int representation. 
+ */ + public long toStoRM(long s) { + + if (s == 0) + return TSizeInBytes.makeEmpty().value(); + return s; + } +} diff --git a/src/main/java/it/grid/storm/persistence/converter/SpaceTokenStringConverter.java b/src/main/java/it/grid/storm/persistence/converter/SpaceTokenStringConverter.java new file mode 100644 index 000000000..be36a9a29 --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/converter/SpaceTokenStringConverter.java @@ -0,0 +1,69 @@ +/* + * + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package it.grid.storm.persistence.converter; + +import it.grid.storm.srm.types.TSpaceToken; + +/** + * Class that handles DPM DB representation of a SpaceToken, in particular it takes care of the + * NULL/EMPTY logic of DPM. In particular DPM uses the empty string "" as meaning the absence of a + * value for the field, whereas StoRM accepts it as a valid String with which to create a + * TSpaceToken; moreover StoRM uses an Empty TSpaceToken type.
+ * + * @author EGRID ICTP + * @version 1.0 + * @date June 2005 + */ +public class SpaceTokenStringConverter { + + private static SpaceTokenStringConverter stc = new SpaceTokenStringConverter(); + + private SpaceTokenStringConverter() { + + } + + /** + * Method that returns the only instance of SpaceTokenConverter + */ + public static SpaceTokenStringConverter getInstance() { + + return stc; + } + + /** + * Method that translates StoRM Empty TSpaceToken String representation into DPM empty + * representation; all other Strings are left as are. + */ + public String toDB(String s) { + + if (s.equals(TSpaceToken.makeEmpty().toString())) + return ""; + return s; + } + + /** + * Method that translates DPM String representing an Empty TSpaceToken into StoRM representation; + * any other String is left as is. + */ + public String toStoRM(String s) { + + if ((s == null) || (s.equals(""))) + return TSpaceToken.makeEmpty().toString(); + return s; + } +} diff --git a/src/main/java/it/grid/storm/persistence/converter/StatusCodeConverter.java b/src/main/java/it/grid/storm/persistence/converter/StatusCodeConverter.java new file mode 100644 index 000000000..2d1bb2eab --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/converter/StatusCodeConverter.java @@ -0,0 +1,151 @@ +/* + * + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License.
+ */ + +package it.grid.storm.persistence.converter; + +import java.util.Map; + +import static it.grid.storm.srm.types.TStatusCode.SRM_ABORTED; +import static it.grid.storm.srm.types.TStatusCode.SRM_AUTHENTICATION_FAILURE; +import static it.grid.storm.srm.types.TStatusCode.SRM_AUTHORIZATION_FAILURE; +import static it.grid.storm.srm.types.TStatusCode.SRM_CUSTOM_STATUS; +import static it.grid.storm.srm.types.TStatusCode.SRM_DONE; +import static it.grid.storm.srm.types.TStatusCode.SRM_DUPLICATION_ERROR; +import static it.grid.storm.srm.types.TStatusCode.SRM_EXCEED_ALLOCATION; +import static it.grid.storm.srm.types.TStatusCode.SRM_FAILURE; +import static it.grid.storm.srm.types.TStatusCode.SRM_FATAL_INTERNAL_ERROR; +import static it.grid.storm.srm.types.TStatusCode.SRM_FILE_BUSY; +import static it.grid.storm.srm.types.TStatusCode.SRM_FILE_IN_CACHE; +import static it.grid.storm.srm.types.TStatusCode.SRM_FILE_LIFETIME_EXPIRED; +import static it.grid.storm.srm.types.TStatusCode.SRM_FILE_LOST; +import static it.grid.storm.srm.types.TStatusCode.SRM_FILE_PINNED; +import static it.grid.storm.srm.types.TStatusCode.SRM_FILE_UNAVAILABLE; +import static it.grid.storm.srm.types.TStatusCode.SRM_INTERNAL_ERROR; +import static it.grid.storm.srm.types.TStatusCode.SRM_INVALID_PATH; +import static it.grid.storm.srm.types.TStatusCode.SRM_INVALID_REQUEST; +import static it.grid.storm.srm.types.TStatusCode.SRM_LAST_COPY; +import static it.grid.storm.srm.types.TStatusCode.SRM_LOWER_SPACE_GRANTED; +import static it.grid.storm.srm.types.TStatusCode.SRM_NON_EMPTY_DIRECTORY; +import static it.grid.storm.srm.types.TStatusCode.SRM_NOT_SUPPORTED; +import static it.grid.storm.srm.types.TStatusCode.SRM_NO_FREE_SPACE; +import static it.grid.storm.srm.types.TStatusCode.SRM_NO_USER_SPACE; +import static it.grid.storm.srm.types.TStatusCode.SRM_PARTIAL_SUCCESS; +import static it.grid.storm.srm.types.TStatusCode.SRM_RELEASED; +import static it.grid.storm.srm.types.TStatusCode.SRM_REQUEST_INPROGRESS; 
+import static it.grid.storm.srm.types.TStatusCode.SRM_REQUEST_QUEUED; +import static it.grid.storm.srm.types.TStatusCode.SRM_REQUEST_SUSPENDED; +import static it.grid.storm.srm.types.TStatusCode.SRM_REQUEST_TIMED_OUT; +import static it.grid.storm.srm.types.TStatusCode.SRM_SPACE_AVAILABLE; +import static it.grid.storm.srm.types.TStatusCode.SRM_SPACE_LIFETIME_EXPIRED; +import static it.grid.storm.srm.types.TStatusCode.SRM_SUCCESS; +import static it.grid.storm.srm.types.TStatusCode.SRM_TOO_MANY_RESULTS; + +import java.util.HashMap; +import java.util.Iterator; +import it.grid.storm.srm.types.TStatusCode; + +/** + * Package private auxiliary class used to convert between DB raw data and StoRM object model + * representation of StatusCode. + * + * @author: EGRID ICTP + * @version: 2.0 + * @date: June 2005 + */ +public class StatusCodeConverter { + + private Map DBtoSTORM = new HashMap(); + private Map STORMtoDB = new HashMap(); + + private static StatusCodeConverter c = new StatusCodeConverter(); + + private StatusCodeConverter() { + + DBtoSTORM.put(new Integer(0), SRM_SUCCESS); + DBtoSTORM.put(new Integer(1), SRM_FAILURE); + DBtoSTORM.put(new Integer(2), SRM_AUTHENTICATION_FAILURE); + DBtoSTORM.put(new Integer(3), SRM_AUTHORIZATION_FAILURE); + DBtoSTORM.put(new Integer(4), SRM_INVALID_REQUEST); + DBtoSTORM.put(new Integer(5), SRM_INVALID_PATH); + DBtoSTORM.put(new Integer(6), SRM_FILE_LIFETIME_EXPIRED); + DBtoSTORM.put(new Integer(7), SRM_SPACE_LIFETIME_EXPIRED); + DBtoSTORM.put(new Integer(8), SRM_EXCEED_ALLOCATION); + DBtoSTORM.put(new Integer(9), SRM_NO_USER_SPACE); + DBtoSTORM.put(new Integer(10), SRM_NO_FREE_SPACE); + DBtoSTORM.put(new Integer(11), SRM_DUPLICATION_ERROR); + DBtoSTORM.put(new Integer(12), SRM_NON_EMPTY_DIRECTORY); + DBtoSTORM.put(new Integer(13), SRM_TOO_MANY_RESULTS); + DBtoSTORM.put(new Integer(14), SRM_INTERNAL_ERROR); + DBtoSTORM.put(new Integer(15), SRM_FATAL_INTERNAL_ERROR); + DBtoSTORM.put(new Integer(16), SRM_NOT_SUPPORTED); + 
DBtoSTORM.put(new Integer(17), SRM_REQUEST_QUEUED); + DBtoSTORM.put(new Integer(18), SRM_REQUEST_INPROGRESS); + DBtoSTORM.put(new Integer(19), SRM_REQUEST_SUSPENDED); + DBtoSTORM.put(new Integer(20), SRM_ABORTED); + DBtoSTORM.put(new Integer(21), SRM_RELEASED); + DBtoSTORM.put(new Integer(22), SRM_FILE_PINNED); + DBtoSTORM.put(new Integer(23), SRM_FILE_IN_CACHE); + DBtoSTORM.put(new Integer(24), SRM_SPACE_AVAILABLE); + DBtoSTORM.put(new Integer(25), SRM_LOWER_SPACE_GRANTED); + DBtoSTORM.put(new Integer(26), SRM_DONE); + DBtoSTORM.put(new Integer(27), SRM_PARTIAL_SUCCESS); + DBtoSTORM.put(new Integer(28), SRM_REQUEST_TIMED_OUT); + DBtoSTORM.put(new Integer(29), SRM_LAST_COPY); + DBtoSTORM.put(new Integer(30), SRM_FILE_BUSY); + DBtoSTORM.put(new Integer(31), SRM_FILE_LOST); + DBtoSTORM.put(new Integer(32), SRM_FILE_UNAVAILABLE); + DBtoSTORM.put(new Integer(33), SRM_CUSTOM_STATUS); + + Object aux; + for (Iterator i = DBtoSTORM.keySet().iterator(); i.hasNext();) { + aux = i.next(); + STORMtoDB.put(DBtoSTORM.get(aux), aux); + } + } + + /** + * Method that returns the only instance of StatusCodeConverter. + */ + public static StatusCodeConverter getInstance() { + + return c; + } + + /** + * Method that returns the int used in the DB to represent the given TStatusCode. -1 is returned + * if no match is found. + */ + public int toDB(TStatusCode sc) { + + Integer aux = (Integer) STORMtoDB.get(sc); + if (aux == null) + return -1; + return aux.intValue(); + } + + /** + * Method that returns the TStatusCode used by StoRM to represent the supplied int representation + * of the DB. TStatusCode.EMPTY is returned if no StoRM type is found. 
+ */ + public TStatusCode toSTORM(int n) { + + TStatusCode aux = DBtoSTORM.get(new Integer(n)); + if (aux == null) + return TStatusCode.EMPTY; + return aux; + } +} diff --git a/src/main/java/it/grid/storm/persistence/converter/TURLConverter.java b/src/main/java/it/grid/storm/persistence/converter/TURLConverter.java new file mode 100644 index 000000000..887072d87 --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/converter/TURLConverter.java @@ -0,0 +1,68 @@ +/* + * + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package it.grid.storm.persistence.converter; + +import it.grid.storm.srm.types.TTURL; + +/** + * Class that handles DPM DB representation of a TTURL, in particular it takes care of the + * NULL/EMPTY logic of DPM. Indeed DPM uses 0/null to mean an empty field, whereas StoRM uses the + * type TTURL.makeEmpty(); in particular StoRM converts an empty String or a null to an Empty TTURL! + * + * @author EGRID ICTP + * @version 1.0 + * @date March 2006 + */ +public class TURLConverter { + + private static TURLConverter stc = new TURLConverter(); // only instance + + private TURLConverter() { + + } + + /** + * Method that returns the only instance of TURLConverter + */ + public static TURLConverter getInstance() { + + return stc; + } + + /** + * Method that translates the Empty TTURL into the empty representation of DPM which is a null!
+ * Any other String is left as is. + */ + public String toDB(String s) { + + if (s.equals(TTURL.makeEmpty().toString())) + return null; + return s; + } + + /** + * Method that translates DPMs "" or null String as the Empty TTURL String representation. Any + * other String is left as is. + */ + public String toStoRM(String s) { + + if ((s == null) || (s.equals(""))) + return TTURL.makeEmpty().toString(); + return s; + } +} diff --git a/src/main/java/it/grid/storm/persistence/converter/TransferProtocolListConverter.java b/src/main/java/it/grid/storm/persistence/converter/TransferProtocolListConverter.java new file mode 100644 index 000000000..962201f14 --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/converter/TransferProtocolListConverter.java @@ -0,0 +1,65 @@ +/* + * + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package it.grid.storm.persistence.converter; + +import it.grid.storm.common.types.TURLPrefix; +import java.util.Iterator; +import java.util.List; +import java.util.ArrayList; +import it.grid.storm.namespace.model.Protocol; + +/** + * Package private auxiliary class used to convert between the DB raw data representation and StoRM + * s Object model list of transfer protocols. + * + */ + +public class TransferProtocolListConverter { + + /** + * Method that returns a List of Uppercase Strings used in the DB to represent the given + * TURLPrefix. 
An empty List is returned in case the conversion does not succeed, a null + * TURLPrefix is supplied, or its size is 0. + */ + public static List toDB(TURLPrefix turlPrefix) { + + List result = new ArrayList(); + Protocol protocol; + for (Iterator it = turlPrefix.getDesiredProtocols().iterator(); it.hasNext();) { + protocol = it.next(); + result.add(protocol.getSchema()); + } + return result; + } + + /** + * Method that returns a TURLPrefix of transfer protocol. If the translation cannot take place, a + * TURLPrefix of size 0 is returned. Likewise if a null List is supplied. + */ + public static TURLPrefix toSTORM(List listOfProtocol) { + + TURLPrefix turlPrefix = new TURLPrefix(); + Protocol protocol = null; + for (Iterator i = listOfProtocol.iterator(); i.hasNext();) { + protocol = Protocol.getProtocol(i.next()); + if (!(protocol.equals(Protocol.UNKNOWN))) + turlPrefix.addProtocol(protocol); + } + return turlPrefix; + } +} diff --git a/src/main/java/it/grid/storm/persistence/dao/AbstractDAO.java b/src/main/java/it/grid/storm/persistence/dao/AbstractDAO.java index 7f773e373..3f190ccc6 100644 --- a/src/main/java/it/grid/storm/persistence/dao/AbstractDAO.java +++ b/src/main/java/it/grid/storm/persistence/dao/AbstractDAO.java @@ -17,11 +17,6 @@ package it.grid.storm.persistence.dao; -import it.grid.storm.persistence.DataSourceConnectionFactory; -import it.grid.storm.persistence.PersistenceDirector; -import it.grid.storm.persistence.exceptions.DataAccessException; -import it.grid.storm.persistence.exceptions.PersistenceException; - import java.sql.Connection; import java.sql.ResultSet; import java.sql.SQLException; @@ -30,156 +25,71 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import it.grid.storm.persistence.pool.DBConnectionPool; + public abstract class AbstractDAO { - private static final Logger log = LoggerFactory.getLogger(AbstractDAO.class); - - private DataSourceConnectionFactory connFactory; - - public AbstractDAO() { - connFactory = 
PersistenceDirector.getConnectionFactory(); - } - - protected void commit(Connection conn) { - - try { - conn.commit(); - conn.setAutoCommit(true); - } catch (SQLException e) { - log.error(e.getMessage(), e); - } - } - - protected Connection getConnection() throws DataAccessException { - - Connection conn = null; - try { - conn = connFactory.borrowConnection(); - } catch (PersistenceException ex) { - throw new DataAccessException(ex); - } - return conn; - } - - protected Statement getStatement(Connection conn) throws DataAccessException { - - Statement stat = null; - if (conn == null) { - throw new DataAccessException( - "No Connection available to create a Statement"); - } else { - try { - stat = conn.createStatement(); - } catch (SQLException e) { - log.error(e.getMessage(), e); - throw new DataAccessException(e); - } - } - return stat; - } - - /** - * Release a connection Accessor method. - * - * @param resultSet - * ResultSet - * @param statement - * Statement - * @param connection - * Connection - * @throws DataAccessException - */ - protected void releaseConnection(ResultSet resultSet, Statement statement, - Connection connection) throws DataAccessException { - - // Release the ResultSet - closeResultSet(resultSet); - - // Close the statement - closeStatement(statement); - - // Release the connection - closeConnection(connection); - } - - /** - * Release a connection and a list of statements and result sets Accessor - * method. 
- * - * @param resultSets - * @param statements - * @param connection - * @throws DataAccessException - */ - protected void releaseConnection(ResultSet[] resultSets, - Statement[] statements, Connection connection) throws DataAccessException { - - // Release the ResultSets - if (resultSets != null) { - for (ResultSet resultSet : resultSets) { - closeResultSet(resultSet); - } - } - // Close the statement - if (statements != null) { - for (Statement statement : statements) { - closeStatement(statement); - } - } - // Release the connection - closeConnection(connection); - } - - private void closeResultSet(ResultSet resultSet) throws DataAccessException { - - if (resultSet != null) { - try { - resultSet.close(); - } catch (SQLException e) { - log.error(e.getMessage(), e); - throw new DataAccessException(e); - } - } - } - - private void closeStatement(Statement statement) throws DataAccessException { - - if (statement != null) { - try { - statement.close(); - } catch (SQLException e) { - log.error(e.getMessage(), e); - throw new DataAccessException(e); - } - } - } - - private void closeConnection(Connection connection) - throws DataAccessException { - - if (connection != null) { - try { - connFactory.giveBackConnection(connection); - } catch (PersistenceException e) { - log.error(e.getMessage(), e); - throw new DataAccessException(e); - } - } - } - - /** - * @param conn - */ - protected void rollback(Connection conn) { - - try { - - conn.rollback(); - conn.setAutoCommit(true); - - } catch (SQLException e) { - log.error(e.getMessage(), e); - } - } + private static final Logger log = LoggerFactory.getLogger(AbstractDAO.class); + + private final DBConnectionPool connectionPool; + + public AbstractDAO(DBConnectionPool connectionPool) { + + this.connectionPool = connectionPool; + } + + protected Connection getConnection() throws SQLException { + + Connection con = connectionPool.getConnection(); + con.setAutoCommit(true); + return con; + } + + protected Connection 
getManagedConnection() throws SQLException { + + Connection con = connectionPool.getConnection(); + con.setAutoCommit(false); + return con; + } + + protected void closeResultSet(ResultSet resultSet) { + + try { + if (resultSet != null) { + resultSet.close(); + } + } catch (SQLException e) { + handleSQLException(e); + } + } + + protected void closeStatement(Statement statement) { + + try { + if (statement != null) { + statement.close(); + } + } catch (SQLException e) { + handleSQLException(e); + } + } + + protected void closeConnection(Connection connection) { + + try { + if (connection != null) { + connection.close(); + } + } catch (SQLException e) { + handleSQLException(e); + } + } + + private void handleSQLException(SQLException e) { + + log.error("SQL Error: {}, SQLState: {}, VendorError: {}.", e.getMessage(), e.getSQLState(), + e.getErrorCode(), e); + e.printStackTrace(); + } } diff --git a/src/main/java/it/grid/storm/persistence/dao/BoLChunkDAO.java b/src/main/java/it/grid/storm/persistence/dao/BoLChunkDAO.java new file mode 100644 index 000000000..f0f242ad3 --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/dao/BoLChunkDAO.java @@ -0,0 +1,40 @@ +package it.grid.storm.persistence.dao; + +import java.util.Collection; + +import it.grid.storm.persistence.model.BoLChunkDataTO; +import it.grid.storm.persistence.model.ReducedBoLChunkDataTO; +import it.grid.storm.srm.types.TRequestToken; +import it.grid.storm.srm.types.TStatusCode; + +public interface BoLChunkDAO { + + void addChild(BoLChunkDataTO to); + + void addNew(BoLChunkDataTO to, String clientDn); + + void update(BoLChunkDataTO to); + + void updateIncomplete(ReducedBoLChunkDataTO to); + + Collection find(TRequestToken requestToken); + + Collection findReduced(TRequestToken requestToken); + + Collection findReduced(TRequestToken requestToken, int[] surlUniqueIDs, + String[] surls); + + Collection findReduced(String griduser, int[] surlUniqueIDs, + String[] surls); + + int updateStatus(BoLChunkDataTO 
to, TStatusCode status, String explanation); + + int releaseExpiredAndSuccessfulRequests(); + + void updateStatusOnMatchingStatus(TRequestToken requestToken, TStatusCode expectedStatusCode, + TStatusCode newStatusCode, String explanation); + + Collection find(int[] surlsUniqueIDs, String[] surlsArray, String dn); + + Collection find(int[] surlsUniqueIDs, String[] surlsArray); +} diff --git a/src/main/java/it/grid/storm/persistence/dao/PtGChunkDAO.java b/src/main/java/it/grid/storm/persistence/dao/PtGChunkDAO.java index 7516763b5..aa0781619 100644 --- a/src/main/java/it/grid/storm/persistence/dao/PtGChunkDAO.java +++ b/src/main/java/it/grid/storm/persistence/dao/PtGChunkDAO.java @@ -18,19 +18,50 @@ package it.grid.storm.persistence.dao; import java.util.Collection; -import it.grid.storm.persistence.exceptions.DataAccessException; -import it.grid.storm.persistence.model.PtGChunkTO; + +import it.grid.storm.persistence.model.PtGChunkDataTO; +import it.grid.storm.persistence.model.ReducedPtGChunkDataTO; import it.grid.storm.srm.types.TRequestToken; +import it.grid.storm.srm.types.TSURL; +import it.grid.storm.srm.types.TStatusCode; public interface PtGChunkDAO { - public PtGChunkTO getPtGChunkDataById(Long ssId) throws DataAccessException; + public void addChild(PtGChunkDataTO to); + + public void addNew(PtGChunkDataTO to, String clientDn); + + public void update(PtGChunkDataTO to); + + public void updateIncomplete(ReducedPtGChunkDataTO chunkTO); + + public PtGChunkDataTO refresh(long primaryKey); + + public Collection find(TRequestToken requestToken); + + public Collection findReduced(TRequestToken requestToken); + + public Collection findReduced(TRequestToken requestToken, + int[] surlsUniqueIDs, String[] surlsArray); + + public Collection findReduced(String griduser, int[] surlUniqueIDs, + String[] surls); + + public void fail(PtGChunkDataTO auxTO); + + public int numberInSRM_FILE_PINNED(int surlUniqueID); + + public int count(int surlUniqueID, TStatusCode status); + 
+ public Collection transitExpiredSRM_FILE_PINNED(); + + public void transitSRM_FILE_PINNEDtoSRM_RELEASED(long[] ids); - public void addPtGChunkData(PtGChunkTO ptgChunkTO) throws DataAccessException; + public void transitSRM_FILE_PINNEDtoSRM_RELEASED(long[] ids, TRequestToken token); - public Collection getPtGChunksDataByToken(TRequestToken token) - throws DataAccessException; + public void updateStatus(TRequestToken requestToken, int[] surlUniqueIDs, + String[] surls, TStatusCode statusCode, String explanation); - public void removePtGChunksData(PtGChunkTO ptgChunkTO) - throws DataAccessException; + public void updateStatusOnMatchingStatus(TRequestToken requestToken, + TStatusCode expectedStatusCode, TStatusCode newStatusCode, String explanation); } diff --git a/src/main/java/it/grid/storm/persistence/dao/PtPChunkDAO.java b/src/main/java/it/grid/storm/persistence/dao/PtPChunkDAO.java index bd3fe0a19..4493b9b71 100644 --- a/src/main/java/it/grid/storm/persistence/dao/PtPChunkDAO.java +++ b/src/main/java/it/grid/storm/persistence/dao/PtPChunkDAO.java @@ -18,20 +18,43 @@ package it.grid.storm.persistence.dao; import java.util.Collection; -import it.grid.storm.persistence.exceptions.DataAccessException; -import it.grid.storm.persistence.model.PtPChunkTO; +import java.util.Map; + +import it.grid.storm.persistence.model.PtPChunkDataTO; +import it.grid.storm.persistence.model.ReducedPtPChunkDataTO; import it.grid.storm.srm.types.TRequestToken; +import it.grid.storm.srm.types.TStatusCode; public interface PtPChunkDAO { - public PtPChunkTO getPtGChunkDataById(Long ssId) throws DataAccessException; + public void update(PtPChunkDataTO to); + + public void updateIncomplete(ReducedPtPChunkDataTO chunkTO); + + public Collection find(TRequestToken requestToken); + + public Collection find(int[] surlsUniqueIDs, String[] surlsArray, String dn); + + public int fail(PtPChunkDataTO auxTO); + + public Map getExpiredSRM_SPACE_AVAILABLE(); + + public Map getExpired(TStatusCode status); 
+ + public int transitExpiredSRM_SPACE_AVAILABLEtoSRM_FILE_LIFETIME_EXPIRED(Collection ids); + + public int transitLongTimeInProgressRequestsToStatus(long expirationTime, TStatusCode status, + String explanation); + + public int updateStatus(Collection ids, TStatusCode fromStatus, TStatusCode toStatus, + String explanation); - public void addPtGChunkData(PtPChunkTO ptpChunkData) - throws DataAccessException; + public int updateStatus(TRequestToken requestToken, int[] surlsUniqueIDs, String[] surls, + TStatusCode statusCode, String explanation); - public Collection getPtPChunksDataByToken(TRequestToken token) - throws DataAccessException; + public int updateStatusOnMatchingStatus(TRequestToken requestToken, + TStatusCode expectedStatusCode, TStatusCode newStatusCode, String explanation); - public void removePtGChunksData(PtPChunkTO ptpChunkData) - throws DataAccessException; + public int updateStatusOnMatchingStatus(TRequestToken requestToken, int[] surlsUniqueIDs, + String[] surls, TStatusCode expectedStatusCode, TStatusCode newStatusCode); } diff --git a/src/main/java/it/grid/storm/persistence/dao/RequestSummaryDAO.java b/src/main/java/it/grid/storm/persistence/dao/RequestSummaryDAO.java index c9eb63a8a..a3a9d308d 100644 --- a/src/main/java/it/grid/storm/persistence/dao/RequestSummaryDAO.java +++ b/src/main/java/it/grid/storm/persistence/dao/RequestSummaryDAO.java @@ -17,17 +17,43 @@ package it.grid.storm.persistence.dao; -import it.grid.storm.persistence.exceptions.DataAccessException; -import it.grid.storm.persistence.model.RequestSummaryTO; +import java.util.Collection; + +import it.grid.storm.persistence.model.RequestSummaryDataTO; +import it.grid.storm.srm.types.TRequestToken; +import it.grid.storm.srm.types.TRequestType; +import it.grid.storm.srm.types.TStatusCode; public interface RequestSummaryDAO { - public RequestSummaryTO getRequestSummaryById(Long ssId) - throws DataAccessException; + Collection fetchNewRequests(int limit); + + void failRequest(long 
requestId, String explanation); + + void failPtGRequest(long requestId, String explanation); + + void failPtPRequest(long requestId, String explanation); + + void updateGlobalStatus(TRequestToken requestToken, TStatusCode status, String explanation); + + void updateGlobalStatusOnMatchingGlobalStatus(TRequestToken requestToken, + TStatusCode expectedStatusCode, TStatusCode newStatusCode, String explanation); + + void updateGlobalStatusPinFileLifetime(TRequestToken requestToken, TStatusCode status, + String explanation); + + void abortRequest(TRequestToken requestToken); + + void abortInProgressRequest(TRequestToken requestToken); + + void abortChunksOfInProgressRequest(TRequestToken requestToken, Collection surls); + + TRequestType getRequestType(TRequestToken requestToken); + + RequestSummaryDataTO find(TRequestToken requestToken); + + Collection purgeExpiredRequests(long expiredRequestTime, int purgeSize); - public void addRequestSummary(RequestSummaryTO rsd) - throws DataAccessException; + int getNumberExpired(); - public void removeRequestSummary(RequestSummaryTO rsd) - throws DataAccessException; } diff --git a/src/main/java/it/grid/storm/persistence/dao/SURLStatusDAO.java b/src/main/java/it/grid/storm/persistence/dao/SURLStatusDAO.java new file mode 100644 index 000000000..28108caea --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/dao/SURLStatusDAO.java @@ -0,0 +1,40 @@ +package it.grid.storm.persistence.dao; + +import java.util.List; +import java.util.Map; + +import it.grid.storm.griduser.GridUserInterface; +import it.grid.storm.srm.types.TRequestToken; +import it.grid.storm.srm.types.TReturnStatus; +import it.grid.storm.srm.types.TSURL; + +public interface SURLStatusDAO { + + boolean abortActivePtGsForSURL(GridUserInterface user, TSURL surl, String explanation); + + boolean abortActivePtPsForSURL(GridUserInterface user, TSURL surl, String explanation); + + Map getPinnedSURLsForUser(GridUserInterface user, List surls); + + Map 
getPinnedSURLsForUser(GridUserInterface user, TRequestToken token, + List surls); + + Map getSURLStatuses(TRequestToken token); + + Map getSURLStatuses(TRequestToken token, List surls); + + int markSURLsReadyForRead(TRequestToken token, List surls); + + void releaseSURL(TSURL surl); + + void releaseSURLs(GridUserInterface user, List surls); + + void releaseSURLs(List surls); + + void releaseSURLs(TRequestToken token, List surls); + + boolean surlHasOngoingPtGs(TSURL surl); + + boolean surlHasOngoingPtPs(TSURL surl, TRequestToken token); + +} diff --git a/src/main/java/it/grid/storm/persistence/dao/StorageAreaDAO.java b/src/main/java/it/grid/storm/persistence/dao/StorageAreaDAO.java deleted file mode 100644 index 6ae104bc4..000000000 --- a/src/main/java/it/grid/storm/persistence/dao/StorageAreaDAO.java +++ /dev/null @@ -1,21 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ - -package it.grid.storm.persistence.dao; - -public interface StorageAreaDAO { -} diff --git a/src/main/java/it/grid/storm/persistence/dao/TapeRecallDAO.java b/src/main/java/it/grid/storm/persistence/dao/TapeRecallDAO.java index 60c9c51cb..58d93062e 100644 --- a/src/main/java/it/grid/storm/persistence/dao/TapeRecallDAO.java +++ b/src/main/java/it/grid/storm/persistence/dao/TapeRecallDAO.java @@ -22,181 +22,170 @@ import java.util.Date; import java.util.List; +import java.util.Optional; import java.util.UUID; /** * Tape Recall Data Access Object (DAO) */ -public abstract class TapeRecallDAO extends AbstractDAO { - - /** - * - * @return - * @throws DataAccessException - */ - public abstract int getNumberInProgress() throws DataAccessException; - - /** - * - * @param voName - * @return - * @throws DataAccessException - */ - public abstract int getNumberInProgress(String voName) - throws DataAccessException; - - /** - * - * @return - * @throws DataAccessException - */ - public abstract int getNumberQueued() throws DataAccessException; - - /** - * - * @param voName - * @return - * @throws DataAccessException - */ - public abstract int getNumberQueued(String voName) throws DataAccessException; - - /** - * - * @return - * @throws DataAccessException - */ - public abstract int getReadyForTakeOver() throws DataAccessException; - - /** - * - * @param voName - * @return - * @throws DataAccessException - */ - public abstract int getReadyForTakeOver(String voName) - throws DataAccessException; - - /** - * @param taskId - * @param requestToken - * @return - * @throws DataAccessException - */ - public abstract TapeRecallTO getTask(UUID taskId, String requestToken) - throws DataAccessException; - - /** - * @param groupTaskId - * @return - * @throws DataAccessException - */ - public abstract List getGroupTasks(UUID groupTaskId) - throws DataAccessException; - - /** - * Verifies that a recall task with the given taskId and request token exists - * on the database - * - * @param 
taskId - * @param requestToken - * @return true if the recall task exists - * @throws DataAccessException - */ - public abstract boolean existsTask(UUID taskId, String requestToken) - throws DataAccessException; - - /** - * @param groupTaskId - * @return - * @throws DataAccessException - */ - public abstract boolean existsGroupTask(UUID groupTaskId) - throws DataAccessException; - - /** - * Method called by a garbage collector that removes all tape recalls that are - * not in QUEUED (1) or IN_PROGRESS (2) status - * - * @param expirationTime seconds must pass to consider the request as expired - * @param delete at most numMaxToPurge tasks - * @return the amount of tasks deleted - * @throws DataAccessException - */ - public abstract int purgeCompletedTasks(long expirationTime, int numMaxToPurge) - throws DataAccessException; - - /** - * @param taskId - * @param newValue - * @throws DataAccessException - */ - public abstract void setGroupTaskRetryValue(UUID groupTaskId, int value) - throws DataAccessException; - - /** - * - * @return - * @throws DataAccessException - */ - public abstract TapeRecallTO takeoverTask() throws DataAccessException; - - /** - * - * @param voName - * @return - * @throws DataAccessException - */ - public abstract TapeRecallTO takeoverTask(String voName) - throws DataAccessException; - - /** - * Performs the take-over of max numberOfTaks tasks possibly returning more - * than one file recall task for some files - * - * @param numberOfTaks - * @return - * @throws DataAccessException - */ - public abstract List takeoverTasksWithDoubles(int numberOfTaks) - throws DataAccessException; - - /** - * - * @param numberOfTaks - * @param voName - * @return - * @throws DataAccessException - */ - public abstract List takeoverTasksWithDoubles(int numberOfTaks, - String voName) throws DataAccessException; - - /** - * @param task - * @param statuses - * @param proposedGroupTaskId - * @return - * @throws DataAccessException - */ - public abstract UUID 
insertCloneTask(TapeRecallTO task, int[] statuses, - UUID proposedGroupTaskId) throws DataAccessException; - - /** - * @param groupTaskId - * @param statusId - * @return - * @throws DataAccessException - */ - public abstract boolean setGroupTaskStatus(UUID groupTaskId, int statusId, - Date timestamp) throws DataAccessException; - - /** - * - * @param numberOfTaks - * @return - * @throws DataAccessException - */ - public abstract List getAllInProgressTasks(int numberOfTaks) - throws DataAccessException; - -} \ No newline at end of file +public interface TapeRecallDAO { + + /** + * + * @return + * @throws DataAccessException + */ + public int getNumberInProgress() throws DataAccessException; + + /** + * + * @param voName + * @return + * @throws DataAccessException + */ + public int getNumberInProgress(String voName) throws DataAccessException; + + /** + * + * @return + * @throws DataAccessException + */ + public int getNumberQueued() throws DataAccessException; + + /** + * + * @param voName + * @return + * @throws DataAccessException + */ + public int getNumberQueued(String voName) throws DataAccessException; + + /** + * + * @return + * @throws DataAccessException + */ + public int getReadyForTakeOver() throws DataAccessException; + + /** + * + * @param voName + * @return + * @throws DataAccessException + */ + public int getReadyForTakeOver(String voName) throws DataAccessException; + + /** + * @param taskId + * @param requestToken + * @return + * @throws DataAccessException + */ + public Optional getTask(UUID taskId, String requestToken) throws DataAccessException; + + /** + * @param groupTaskId + * @return + * @throws DataAccessException + */ + public List getGroupTasks(UUID groupTaskId) throws DataAccessException; + + /** + * Verifies that a recall task with the given taskId and request token exists on the database + * + * @param taskId + * @param requestToken + * @return true if the recall task exists + * @throws DataAccessException + */ + public boolean 
existsTask(UUID taskId, String requestToken) throws DataAccessException; + + /** + * @param groupTaskId + * @return + * @throws DataAccessException + */ + public boolean existsGroupTask(UUID groupTaskId) throws DataAccessException; + + /** + * Method called by a garbage collector that removes all tape recalls that are not in QUEUED (1) + * or IN_PROGRESS (2) status + * + * @param expirationTime seconds must pass to consider the request as expired + * @param delete at most numMaxToPurge tasks + * @return the amount of tasks deleted + * @throws DataAccessException + */ + public int purgeCompletedTasks(long expirationTime, int numMaxToPurge) throws DataAccessException; + + /** + * @param taskId + * @param newValue + * @throws DataAccessException + */ + public void setGroupTaskRetryValue(UUID groupTaskId, int value) throws DataAccessException; + + /** + * + * @return + * @throws DataAccessException + */ + public TapeRecallTO takeoverTask() throws DataAccessException; + + /** + * + * @param voName + * @return + * @throws DataAccessException + */ + public TapeRecallTO takeoverTask(String voName) throws DataAccessException; + + /** + * Performs the take-over of max numberOfTaks tasks possibly returning more than one file recall + * task for some files + * + * @param numberOfTaks + * @return + * @throws DataAccessException + */ + public List takeoverTasksWithDoubles(int numberOfTaks) throws DataAccessException; + + /** + * + * @param numberOfTaks + * @param voName + * @return + * @throws DataAccessException + */ + public List takeoverTasksWithDoubles(int numberOfTaks, String voName) + throws DataAccessException; + + /** + * @param task + * @param statuses + * @param proposedGroupTaskId + * @return + * @throws DataAccessException + */ + public UUID insertCloneTask(TapeRecallTO task, int[] statuses, UUID proposedGroupTaskId) + throws DataAccessException; + + /** + * @param groupTaskId + * @param statusId + * @return + * @throws DataAccessException + */ + public boolean 
setGroupTaskStatus(UUID groupTaskId, int statusId, Date timestamp) + throws DataAccessException; + + /** + * + * @param numberOfTaks + * @return + * @throws DataAccessException + */ + public List getAllInProgressTasks(int numberOfTaks) throws DataAccessException; + +} diff --git a/src/main/java/it/grid/storm/persistence/dao/VolatileAndJiTDAO.java b/src/main/java/it/grid/storm/persistence/dao/VolatileAndJiTDAO.java new file mode 100644 index 000000000..04d37321e --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/dao/VolatileAndJiTDAO.java @@ -0,0 +1,32 @@ +package it.grid.storm.persistence.dao; + +import java.util.Collection; +import java.util.List; + +public interface VolatileAndJiTDAO { + + public void addJiT(String filename, int uid, int gid, int acl, long start, long pinLifetime); + + public void addVolatile(String filename, long start, long fileLifetime); + + public boolean exists(String filename); + + public void forceUpdateJiT(String filename, int uid, int acl, long start, long pinLifetime); + + public int numberJiT(String filename, int uid, int acl); + + public int numberVolatile(String filename); + + public void removeAllJiTsOn(String filename); + + public Collection[] removeExpired(long time); + + public void updateJiT(String filename, int uid, int acl, long start, long pinLifetime); + + public void updateVolatile(String filename, long start, long fileLifetime); + + public void updateVolatile(String fileName, long fileStart); + + public List volatileInfoOn(String filename); + +} diff --git a/src/main/java/it/grid/storm/persistence/exceptions/InfrastructureException.java b/src/main/java/it/grid/storm/persistence/exceptions/InfrastructureException.java deleted file mode 100644 index bae662575..000000000 --- a/src/main/java/it/grid/storm/persistence/exceptions/InfrastructureException.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.persistence.exceptions; - -/** - * This exception is used to mark (fatal) failures in infrastructure and system - * code. - * - * @author Christian Bauer - */ -public class InfrastructureException extends RuntimeException { - - public InfrastructureException() { - - } - - public InfrastructureException(String message) { - - super(message); - } - - public InfrastructureException(String message, Throwable cause) { - - super(message, cause); - } - - public InfrastructureException(Throwable cause) { - - super(cause); - } -} diff --git a/src/main/java/it/grid/storm/persistence/exceptions/InvalidBoLDataAttributesException.java b/src/main/java/it/grid/storm/persistence/exceptions/InvalidBoLDataAttributesException.java new file mode 100644 index 000000000..2ef423b5c --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/exceptions/InvalidBoLDataAttributesException.java @@ -0,0 +1,88 @@ +package it.grid.storm.persistence.exceptions; + +import it.grid.storm.common.types.TURLPrefix; +import it.grid.storm.srm.types.TDirOption; +import it.grid.storm.srm.types.TLifeTimeInSeconds; +import it.grid.storm.srm.types.TReturnStatus; +import it.grid.storm.srm.types.TSURL; +import it.grid.storm.srm.types.TSizeInBytes; +import it.grid.storm.srm.types.TTURL; + +/** + * @author Michele Dibenedetto + * + */ +public class InvalidBoLDataAttributesException extends 
InvalidFileTransferDataAttributesException { + + private static final long serialVersionUID = 8113403994527678088L; + // booleans that indicate whether the corresponding variable is null + protected boolean nullLifeTime; + protected boolean nullDirOption; + protected boolean nullFileSize; + + public InvalidBoLDataAttributesException(TSURL fromSURL, TLifeTimeInSeconds lifeTime, + TDirOption dirOption, TURLPrefix transferProtocols, TSizeInBytes fileSize, + TReturnStatus status, TTURL transferURL) { + + super(fromSURL, transferProtocols, status, transferURL); + init(lifeTime, dirOption, fileSize); + } + + public InvalidBoLDataAttributesException(TSURL fromSURL, TLifeTimeInSeconds lifeTime, + TDirOption dirOption, TURLPrefix transferProtocols, TSizeInBytes fileSize, + TReturnStatus status, TTURL transferURL, String message) { + + super(fromSURL, transferProtocols, status, transferURL, message); + init(lifeTime, dirOption, fileSize); + } + + public InvalidBoLDataAttributesException(TSURL fromSURL, TLifeTimeInSeconds lifeTime, + TDirOption dirOption, TURLPrefix transferProtocols, TSizeInBytes fileSize, + TReturnStatus status, TTURL transferURL, Throwable cause) { + + super(fromSURL, transferProtocols, status, transferURL, cause); + init(lifeTime, dirOption, fileSize); + } + + public InvalidBoLDataAttributesException(TSURL fromSURL, TLifeTimeInSeconds lifeTime, + TDirOption dirOption, TURLPrefix transferProtocols, TSizeInBytes fileSize, + TReturnStatus status, TTURL transferURL, String message, Throwable cause) { + + super(fromSURL, transferProtocols, status, transferURL, message, cause); + init(lifeTime, dirOption, fileSize); + } + + private void init(TLifeTimeInSeconds lifeTime, TDirOption dirOption, TSizeInBytes fileSize) { + + nullLifeTime = lifeTime == null; + nullDirOption = dirOption == null; + nullFileSize = fileSize == null; + } + + /* + * (non-Javadoc) + * + * @see java.lang.Object#toString() + */ + @Override + public String toString() { + + StringBuilder 
builder = new StringBuilder(); + builder.append("InvalidBoLDataAttributesException [nullLifeTime="); + builder.append(nullLifeTime); + builder.append(", nullDirOption="); + builder.append(nullDirOption); + builder.append(", nullFileSize="); + builder.append(nullFileSize); + builder.append(", nullSURL="); + builder.append(nullSURL); + builder.append(", nullTransferProtocols="); + builder.append(nullTransferProtocols); + builder.append(", nullStatus="); + builder.append(nullStatus); + builder.append(", nullTransferURL="); + builder.append(nullTransferURL); + builder.append("]"); + return builder.toString(); + } +} diff --git a/src/main/java/it/grid/storm/persistence/exceptions/InvalidBoLPersistentChunkDataAttributesException.java b/src/main/java/it/grid/storm/persistence/exceptions/InvalidBoLPersistentChunkDataAttributesException.java new file mode 100644 index 000000000..e37f5bac6 --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/exceptions/InvalidBoLPersistentChunkDataAttributesException.java @@ -0,0 +1,82 @@ +/* + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by + * applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS + * OF ANY KIND, either express or implied. See the License for the specific + * language governing permissions and limitations under the License. 
+ */ + +package it.grid.storm.persistence.exceptions; + +import it.grid.storm.srm.types.TDirOption; +import it.grid.storm.srm.types.TRequestToken; +import it.grid.storm.srm.types.TSURL; +import it.grid.storm.srm.types.TLifeTimeInSeconds; +import it.grid.storm.common.types.TURLPrefix; +import it.grid.storm.srm.types.TSizeInBytes; +import it.grid.storm.srm.types.TReturnStatus; +import it.grid.storm.srm.types.TTURL; + +/** + * This class represents an exceptin thrown when the attributes supplied to the constructor of + * PtPChunkData are invalid, that is if any of the following is _null_: requestToken, toSURL, + * lifetime, fileStorageType, spaceToken, knownSizeOfThisFile, TURLPrefix transferProtocols, + * overwriteOption, fileSize, status, transferURL. + * + * @author EGRID - ICTP Trieste + * @date June, 2005 + * @version 2.0 + */ +public class InvalidBoLPersistentChunkDataAttributesException + extends InvalidBoLDataAttributesException { + + private static final long serialVersionUID = -5117535717125685975L; + /** + * booleans that indicate whether the corresponding variable is null + */ + boolean nullRequestToken; + + /** + * Constructor that requires the attributes that caused the exception to be thrown. 
+ */ + public InvalidBoLPersistentChunkDataAttributesException(TRequestToken requestToken, + TSURL fromSURL, TLifeTimeInSeconds lifeTime, TDirOption dirOption, + TURLPrefix desiredProtocols, TSizeInBytes fileSize, TReturnStatus status, TTURL transferURL) { + + super(fromSURL, lifeTime, dirOption, desiredProtocols, fileSize, status, transferURL); + nullRequestToken = requestToken == null; + } + + /* + * (non-Javadoc) + * + * @see java.lang.Object#toString() + */ + @Override + public String toString() { + + StringBuilder builder = new StringBuilder(); + builder.append("InvalidBoLPersistentChunkDataAttributesException [nullRequestToken="); + builder.append(nullRequestToken); + builder.append(", nullLifeTime="); + builder.append(nullLifeTime); + builder.append(", nullDirOption="); + builder.append(nullDirOption); + builder.append(", nullFileSize="); + builder.append(nullFileSize); + builder.append(", nullSURL="); + builder.append(nullSURL); + builder.append(", nullTransferProtocols="); + builder.append(nullTransferProtocols); + builder.append(", nullStatus="); + builder.append(nullStatus); + builder.append(", nullTransferURL="); + builder.append(nullTransferURL); + builder.append("]"); + return builder.toString(); + } + +} diff --git a/src/main/java/it/grid/storm/persistence/exceptions/InvalidFileTransferDataAttributesException.java b/src/main/java/it/grid/storm/persistence/exceptions/InvalidFileTransferDataAttributesException.java new file mode 100644 index 000000000..fedb042b5 --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/exceptions/InvalidFileTransferDataAttributesException.java @@ -0,0 +1,83 @@ +/* + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. 
You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by + * applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS + * OF ANY KIND, either express or implied. See the License for the specific + * language governing permissions and limitations under the License. + */ + +package it.grid.storm.persistence.exceptions; + +import it.grid.storm.common.types.TURLPrefix; +import it.grid.storm.srm.types.TReturnStatus; +import it.grid.storm.srm.types.TSURL; +import it.grid.storm.srm.types.TTURL; + +/** + * @author Michele Dibenedetto + */ +public class InvalidFileTransferDataAttributesException + extends InvalidSurlRequestDataAttributesException { + + private static final long serialVersionUID = 4416318501544415810L; + protected boolean nullTransferProtocols; + protected boolean nullTransferURL; + + public InvalidFileTransferDataAttributesException(TSURL SURL, TURLPrefix transferProtocols, + TReturnStatus status, TTURL transferURL) { + + super(SURL, status); + init(transferProtocols, transferURL); + } + + public InvalidFileTransferDataAttributesException(TSURL SURL, TURLPrefix transferProtocols, + TReturnStatus status, TTURL transferURL, String message) { + + super(SURL, status, message); + init(transferProtocols, transferURL); + } + + public InvalidFileTransferDataAttributesException(TSURL SURL, TURLPrefix transferProtocols, + TReturnStatus status, TTURL transferURL, Throwable cause) { + + super(SURL, status, cause); + init(transferProtocols, transferURL); + } + + public InvalidFileTransferDataAttributesException(TSURL SURL, TURLPrefix transferProtocols, + TReturnStatus status, TTURL transferURL, String message, Throwable cause) { + + super(SURL, status, message, cause); + init(transferProtocols, transferURL); + } + + private void init(TURLPrefix transferProtocols, TTURL transferURL) { + + nullTransferProtocols = transferProtocols 
== null; + nullTransferURL = transferURL == null; + } + + /* + * (non-Javadoc) + * + * @see java.lang.Object#toString() + */ + @Override + public String toString() { + + StringBuilder builder = new StringBuilder(); + builder.append("InvalidFileTransferDataAttributesException [nullTransferProtocols="); + builder.append(nullTransferProtocols); + builder.append(", nullTransferURL="); + builder.append(nullTransferURL); + builder.append(", nullSURL="); + builder.append(nullSURL); + builder.append(", nullStatus="); + builder.append(nullStatus); + builder.append("]"); + return builder.toString(); + } +} diff --git a/src/main/java/it/grid/storm/persistence/exceptions/InvalidPtGDataAttributesException.java b/src/main/java/it/grid/storm/persistence/exceptions/InvalidPtGDataAttributesException.java new file mode 100644 index 000000000..1b71c9b7d --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/exceptions/InvalidPtGDataAttributesException.java @@ -0,0 +1,84 @@ +/* + * + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +package it.grid.storm.persistence.exceptions; + +import it.grid.storm.srm.types.TSURL; +import it.grid.storm.srm.types.TLifeTimeInSeconds; +import it.grid.storm.srm.types.TDirOption; +import it.grid.storm.common.types.TURLPrefix; +import it.grid.storm.srm.types.TSizeInBytes; +import it.grid.storm.srm.types.TReturnStatus; +import it.grid.storm.srm.types.TTURL; + +/** + * This class represents an exceptin thrown when the attributes supplied to the constructor of + * PtGChunkData are invalid, that is if any of the following is _null_: requestToken, fromSURL, + * lifeTime, numOfLevels, transferProtocols, fileSize, status, transferURL. + * + * @author EGRID - ICTP Trieste + * @date March 23rd, 2005 + * @version 3.0 + */ +public class InvalidPtGDataAttributesException extends InvalidFileTransferDataAttributesException { + + private static final long serialVersionUID = -3484929474636108262L; + // booleans that indicate whether the corresponding variable is null + protected boolean nullLifeTime; + protected boolean nullDirOption; + protected boolean nullFileSize; + + /** + * Constructor that requires the attributes that caused the exception to be thrown. 
+ */ + public InvalidPtGDataAttributesException(TSURL fromSURL, TLifeTimeInSeconds lifeTime, + TDirOption dirOption, TURLPrefix transferProtocols, TSizeInBytes fileSize, + TReturnStatus status, TTURL transferURL) { + + super(fromSURL, transferProtocols, status, transferURL); + nullLifeTime = lifeTime == null; + nullDirOption = dirOption == null; + nullFileSize = fileSize == null; + } + + /* + * (non-Javadoc) + * + * @see java.lang.Object#toString() + */ + @Override + public String toString() { + + StringBuilder builder = new StringBuilder(); + builder.append("InvalidPtGChunkDataAttributesException [nullLifeTime="); + builder.append(nullLifeTime); + builder.append(", nullDirOption="); + builder.append(nullDirOption); + builder.append(", nullFileSize="); + builder.append(nullFileSize); + builder.append(", nullSURL="); + builder.append(nullSURL); + builder.append(", nullTransferProtocols="); + builder.append(nullTransferProtocols); + builder.append(", nullStatus="); + builder.append(nullStatus); + builder.append(", nullTransferURL="); + builder.append(nullTransferURL); + builder.append("]"); + return builder.toString(); + } +} diff --git a/src/main/java/it/grid/storm/persistence/exceptions/InvalidPtGPersistentChunkDataAttributesException.java b/src/main/java/it/grid/storm/persistence/exceptions/InvalidPtGPersistentChunkDataAttributesException.java new file mode 100644 index 000000000..666272f7f --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/exceptions/InvalidPtGPersistentChunkDataAttributesException.java @@ -0,0 +1,82 @@ +/* + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. 
You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by + * applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS + * OF ANY KIND, either express or implied. See the License for the specific + * language governing permissions and limitations under the License. + */ + +package it.grid.storm.persistence.exceptions; + +import it.grid.storm.srm.types.TDirOption; +import it.grid.storm.srm.types.TRequestToken; +import it.grid.storm.srm.types.TSURL; +import it.grid.storm.srm.types.TLifeTimeInSeconds; +import it.grid.storm.common.types.TURLPrefix; +import it.grid.storm.srm.types.TSizeInBytes; +import it.grid.storm.srm.types.TReturnStatus; +import it.grid.storm.srm.types.TTURL; + +/** + * This class represents an exceptin thrown when the attributes supplied to the constructor of + * PtPChunkData are invalid, that is if any of the following is _null_: requestToken, toSURL, + * lifetime, fileStorageType, spaceToken, knownSizeOfThisFile, TURLPrefix transferProtocols, + * overwriteOption, fileSize, status, transferURL. + * + * @author EGRID - ICTP Trieste + * @date June, 2005 + * @version 2.0 + */ +public class InvalidPtGPersistentChunkDataAttributesException + extends InvalidPtGDataAttributesException { + + private static final long serialVersionUID = -5117535717125685975L; + /** + * booleans that indicate whether the corresponding variable is null + */ + boolean nullRequestToken; + + /** + * Constructor that requires the attributes that caused the exception to be thrown. 
+ */ + public InvalidPtGPersistentChunkDataAttributesException(TRequestToken requestToken, + TSURL fromSURL, TLifeTimeInSeconds lifeTime, TDirOption dirOption, + TURLPrefix transferProtocols, TSizeInBytes fileSize, TReturnStatus status, + TTURL transferURL) { + + super(fromSURL, lifeTime, dirOption, transferProtocols, fileSize, status, transferURL); + nullRequestToken = requestToken == null; + } + + /* + * (non-Javadoc) + * + * @see java.lang.Object#toString() + */ + @Override + public String toString() { + + StringBuilder builder = new StringBuilder(); + builder.append("InvalidPtGPersistentChunkDataAttributesException [nullRequestToken="); + builder.append(nullRequestToken); + builder.append(", nullLifeTime="); + builder.append(nullLifeTime); + builder.append(", nullDirOption="); + builder.append(nullDirOption); + builder.append(", nullFileSize="); + builder.append(nullFileSize); + builder.append(", nullSURL="); + builder.append(nullSURL); + builder.append(", nullTransferProtocols="); + builder.append(nullTransferProtocols); + builder.append(", nullStatus="); + builder.append(nullStatus); + builder.append(", nullTransferURL="); + builder.append(nullTransferURL); + builder.append("]"); + return builder.toString(); + } +} diff --git a/src/main/java/it/grid/storm/persistence/exceptions/InvalidPtPDataAttributesException.java b/src/main/java/it/grid/storm/persistence/exceptions/InvalidPtPDataAttributesException.java new file mode 100644 index 000000000..4577c0582 --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/exceptions/InvalidPtPDataAttributesException.java @@ -0,0 +1,125 @@ +/* + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. 
You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by + * applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS + * OF ANY KIND, either express or implied. See the License for the specific + * language governing permissions and limitations under the License. + */ + +package it.grid.storm.persistence.exceptions; + +import it.grid.storm.common.types.TURLPrefix; +import it.grid.storm.srm.types.TFileStorageType; +import it.grid.storm.srm.types.TLifeTimeInSeconds; +import it.grid.storm.srm.types.TOverwriteMode; +import it.grid.storm.srm.types.TReturnStatus; +import it.grid.storm.srm.types.TSURL; +import it.grid.storm.srm.types.TSizeInBytes; +import it.grid.storm.srm.types.TSpaceToken; +import it.grid.storm.srm.types.TTURL; + +/** + * @author Michele Dibenedetto + */ +public class InvalidPtPDataAttributesException extends InvalidFileTransferDataAttributesException { + + /** + * + */ + private static final long serialVersionUID = 1051060981188652979L; + protected boolean nullSpaceToken; + protected boolean nullPinLifetime; + protected boolean nullFileLifetime; + protected boolean nullFileStorageType; + protected boolean nullKnownSizeOfThisFile; + protected boolean nullOverwriteOption; + + public InvalidPtPDataAttributesException(TSURL toSURL, TLifeTimeInSeconds fileLifetime, + TLifeTimeInSeconds pinLifetime, TFileStorageType fileStorageType, TSpaceToken spaceToken, + TSizeInBytes knownSizeOfThisFile, TURLPrefix transferProtocols, + TOverwriteMode overwriteOption, TReturnStatus status, TTURL transferURL) { + + super(toSURL, transferProtocols, status, transferURL); + init(spaceToken, fileLifetime, pinLifetime, fileStorageType, knownSizeOfThisFile, + overwriteOption); + } + + public InvalidPtPDataAttributesException(TSURL toSURL, TLifeTimeInSeconds fileLifetime, + TLifeTimeInSeconds pinLifetime, TFileStorageType 
fileStorageType, TSpaceToken spaceToken, + TSizeInBytes knownSizeOfThisFile, TURLPrefix transferProtocols, + TOverwriteMode overwriteOption, TReturnStatus status, TTURL transferURL, String message) { + + super(toSURL, transferProtocols, status, transferURL, message); + init(spaceToken, fileLifetime, pinLifetime, fileStorageType, knownSizeOfThisFile, + overwriteOption); + } + + public InvalidPtPDataAttributesException(TSURL toSURL, TLifeTimeInSeconds fileLifetime, + TLifeTimeInSeconds pinLifetime, TFileStorageType fileStorageType, TSpaceToken spaceToken, + TSizeInBytes knownSizeOfThisFile, TURLPrefix transferProtocols, + TOverwriteMode overwriteOption, TReturnStatus status, TTURL transferURL, Throwable cause) { + + super(toSURL, transferProtocols, status, transferURL, cause); + init(spaceToken, fileLifetime, pinLifetime, fileStorageType, knownSizeOfThisFile, + overwriteOption); + } + + public InvalidPtPDataAttributesException(TSURL toSURL, TLifeTimeInSeconds fileLifetime, + TLifeTimeInSeconds pinLifetime, TFileStorageType fileStorageType, TSpaceToken spaceToken, + TSizeInBytes knownSizeOfThisFile, TURLPrefix transferProtocols, + TOverwriteMode overwriteOption, TReturnStatus status, TTURL transferURL, String message, + Throwable cause) { + + super(toSURL, transferProtocols, status, transferURL, message, cause); + init(spaceToken, fileLifetime, pinLifetime, fileStorageType, knownSizeOfThisFile, + overwriteOption); + } + + private void init(TSpaceToken spaceToken, TLifeTimeInSeconds fileLifetime, + TLifeTimeInSeconds pinLifetime, TFileStorageType fileStorageType, + TSizeInBytes knownSizeOfThisFile, TOverwriteMode overwriteOption) { + + nullSpaceToken = spaceToken == null; + nullPinLifetime = pinLifetime == null; + nullFileLifetime = fileLifetime == null; + nullFileStorageType = fileStorageType == null; + nullKnownSizeOfThisFile = knownSizeOfThisFile == null; + nullOverwriteOption = overwriteOption == null; + } + + /* + * (non-Javadoc) + * + * @see 
java.lang.Object#toString() + */ + @Override + public String toString() { + + StringBuilder builder = new StringBuilder(); + builder.append("InvalidPtPDataAttributesException [nullSpaceToken="); + builder.append(nullSpaceToken); + builder.append(", nullPinLifetime="); + builder.append(nullPinLifetime); + builder.append(", nullFileLifetime="); + builder.append(nullFileLifetime); + builder.append(", nullFileStorageType="); + builder.append(nullFileStorageType); + builder.append(", nullKnownSizeOfThisFile="); + builder.append(nullKnownSizeOfThisFile); + builder.append(", nullOverwriteOption="); + builder.append(nullOverwriteOption); + builder.append(", nullSURL="); + builder.append(nullSURL); + builder.append(", nullTransferProtocols="); + builder.append(nullTransferProtocols); + builder.append(", nullStatus="); + builder.append(nullStatus); + builder.append(", nullTransferURL="); + builder.append(nullTransferURL); + builder.append("]"); + return builder.toString(); + } +} diff --git a/src/main/java/it/grid/storm/persistence/exceptions/InvalidPtPPersistentChunkDataAttributesException.java b/src/main/java/it/grid/storm/persistence/exceptions/InvalidPtPPersistentChunkDataAttributesException.java new file mode 100644 index 000000000..f98418d59 --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/exceptions/InvalidPtPPersistentChunkDataAttributesException.java @@ -0,0 +1,92 @@ +/* + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by + * applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS + * OF ANY KIND, either express or implied. 
See the License for the specific + * language governing permissions and limitations under the License. + */ + +package it.grid.storm.persistence.exceptions; + +import it.grid.storm.srm.types.TRequestToken; +import it.grid.storm.srm.types.TSURL; +import it.grid.storm.srm.types.TLifeTimeInSeconds; +import it.grid.storm.srm.types.TFileStorageType; +import it.grid.storm.srm.types.TSpaceToken; +import it.grid.storm.srm.types.TOverwriteMode; +import it.grid.storm.common.types.TURLPrefix; +import it.grid.storm.srm.types.TSizeInBytes; +import it.grid.storm.srm.types.TReturnStatus; +import it.grid.storm.srm.types.TTURL; + +/** + * This class represents an exceptin thrown when the attributes supplied to the constructor of + * PtPChunkData are invalid, that is if any of the following is _null_: requestToken, toSURL, + * lifetime, fileStorageType, spaceToken, knownSizeOfThisFile, TURLPrefix transferProtocols, + * overwriteOption, fileSize, status, transferURL. + * + * @author EGRID - ICTP Trieste + * @date June, 2005 + * @version 2.0 + */ +public class InvalidPtPPersistentChunkDataAttributesException + extends InvalidPtPDataAttributesException { + + private static final long serialVersionUID = -5117535717125685975L; + /** + * booleans that indicate whether the corresponding variable is null + */ + boolean nullRequestToken; + + /** + * Constructor that requires the attributes that caused the exception to be thrown. 
+ */ + public InvalidPtPPersistentChunkDataAttributesException(TRequestToken requestToken, TSURL toSURL, + TLifeTimeInSeconds fileLifetime, TLifeTimeInSeconds pinLifetime, + TFileStorageType fileStorageType, TSpaceToken spaceToken, TSizeInBytes knownSizeOfThisFile, + TURLPrefix transferProtocols, TOverwriteMode overwriteOption, TReturnStatus status, + TTURL transferURL) { + + super(toSURL, fileLifetime, pinLifetime, fileStorageType, spaceToken, knownSizeOfThisFile, + transferProtocols, overwriteOption, status, transferURL); + nullRequestToken = requestToken == null; + } + + /* + * (non-Javadoc) + * + * @see java.lang.Object#toString() + */ + @Override + public String toString() { + + StringBuilder builder = new StringBuilder(); + builder.append("InvalidPtPPersistentChunkDataAttributesException [nullRequestToken="); + builder.append(nullRequestToken); + builder.append(", nullSpaceToken="); + builder.append(nullSpaceToken); + builder.append(", nullPinLifetime="); + builder.append(nullPinLifetime); + builder.append(", nullFileLifetime="); + builder.append(nullFileLifetime); + builder.append(", nullFileStorageType="); + builder.append(nullFileStorageType); + builder.append(", nullKnownSizeOfThisFile="); + builder.append(nullKnownSizeOfThisFile); + builder.append(", nullOverwriteOption="); + builder.append(nullOverwriteOption); + builder.append(", nullSURL="); + builder.append(nullSURL); + builder.append(", nullTransferProtocols="); + builder.append(nullTransferProtocols); + builder.append(", nullStatus="); + builder.append(nullStatus); + builder.append(", nullTransferURL="); + builder.append(nullTransferURL); + builder.append("]"); + return builder.toString(); + } +} diff --git a/src/main/java/it/grid/storm/catalogs/InvalidReducedBoLChunkDataAttributesException.java b/src/main/java/it/grid/storm/persistence/exceptions/InvalidReducedBoLChunkDataAttributesException.java similarity index 51% rename from 
src/main/java/it/grid/storm/catalogs/InvalidReducedBoLChunkDataAttributesException.java rename to src/main/java/it/grid/storm/persistence/exceptions/InvalidReducedBoLChunkDataAttributesException.java index ebcce1ef8..f78d76977 100644 --- a/src/main/java/it/grid/storm/catalogs/InvalidReducedBoLChunkDataAttributesException.java +++ b/src/main/java/it/grid/storm/persistence/exceptions/InvalidReducedBoLChunkDataAttributesException.java @@ -15,14 +15,14 @@ * the License. */ -package it.grid.storm.catalogs; +package it.grid.storm.persistence.exceptions; import it.grid.storm.srm.types.TSURL; import it.grid.storm.srm.types.TReturnStatus; /** - * This class represents an exceptin thrown when the attributes supplied to the - * constructor of ReducedBoLChunkData are invalid, that is if any is _null_. + * This class represents an exceptin thrown when the attributes supplied to the constructor of + * ReducedBoLChunkData are invalid, that is if any is _null_. * * @author EGRID - ICTP Trieste * @date November, 2006 @@ -30,32 +30,30 @@ */ public class InvalidReducedBoLChunkDataAttributesException extends Exception { - private static final long serialVersionUID = -8145580437017768234L; - - // booleans that indicate whether the corresponding variable is null - private boolean nullFromSURL; - private boolean nullStatus; - - /** - * Constructor that requires the attributes that caused the exception to be - * thrown. 
- */ - public InvalidReducedBoLChunkDataAttributesException(TSURL fromSURL, - TReturnStatus status) { - - nullFromSURL = fromSURL == null; - nullStatus = status == null; - } - - @Override - public String toString() { - - StringBuilder sb = new StringBuilder(); - sb.append("Invalid BoLChunkData attributes: null-fromSURL="); - sb.append(nullFromSURL); - sb.append("; null-status="); - sb.append(nullStatus); - sb.append("."); - return sb.toString(); - } + private static final long serialVersionUID = -8145580437017768234L; + + // booleans that indicate whether the corresponding variable is null + private boolean nullFromSURL; + private boolean nullStatus; + + /** + * Constructor that requires the attributes that caused the exception to be thrown. + */ + public InvalidReducedBoLChunkDataAttributesException(TSURL fromSURL, TReturnStatus status) { + + nullFromSURL = fromSURL == null; + nullStatus = status == null; + } + + @Override + public String toString() { + + StringBuilder sb = new StringBuilder(); + sb.append("Invalid BoLChunkData attributes: null-fromSURL="); + sb.append(nullFromSURL); + sb.append("; null-status="); + sb.append(nullStatus); + sb.append("."); + return sb.toString(); + } } diff --git a/src/main/java/it/grid/storm/catalogs/InvalidReducedPtGChunkDataAttributesException.java b/src/main/java/it/grid/storm/persistence/exceptions/InvalidReducedPtGChunkDataAttributesException.java similarity index 51% rename from src/main/java/it/grid/storm/catalogs/InvalidReducedPtGChunkDataAttributesException.java rename to src/main/java/it/grid/storm/persistence/exceptions/InvalidReducedPtGChunkDataAttributesException.java index 7a21f0e3f..8dccfff08 100644 --- a/src/main/java/it/grid/storm/catalogs/InvalidReducedPtGChunkDataAttributesException.java +++ b/src/main/java/it/grid/storm/persistence/exceptions/InvalidReducedPtGChunkDataAttributesException.java @@ -15,14 +15,14 @@ * the License. 
*/ -package it.grid.storm.catalogs; +package it.grid.storm.persistence.exceptions; import it.grid.storm.srm.types.TSURL; import it.grid.storm.srm.types.TReturnStatus; /** - * This class represents an exceptin thrown when the attributes supplied to the - * constructor of ReducedPtGChunkData are invalid, that is if any is _null_. + * This class represents an exceptin thrown when the attributes supplied to the constructor of + * ReducedPtGChunkData are invalid, that is if any is _null_. * * @author EGRID - ICTP Trieste * @date November, 2006 @@ -30,32 +30,30 @@ */ public class InvalidReducedPtGChunkDataAttributesException extends Exception { - private static final long serialVersionUID = -7943458526292568164L; - - // booleans that indicate whether the corresponding variable is null - private boolean nullFromSURL; - private boolean nullStatus; - - /** - * Constructor that requires the attributes that caused the exception to be - * thrown. - */ - public InvalidReducedPtGChunkDataAttributesException(TSURL fromSURL, - TReturnStatus status) { - - nullFromSURL = fromSURL == null; - nullStatus = status == null; - } - - @Override - public String toString() { - - StringBuilder sb = new StringBuilder(); - sb.append("Invalid PtGChunkData attributes: null-fromSURL="); - sb.append(nullFromSURL); - sb.append("; null-status="); - sb.append(nullStatus); - sb.append("."); - return sb.toString(); - } + private static final long serialVersionUID = -7943458526292568164L; + + // booleans that indicate whether the corresponding variable is null + private boolean nullFromSURL; + private boolean nullStatus; + + /** + * Constructor that requires the attributes that caused the exception to be thrown. 
+ */ + public InvalidReducedPtGChunkDataAttributesException(TSURL fromSURL, TReturnStatus status) { + + nullFromSURL = fromSURL == null; + nullStatus = status == null; + } + + @Override + public String toString() { + + StringBuilder sb = new StringBuilder(); + sb.append("Invalid PtGChunkData attributes: null-fromSURL="); + sb.append(nullFromSURL); + sb.append("; null-status="); + sb.append(nullStatus); + sb.append("."); + return sb.toString(); + } } diff --git a/src/main/java/it/grid/storm/persistence/exceptions/InvalidReducedPtPChunkDataAttributesException.java b/src/main/java/it/grid/storm/persistence/exceptions/InvalidReducedPtPChunkDataAttributesException.java new file mode 100644 index 000000000..0658d49d2 --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/exceptions/InvalidReducedPtPChunkDataAttributesException.java @@ -0,0 +1,70 @@ +/* + * + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package it.grid.storm.persistence.exceptions; + +import it.grid.storm.srm.types.TSURL; +import it.grid.storm.srm.types.TReturnStatus; +import it.grid.storm.srm.types.TFileStorageType; +import it.grid.storm.srm.types.TLifeTimeInSeconds; + +/** + * This class represents an exceptin thrown when the attributes supplied to the constructor of + * ReducedPtPChunkData are invalid, that is if any is _null_. 
+ * + * @author EGRID - ICTP Trieste + * @date January, 2007 + * @version 1.0 + */ +public class InvalidReducedPtPChunkDataAttributesException extends Exception { + + private static final long serialVersionUID = 4945626188325362854L; + + // booleans that indicate whether the corresponding variable is null + private boolean nullToSURL; + private boolean nullStatus; + private boolean nullFileStorageType; + private boolean nullFileLifetime; + + /** + * Constructor that requires the attributes that caused the exception to be thrown. + */ + public InvalidReducedPtPChunkDataAttributesException(TSURL toSURL, TReturnStatus status, + TFileStorageType fileStorageType, TLifeTimeInSeconds fileLifetime) { + + nullFileStorageType = fileStorageType == null; + nullToSURL = toSURL == null; + nullStatus = status == null; + nullFileLifetime = fileLifetime == null; + } + + @Override + public String toString() { + + StringBuilder sb = new StringBuilder(); + sb.append("Invalid PtPChunkData attributes: null-toSURL="); + sb.append(nullToSURL); + sb.append("; null-status="); + sb.append(nullStatus); + sb.append("; null-fileStorageType="); + sb.append(nullFileStorageType); + sb.append("; null-fileLifetime="); + sb.append(nullFileLifetime); + sb.append("."); + return sb.toString(); + } +} diff --git a/src/main/java/it/grid/storm/persistence/exceptions/InvalidRequestSummaryDataAttributesException.java b/src/main/java/it/grid/storm/persistence/exceptions/InvalidRequestSummaryDataAttributesException.java new file mode 100644 index 000000000..403e1ebb1 --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/exceptions/InvalidRequestSummaryDataAttributesException.java @@ -0,0 +1,65 @@ +/* + * + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. 
You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package it.grid.storm.persistence.exceptions; + +import it.grid.storm.srm.types.TRequestType; +import it.grid.storm.srm.types.TRequestToken; +import it.grid.storm.griduser.GridUserInterface; + +/** + * This class represents an Exception thrown when a RequestSummaryData object is created with any + * invalid attributes: null TRequestType, null TRequestToken, null VomsGridUser. + * + * @author EGRID - ICTP Trieste + * @date March 18th, 2005 + * @version 3.0 + */ +public class InvalidRequestSummaryDataAttributesException extends Exception { + + private static final long serialVersionUID = -7729349713696058669L; + + // booleans true if the corresponding variablesare null or negative + private boolean nullRequestType = true; + private boolean nullRequestToken = true; + private boolean nullVomsGridUser = true; + + /** + * Constructor that requires the attributes that caused the exception to be thrown. 
+ */ + public InvalidRequestSummaryDataAttributesException(TRequestType requestType, + TRequestToken requestToken, GridUserInterface gu) { + + nullRequestType = (requestType == null); + nullRequestToken = (requestToken == null); + nullVomsGridUser = (gu == null); + } + + @Override + public String toString() { + + StringBuilder sb = new StringBuilder(); + sb.append("Invalid RequestSummaryData attributes exception: "); + sb.append("nullRequestType="); + sb.append(nullRequestType); + sb.append("; nullRequestToken="); + sb.append(nullRequestToken); + sb.append("; nullVomsGridUser="); + sb.append(nullVomsGridUser); + return sb.toString(); + } +} diff --git a/src/main/java/it/grid/storm/catalogs/InvalidSpaceDataAttributesException.java b/src/main/java/it/grid/storm/persistence/exceptions/InvalidSpaceDataAttributesException.java similarity index 65% rename from src/main/java/it/grid/storm/catalogs/InvalidSpaceDataAttributesException.java rename to src/main/java/it/grid/storm/persistence/exceptions/InvalidSpaceDataAttributesException.java index dd0e2ff90..2b4e8990b 100644 --- a/src/main/java/it/grid/storm/catalogs/InvalidSpaceDataAttributesException.java +++ b/src/main/java/it/grid/storm/persistence/exceptions/InvalidSpaceDataAttributesException.java @@ -15,14 +15,13 @@ * the License. */ -package it.grid.storm.catalogs; +package it.grid.storm.persistence.exceptions; import it.grid.storm.griduser.GridUserInterface; import it.grid.storm.srm.types.TSpaceToken; /** - * This class represents an Exception throws if SpaceResData is not well formed. - * * + * This class represents an Exception throws if SpaceResData is not well formed. 
* * * @author Magnoni Luca * @author Cnaf - INFN Bologna @@ -32,24 +31,24 @@ public class InvalidSpaceDataAttributesException extends Exception { - private static final long serialVersionUID = -5317879266114702669L; + private static final long serialVersionUID = -5317879266114702669L; - private boolean nullAuth = true; - private boolean nullToken = true; + private boolean nullAuth = true; + private boolean nullToken = true; - public InvalidSpaceDataAttributesException(GridUserInterface guser) { + public InvalidSpaceDataAttributesException(GridUserInterface guser) { - nullAuth = (guser == null); - } + nullAuth = (guser == null); + } - public InvalidSpaceDataAttributesException(TSpaceToken token) { + public InvalidSpaceDataAttributesException(TSpaceToken token) { - nullToken = (token == null); - } + nullToken = (token == null); + } - public String toString() { + public String toString() { - return "null-Auth=" + nullAuth + "nullToken=" + nullToken; - } + return "null-Auth=" + nullAuth + "nullToken=" + nullToken; + } } diff --git a/src/main/java/it/grid/storm/persistence/exceptions/InvalidSurlRequestDataAttributesException.java b/src/main/java/it/grid/storm/persistence/exceptions/InvalidSurlRequestDataAttributesException.java new file mode 100644 index 000000000..582f3c396 --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/exceptions/InvalidSurlRequestDataAttributesException.java @@ -0,0 +1,83 @@ +/* + * + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package it.grid.storm.persistence.exceptions; + +import it.grid.storm.srm.types.TReturnStatus; +import it.grid.storm.srm.types.TSURL; + +/** + * @author Michele Dibenedetto + * + */ +public class InvalidSurlRequestDataAttributesException extends Exception { + + private static final long serialVersionUID = -8636768167720753989L; + protected boolean nullSURL; + protected boolean nullStatus; + + public InvalidSurlRequestDataAttributesException(TSURL SURL, TReturnStatus status) { + + super(); + init(SURL, status); + } + + public InvalidSurlRequestDataAttributesException(TSURL SURL, TReturnStatus status, + String message) { + + super(message); + init(SURL, status); + } + + public InvalidSurlRequestDataAttributesException(TSURL SURL, TReturnStatus status, + Throwable cause) { + + super(cause); + init(SURL, status); + } + + public InvalidSurlRequestDataAttributesException(TSURL SURL, TReturnStatus status, String message, + Throwable cause) { + + super(message, cause); + init(SURL, status); + } + + private void init(TSURL SURL, TReturnStatus status) { + + nullSURL = SURL == null; + nullStatus = status == null; + } + + /* + * (non-Javadoc) + * + * @see java.lang.Object#toString() + */ + @Override + public String toString() { + + StringBuilder builder = new StringBuilder(); + builder.append("InvalidSurlRequestDataAttributesException [nullSURL="); + builder.append(nullSURL); + builder.append(", nullStatus="); + builder.append(nullStatus); + builder.append("]"); + return builder.toString(); + } + +} diff --git a/src/main/java/it/grid/storm/catalogs/MalformedGridUserException.java b/src/main/java/it/grid/storm/persistence/exceptions/MalformedGridUserException.java similarity index 95% rename from src/main/java/it/grid/storm/catalogs/MalformedGridUserException.java rename to src/main/java/it/grid/storm/persistence/exceptions/MalformedGridUserException.java index 
f4b61055e..1f5fc2bb1 100644 --- a/src/main/java/it/grid/storm/catalogs/MalformedGridUserException.java +++ b/src/main/java/it/grid/storm/persistence/exceptions/MalformedGridUserException.java @@ -15,7 +15,7 @@ * the License. */ -package it.grid.storm.catalogs; +package it.grid.storm.persistence.exceptions; /** * This class represents an Exception thrown when the RequestSummaryCatalog cannot create a diff --git a/src/main/java/it/grid/storm/persistence/exceptions/PersistenceException.java b/src/main/java/it/grid/storm/persistence/exceptions/PersistenceException.java deleted file mode 100644 index 8c645afba..000000000 --- a/src/main/java/it/grid/storm/persistence/exceptions/PersistenceException.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ - -package it.grid.storm.persistence.exceptions; - -/** - * This exception is used to mark generic failures in persistence layer - * - */ - -public class PersistenceException extends Exception { - - public PersistenceException() { - - super(); - } - - public PersistenceException(String message) { - - super(message); - } - - public PersistenceException(String message, Throwable cause) { - - super(message, cause); - } - - public PersistenceException(Throwable cause) { - - super(cause); - } - -} diff --git a/src/main/java/it/grid/storm/persistence/impl/mysql/BoLChunkDAOMySql.java b/src/main/java/it/grid/storm/persistence/impl/mysql/BoLChunkDAOMySql.java new file mode 100644 index 000000000..08a102066 --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/impl/mysql/BoLChunkDAOMySql.java @@ -0,0 +1,948 @@ +/* + * + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +package it.grid.storm.persistence.impl.mysql; + +import static it.grid.storm.srm.types.TRequestType.BRING_ON_LINE; +import static it.grid.storm.srm.types.TStatusCode.SRM_ABORTED; +import static it.grid.storm.srm.types.TStatusCode.SRM_RELEASED; +import static it.grid.storm.srm.types.TStatusCode.SRM_REQUEST_INPROGRESS; +import static it.grid.storm.srm.types.TStatusCode.SRM_SUCCESS; +import static java.sql.Statement.RETURN_GENERATED_KEYS; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Timestamp; +import java.util.Collection; +import java.util.Date; +import java.util.Iterator; +import java.util.List; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.collect.Lists; + +import it.grid.storm.namespace.NamespaceException; +import it.grid.storm.namespace.naming.SURL; +import it.grid.storm.persistence.converter.RequestTypeConverter; +import it.grid.storm.persistence.converter.StatusCodeConverter; +import it.grid.storm.persistence.dao.AbstractDAO; +import it.grid.storm.persistence.dao.BoLChunkDAO; +import it.grid.storm.persistence.model.BoLChunkDataTO; +import it.grid.storm.persistence.model.ReducedBoLChunkDataTO; +import it.grid.storm.persistence.pool.StormDbConnectionPool; +import it.grid.storm.srm.types.TRequestToken; +import it.grid.storm.srm.types.TStatusCode; + +/** + * DAO class for BoLChunkCatalog. This DAO is specifically designed to connect to a MySQL DB. The + * raw data found in those tables is pre-treated in order to turn it into the Object Model of StoRM. + * See Method comments for further info. BEWARE! DAO Adjusts for extra fields in the DB that are not + * present in the object model. 
+ * + * @author CNAF + * @version 1.0 + * @date Aug 2009 + */ +public class BoLChunkDAOMySql extends AbstractDAO implements BoLChunkDAO { + + private static final Logger log = LoggerFactory.getLogger(BoLChunkDAOMySql.class); + + private static final String SELECT_FROM_REQUEST_QUEUE_WITH_TOKEN = + "SELECT rq.ID FROM request_queue rq WHERE rq.r_token=?"; + + private static final String SELECT_FULL_BOL_REQUEST_WITH_TOKEN_AND_STATUS = + "SELECT sb.statusCode, rq.timeStamp, rq.pinLifetime, rq.deferredStartTime, rb.ID, rb.sourceSURL, rb.normalized_sourceSURL_StFN, rb.sourceSURL_uniqueID, d.isSourceADirectory, d.allLevelRecursive, d.numOfLevels " + + "FROM request_queue rq JOIN (request_BoL rb, status_BoL sb) " + + "ON (rb.request_queueID=rq.ID AND sb.request_BoLID=rb.ID) " + + "LEFT JOIN request_DirOption d ON rb.request_DirOptionID=d.ID " + + "WHERE rq.r_token=? AND sb.statusCode<>?"; + + private static final String SELECT_FULL_BOL_REQUEST_WITH_TOKEN = + "SELECT sb.statusCode, rb.ID, rb.sourceSURL, rb.normalized_sourceSURL_StFN, rb.sourceSURL_uniqueID " + + "FROM request_queue rq JOIN (request_BoL rb, status_BoL sb) " + + "ON (rb.request_queueID=rq.ID AND sb.request_BoLID=rb.ID) " + "WHERE rq.r_token=?"; + + private static final String INSERT_INTO_REQUEST_QUEUE = + "INSERT INTO request_queue (config_RequestTypeID,client_dn,pinLifetime,status,errstring,r_token,nbreqfiles,timeStamp,deferredStartTime) " + + "VALUES (?,?,?,?,?,?,?,?,?)"; + + private static final String INSERT_INTO_REQUEST_TRANSFER_PROTOCOLS = + "INSERT INTO request_TransferProtocols (request_queueID,config_ProtocolsID) VALUES (?,?)"; + + private static final String INSERT_INTO_REQUEST_DIR_OPTION = + "INSERT INTO request_DirOption (isSourceADirectory,allLevelRecursive,numOfLevels) VALUES (?,?,?)"; + + private static final String INSERT_INTO_REQUEST_BOL = + "INSERT INTO request_BoL (request_DirOptionID,request_queueID,sourceSURL,normalized_sourceSURL_StFN,sourceSURL_uniqueID) " + + "VALUES (?,?,?,?,?)"; + + 
private static final String UPDATE_REQUEST_BOL_WHERE_ID = + "UPDATE request_BoL SET normalized_sourceSURL_StFN=?, sourceSURL_uniqueID=? " + "WHERE ID=?"; + + private static final String INSERT_INTO_STATUS_BOL = + "INSERT INTO status_BoL (request_BoLID,statusCode,explanation) VALUES (?,?,?)"; + + private static final String UPDATE_REQUEST_QUEUE_WHERE_ID = + "UPDATE request_queue rq JOIN (status_BoL sb, request_BoL rb) ON (rq.ID=rb.request_queueID AND sb.request_BoLID=rb.ID) " + + "SET sb.fileSize=?, sb.statusCode=?, sb.explanation=?, rq.pinLifetime=?, rb.normalized_sourceSURL_StFN=?, rb.sourceSURL_uniqueID=? " + + "WHERE rb.ID=?"; + + private static final String SELECT_REQUEST_PROTOCOLS_WHERE_TOKEN = "SELECT tp.config_ProtocolsID " + + "FROM request_TransferProtocols tp JOIN request_queue rq ON tp.request_queueID=rq.ID " + + "WHERE rq.r_token=?"; + + private static final String UPDATE_STATUS_WHERE_ID = + "UPDATE status_BoL SET statusCode=?, explanation=? WHERE request_BoLID=?"; + + private static final String UPDATE_STATUS_FOR_EXPIRED_PIN_REQUESTS_WITH_STATUS = + "UPDATE status_BoL sb " + + "JOIN (request_BoL rb, request_queue rq) ON sb.request_BoLID=rb.ID AND rb.request_queueID=rq.ID " + + "SET sb.statusCode=? " + + "WHERE sb.statusCode=? AND UNIX_TIMESTAMP(NOW())-UNIX_TIMESTAMP(rq.timeStamp) >= rq.pinLifetime "; + + private static BoLChunkDAOMySql instance; + + public static synchronized BoLChunkDAO getInstance() { + if (instance == null) { + instance = new BoLChunkDAOMySql(); + } + return instance; + } + + private final StatusCodeConverter statusCodeConverter; + private final RequestTypeConverter requestTypeConverter; + + private BoLChunkDAOMySql() { + super(StormDbConnectionPool.getInstance()); + statusCodeConverter = StatusCodeConverter.getInstance(); + requestTypeConverter = RequestTypeConverter.getInstance(); + } + + /** + * Method used to add a new record to the DB: the supplied BoLChunkDataTO gets its primaryKey + * changed to the one assigned by the DB. 
The supplied BoLChunkData is used to fill in only the DB + * table where file specific info gets recorded: it does _not_ add a new request! So if spurious + * data is supplied, it will just stay there because of a lack of a parent request! + */ + public synchronized void addChild(BoLChunkDataTO to) { + + Connection con = null; + ResultSet res = null; + PreparedStatement ps = null; + + try { + con = getManagedConnection(); + + /* + * WARNING!!!! We are forced to run a query to get the ID of the request, which should NOT be + * so because the corresponding request object should have been changed with the extra field! + * However, it is not possible at the moment to perform such change because of strict deadline + * and the change could wreak havoc the code. So we are forced to make this query!!! + */ + + ps = con.prepareStatement(SELECT_FROM_REQUEST_QUEUE_WITH_TOKEN); + ps.setString(1, to.getRequestToken()); + log.debug("BoL CHUNK DAO: addChild; {}", ps); + res = ps.executeQuery(); + + /* ID of request in request_process! */ + int requestId = extractID(res); + int id = fillBoLTables(con, to, requestId); + + // end transaction! + con.commit(); + + // update primary key reading the generated key + to.setPrimaryKey(id); + } catch (Exception e) { + e.printStackTrace(); + try { + con.rollback(); + } catch (SQLException e1) { + e1.printStackTrace(); + } + } finally { + closeResultSet(res); + closeStatement(ps); + closeConnection(con); + } + } + + /** + * Method used to add a new record to the DB: the supplied BoLChunkDataTO gets its primaryKey + * changed to the one assigned by the DB. The client_dn must also be supplied as a String. The + * supplied BoLChunkData is used to fill in all the DB tables where file specific info gets + * recorded: it _adds_ a new request! 
+ */ + public synchronized void addNew(BoLChunkDataTO to, String client_dn) { + + final String DESCRIPTION = "New BoL Request resulting from srmCopy invocation."; + + /* Result set containing the ID of the inserted new request */ + ResultSet rs = null; + PreparedStatement addReqQ = null; + PreparedStatement addReqTP = null; + Connection con = null; + + try { + // begin transaction + + con = getManagedConnection(); + + // add to request_queue... + addReqQ = con.prepareStatement(INSERT_INTO_REQUEST_QUEUE, RETURN_GENERATED_KEYS); + /* request type set to bring online */ + addReqQ.setString(1, requestTypeConverter.toDB(BRING_ON_LINE)); + addReqQ.setString(2, client_dn); + addReqQ.setInt(3, to.getLifeTime()); + addReqQ.setInt(4, statusCodeConverter.toDB(SRM_REQUEST_INPROGRESS)); + addReqQ.setString(5, DESCRIPTION); + addReqQ.setString(6, to.getRequestToken()); + addReqQ.setInt(7, 1); // number of requested files set to 1! + addReqQ.setTimestamp(8, new Timestamp(new Date().getTime())); + addReqQ.setInt(9, to.getDeferredStartTime()); + log.trace("BoL CHUNK DAO: addNew; {}", addReqQ); + addReqQ.execute(); + + rs = addReqQ.getGeneratedKeys(); + int id_new = extractID(rs); + + addReqTP = con.prepareStatement(INSERT_INTO_REQUEST_TRANSFER_PROTOCOLS); + for (Iterator i = to.getProtocolList().iterator(); i.hasNext();) { + addReqTP.setInt(1, id_new); + addReqTP.setString(2, i.next()); + log.trace("BoL CHUNK DAO: addNew; {}", addReqTP); + addReqTP.execute(); + } + + // addChild... + int id_s = fillBoLTables(con, to, id_new); + + // end transaction! + con.commit(); + + // update primary key reading the generated key + to.setPrimaryKey(id_s); + } catch (Exception e) { + log.error("BoL CHUNK DAO: unable to complete addNew! 
BoLChunkDataTO: {}; " + + "exception received: {}", to, e.getMessage(), e); + try { + con.rollback(); + } catch (SQLException e1) { + e1.printStackTrace(); + } + } finally { + closeResultSet(rs); + closeStatement(addReqQ); + closeStatement(addReqTP); + closeConnection(con); + } + } + + /** + * To be used inside a transaction + * + * @param to + * @param requestQueueID + * @return + * @throws SQLException + * @throws Exception + */ + private synchronized int fillBoLTables(Connection con, BoLChunkDataTO to, int requestQueueID) + throws SQLException, Exception { + + /* Result set containing the ID of the inserted */ + ResultSet rs_do = null; + /* Result set containing the ID of the inserted */ + ResultSet rs_b = null; + /* Result set containing the ID of the inserted */ + ResultSet rs_s = null; + /* insert TDirOption for request */ + PreparedStatement addDirOption = null; + /* insert request_Bol for request */ + PreparedStatement addBoL = null; + PreparedStatement addChild = null; + + try { + // first fill in TDirOption + addDirOption = con.prepareStatement(INSERT_INTO_REQUEST_DIR_OPTION, RETURN_GENERATED_KEYS); + addDirOption.setBoolean(1, to.getDirOption()); + addDirOption.setBoolean(2, to.getAllLevelRecursive()); + addDirOption.setInt(3, to.getNumLevel()); + log.trace("BoL CHUNK DAO: addNew; {}", addDirOption); + addDirOption.execute(); + + rs_do = addDirOption.getGeneratedKeys(); + int id_do = extractID(rs_do); + + // second fill in request_BoL... sourceSURL and TDirOption! + addBoL = con.prepareStatement(INSERT_INTO_REQUEST_BOL, RETURN_GENERATED_KEYS); + addBoL.setInt(1, id_do); + addBoL.setInt(2, requestQueueID); + addBoL.setString(3, to.getFromSURL()); + addBoL.setString(4, to.normalizedStFN()); + addBoL.setInt(5, to.sulrUniqueID()); + log.trace("BoL CHUNK DAO: addNew; {}", addBoL); + addBoL.execute(); + + rs_b = addBoL.getGeneratedKeys(); + int id_g = extractID(rs_b); + + // third fill in status_BoL... 
+ addChild = con.prepareStatement(INSERT_INTO_STATUS_BOL, RETURN_GENERATED_KEYS); + addChild.setInt(1, id_g); + addChild.setInt(2, to.getStatus()); + addChild.setString(3, to.getErrString()); + log.trace("BoL CHUNK DAO: addNew; " + addChild); + addChild.execute(); + + return id_g; + } finally { + closeResultSet(rs_do); + closeResultSet(rs_b); + closeResultSet(rs_s); + closeStatement(addDirOption); + closeStatement(addBoL); + closeStatement(addChild); + } + } + + /** + * Method used to save the changes made to a retrieved BoLChunkDataTO, back into the MySQL DB. + * Only the fileSize, statusCode and explanation, of status_BoL table are written to the DB. + * Likewise for the request pinLifetime. In case of any error, an error message gets logged but no + * exception is thrown. + */ + public synchronized void update(BoLChunkDataTO to) { + + Connection con = null; + PreparedStatement updateFileReq = null; + try { + con = getConnection(); + // ready updateFileReq... + updateFileReq = con.prepareStatement(UPDATE_REQUEST_QUEUE_WHERE_ID); + updateFileReq.setLong(1, to.getFileSize()); + updateFileReq.setInt(2, to.getStatus()); + updateFileReq.setString(3, to.getErrString()); + updateFileReq.setInt(4, to.getLifeTime()); + updateFileReq.setString(5, to.normalizedStFN()); + updateFileReq.setInt(6, to.sulrUniqueID()); + updateFileReq.setLong(7, to.getPrimaryKey()); + // execute update + log.trace("BoL CHUNK DAO: update method; {}", updateFileReq); + updateFileReq.executeUpdate(); + } catch (SQLException e) { + log.error("BoL CHUNK DAO: Unable to complete update! 
{}", e.getMessage(), e); + } finally { + closeStatement(updateFileReq); + closeConnection(con); + } + } + + /** + * Updates the request_Bol represented by the received ReducedBoLChunkDataTO by setting its + * normalized_sourceSURL_StFN and sourceSURL_uniqueID + * + * @param chunkTO + */ + public synchronized void updateIncomplete(ReducedBoLChunkDataTO chunkTO) { + + Connection con = null; + PreparedStatement ps = null; + try { + con = getConnection(); + ps = con.prepareStatement(UPDATE_REQUEST_BOL_WHERE_ID); + ps.setString(1, chunkTO.normalizedStFN()); + ps.setInt(2, chunkTO.surlUniqueID()); + ps.setLong(3, chunkTO.primaryKey()); + log.trace("BoL CHUNK DAO - update incomplete: {}", ps); + ps.executeUpdate(); + } catch (SQLException e) { + log.error("BoL CHUNK DAO: Unable to complete update incomplete! {}", e.getMessage(), e); + e.printStackTrace(); + } finally { + closeStatement(ps); + closeConnection(con); + } + } + + /** + * Method that queries the MySQL DB to find all entries matching the supplied TRequestToken. The + * Collection contains the corresponding BoLChunkDataTO objects. An initial simple query + * establishes the list of protocols associated with the request. A second complex query + * establishes all chunks associated with the request, by properly joining request_queue, + * request_BoL, status_BoL and request_DirOption. The considered fields are: (1) From status_BoL: + * the ID field which becomes the TOs primary key, and statusCode. (2) From request_BoL: + * sourceSURL (3) From request_queue: pinLifetime (4) From request_DirOption: isSourceADirectory, + * alLevelRecursive, numOfLevels In case of any error, a log gets written and an empty collection + * is returned. No exception is thrown. NOTE! Chunks in SRM_ABORTED status are NOT returned! 
+ */ + public synchronized Collection find(TRequestToken requestToken) { + + Connection con = null; + PreparedStatement pps = null; + PreparedStatement rps = null; + ResultSet prs = null; + ResultSet rrs = null; + + try { + + con = getConnection(); + pps = con.prepareStatement(SELECT_REQUEST_PROTOCOLS_WHERE_TOKEN); + + List protocols = Lists.newArrayList(); + pps.setString(1, requestToken.getValue()); + log.trace("BoL CHUNK DAO: find method; {}", pps); + prs = pps.executeQuery(); + + while (prs.next()) { + protocols.add(prs.getString("tp.config_ProtocolsID")); + } + + rps = con.prepareStatement(SELECT_FULL_BOL_REQUEST_WITH_TOKEN_AND_STATUS); + List results = Lists.newArrayList(); + rps.setString(1, requestToken.getValue()); + rps.setInt(2, statusCodeConverter.toDB(SRM_ABORTED)); + log.trace("BoL CHUNK DAO: find method; {}", rps); + rrs = rps.executeQuery(); + + while (rrs.next()) { + + BoLChunkDataTO chunkDataTO = new BoLChunkDataTO(); + chunkDataTO.setStatus(rrs.getInt("sb.statusCode")); + chunkDataTO.setLifeTime(rrs.getInt("rq.pinLifetime")); + chunkDataTO.setDeferredStartTime(rrs.getInt("rq.deferredStartTime")); + chunkDataTO.setRequestToken(requestToken.getValue()); + chunkDataTO.setTimeStamp(rrs.getTimestamp("rq.timeStamp")); + chunkDataTO.setPrimaryKey(rrs.getLong("rb.ID")); + chunkDataTO.setFromSURL(rrs.getString("rb.sourceSURL")); + chunkDataTO.setNormalizedStFN(rrs.getString("rb.normalized_sourceSURL_StFN")); + + int uniqueID = rrs.getInt("rb.sourceSURL_uniqueID"); + if (!rrs.wasNull()) { + chunkDataTO.setSurlUniqueID(new Integer(uniqueID)); + } + + chunkDataTO.setDirOption(rrs.getBoolean("d.isSourceADirectory")); + chunkDataTO.setAllLevelRecursive(rrs.getBoolean("d.allLevelRecursive")); + chunkDataTO.setNumLevel(rrs.getInt("d.numOfLevels")); + chunkDataTO.setProtocolList(protocols); + results.add(chunkDataTO); + } + return results; + + } catch (SQLException e) { + + log.error("BOL CHUNK DAO: {}", e.getMessage(), e); + e.printStackTrace(); + return 
Lists.newArrayList(); + + } finally { + closeResultSet(prs); + closeResultSet(rrs); + closeStatement(pps); + closeStatement(rps); + closeConnection(con); + } + } + + /** + * Method that returns a Collection of ReducedBoLChunkDataTO associated to the given TRequestToken + * expressed as String. + */ + public synchronized Collection findReduced(TRequestToken requestToken) { + + Connection con = null; + PreparedStatement ps = null; + ResultSet rs = null; + + List results = Lists.newArrayList(); + + try { + + con = getConnection(); + + ps = con.prepareStatement(SELECT_FULL_BOL_REQUEST_WITH_TOKEN); + ps.setString(1, requestToken.getValue()); + log.trace("BoL CHUNK DAO! findReduced with request token; {}", ps); + rs = ps.executeQuery(); + + ReducedBoLChunkDataTO chunkDataTO = null; + while (rs.next()) { + chunkDataTO = new ReducedBoLChunkDataTO(); + chunkDataTO.setStatus(rs.getInt("sb.statusCode")); + chunkDataTO.setPrimaryKey(rs.getLong("rb.ID")); + chunkDataTO.setFromSURL(rs.getString("rb.sourceSURL")); + chunkDataTO.setNormalizedStFN(rs.getString("rb.normalized_sourceSURL_StFN")); + int uniqueID = rs.getInt("rb.sourceSURL_uniqueID"); + if (!rs.wasNull()) { + chunkDataTO.setSurlUniqueID(uniqueID); + } + results.add(chunkDataTO); + } + return results; + + } catch (SQLException e) { + + log.error("BOL CHUNK DAO: {}", e.getMessage(), e); + e.printStackTrace(); + return results; + + } finally { + closeResultSet(rs); + closeStatement(ps); + closeConnection(con); + } + } + + /** + * Method that returns a Collection of ReducedBoLChunkDataTO associated to the given griduser, and + * whose SURLs are contained in the supplied array of Strings. 
+ */ + public synchronized Collection findReduced(TRequestToken requestToken, + int[] surlUniqueIDs, String[] surls) { + + Connection con = null; + PreparedStatement find = null; + ResultSet rs = null; + Collection results = Lists.newArrayList(); + + try { + + con = getConnection(); + + /* + * NOTE: we search also on the fromSurl because otherwise we lost all request_Bol that have + * not the uniqueID set because are not yet been used by anybody + */ + // get reduced chunks + String str = + "SELECT sb.statusCode, rb.ID, rb.sourceSURL, rb.normalized_sourceSURL_StFN, rb.sourceSURL_uniqueID " + + "FROM request_queue rq JOIN (request_BoL rb, status_BoL sb) " + + "ON (rb.request_queueID=rq.ID AND sb.request_BoLID=rb.ID) " + + "WHERE rq.r_token=? AND ( rb.sourceSURL_uniqueID IN " + + makeSURLUniqueIDWhere(surlUniqueIDs) + " AND rb.sourceSURL IN " + + makeSurlString(surls) + " ) "; + find = con.prepareStatement(str); + find.setString(1, requestToken.getValue()); + + log.trace("BoL CHUNK DAO! findReduced with griduser+surlarray; {}", find); + rs = find.executeQuery(); + + ReducedBoLChunkDataTO chunkDataTO = null; + while (rs.next()) { + chunkDataTO = new ReducedBoLChunkDataTO(); + chunkDataTO.setStatus(rs.getInt("sb.statusCode")); + chunkDataTO.setPrimaryKey(rs.getLong("rb.ID")); + chunkDataTO.setFromSURL(rs.getString("rb.sourceSURL")); + chunkDataTO.setNormalizedStFN(rs.getString("rb.normalized_sourceSURL_StFN")); + int uniqueID = rs.getInt("rb.sourceSURL_uniqueID"); + if (!rs.wasNull()) { + chunkDataTO.setSurlUniqueID(uniqueID); + } + results.add(chunkDataTO); + } + } catch (SQLException e) { + log.error("BoL CHUNK DAO: {}", e.getMessage(), e); + e.printStackTrace(); + } finally { + closeResultSet(rs); + closeStatement(find); + closeConnection(con); + } + return results; + } + + /** + * Method that returns a Collection of ReducedBoLChunkDataTO associated to the given griduser, and + * whose SURLs are contained in the supplied array of Strings. 
+ */ + public synchronized Collection findReduced(String griduser, + int[] surlUniqueIDs, String[] surls) { + + Connection con = null; + PreparedStatement find = null; + ResultSet rs = null; + + Collection results = Lists.newArrayList(); + + try { + + con = getConnection(); + + /* + * NOTE: we search also on the fromSurl because otherwise we lost all request_Bol that have + * not the uniqueID set because are not yet been used by anybody + */ + // get reduced chunks + String str = + "SELECT sb.statusCode, rb.ID, rb.sourceSURL, rb.normalized_sourceSURL_StFN, rb.sourceSURL_uniqueID " + + "FROM request_queue rq JOIN (request_BoL rb, status_BoL sb) " + + "ON (rb.request_queueID=rq.ID AND sb.request_BoLID=rb.ID) " + + "WHERE rq.client_dn=? AND ( rb.sourceSURL_uniqueID IN " + + makeSURLUniqueIDWhere(surlUniqueIDs) + " AND rb.sourceSURL IN " + + makeSurlString(surls) + " ) "; + find = con.prepareStatement(str); + find.setString(1, griduser); + log.trace("BoL CHUNK DAO! findReduced with griduser+surlarray; {}", find); + rs = find.executeQuery(); + + while (rs.next()) { + ReducedBoLChunkDataTO chunkDataTO = new ReducedBoLChunkDataTO(); + chunkDataTO.setStatus(rs.getInt("sb.statusCode")); + chunkDataTO.setPrimaryKey(rs.getLong("rb.ID")); + chunkDataTO.setFromSURL(rs.getString("rb.sourceSURL")); + chunkDataTO.setNormalizedStFN(rs.getString("rb.normalized_sourceSURL_StFN")); + int uniqueID = rs.getInt("rb.sourceSURL_uniqueID"); + if (!rs.wasNull()) { + chunkDataTO.setSurlUniqueID(uniqueID); + } + results.add(chunkDataTO); + } + } catch (SQLException e) { + log.error("BoL CHUNK DAO: {}", e.getMessage(), e); + e.printStackTrace(); + } finally { + closeResultSet(rs); + closeStatement(find); + closeConnection(con); + } + return results; + } + + public synchronized int updateStatus(BoLChunkDataTO to, TStatusCode status, String explanation) { + + Connection con = null; + PreparedStatement ps = null; + int result = 0; + + try { + con = getConnection(); + ps = 
con.prepareStatement(UPDATE_STATUS_WHERE_ID); + ps.setInt(1, statusCodeConverter.toDB(status)); + ps.setString(2, explanation); + ps.setLong(3, to.getPrimaryKey()); + log.trace("BoL CHUNK DAO: update status {}", ps); + result = ps.executeUpdate(); + + } catch (SQLException e) { + e.printStackTrace(); + } finally { + closeStatement(ps); + closeConnection(con); + } + return result; + } + + /** + * Method that updates to SRM_RELEASED all the requests in SRM_SUCCESS status which have the + * requested pin lifetime expired. This is necessary when the client forgets to invoke + * srmReleaseFiles(). + * + * @return List of updated SURLs. + */ + public synchronized int releaseExpiredAndSuccessfulRequests() { + + Connection con = null; + PreparedStatement ps = null; + + int count = 0; + + try { + + // start transaction + con = getConnection(); + + /* Update status of all successful expired requests to SRM_RELEASED */ + ps = con.prepareStatement(UPDATE_STATUS_FOR_EXPIRED_PIN_REQUESTS_WITH_STATUS); + ps.setInt(1, statusCodeConverter.toDB(SRM_RELEASED)); + ps.setInt(2, statusCodeConverter.toDB(SRM_SUCCESS)); + log.trace("BoL CHUNK DAO - transitExpiredSRM_SUCCESS method: {}", ps); + + count = ps.executeUpdate(); + + if (count == 0) { + log.trace( + "BoLChunkDAO! No chunk of BoL request was transited from SRM_SUCCESS to SRM_RELEASED."); + } else { + log.info( + "BoLChunkDAO! {} chunks of BoL requests were transited from SRM_SUCCESS to SRM_RELEASED.", + count); + } + + } catch (SQLException e) { + + log.error("BoLChunkDAO! 
SQLException.", e.getMessage(), e); + e.printStackTrace(); + + } finally { + + closeStatement(ps); + closeConnection(con); + } + return count; + } + + public synchronized void updateStatusOnMatchingStatus(TRequestToken requestToken, + TStatusCode expectedStatusCode, TStatusCode newStatusCode, String explanation) { + + if (requestToken == null || requestToken.getValue().trim().isEmpty() || explanation == null) { + throw new IllegalArgumentException("Unable to perform the updateStatusOnMatchingStatus, " + + "invalid arguments: requestToken=" + requestToken + " explanation=" + explanation); + } + doUpdateStatusOnMatchingStatus(requestToken, null, null, expectedStatusCode, newStatusCode, + explanation, true, false, true); + } + + private synchronized int doUpdateStatusOnMatchingStatus(TRequestToken requestToken, + int[] surlUniqueIDs, String[] surls, TStatusCode expectedStatusCode, + TStatusCode newStatusCode, String explanation, boolean withRequestToken, boolean withSurls, + boolean withExplanation) throws IllegalArgumentException { + + if ((withRequestToken && requestToken == null) || (withExplanation && explanation == null) + || (withSurls && (surlUniqueIDs == null || surls == null))) { + throw new IllegalArgumentException("Unable to perform the doUpdateStatusOnMatchingStatus, " + + "invalid arguments: withRequestToken=" + withRequestToken + " requestToken=" + + requestToken + " withSurls=" + withSurls + " surlUniqueIDs=" + surlUniqueIDs + " surls=" + + surls + " withExplaination=" + withExplanation + " explanation=" + explanation); + } + String str = "UPDATE status_BoL sb JOIN (request_BoL rb, request_queue rq) " + + "ON sb.request_BoLID=rb.ID AND rb.request_queueID=rq.ID " + "SET sb.statusCode=? "; + if (withExplanation) { + str += " , " + buildExplanationSet(explanation); + } + str += " WHERE sb.statusCode=? 
"; + if (withRequestToken) { + str += " AND " + buildTokenWhereClause(requestToken); + } + if (withSurls) { + str += " AND " + buildSurlsWhereClause(surlUniqueIDs, surls); + } + + Connection con = null; + PreparedStatement stmt = null; + int count = 0; + + try { + con = getConnection(); + stmt = con.prepareStatement(str); + stmt.setInt(1, statusCodeConverter.toDB(newStatusCode)); + stmt.setInt(2, statusCodeConverter.toDB(expectedStatusCode)); + log.trace("BOL CHUNK DAO - updateStatusOnMatchingStatus: {}", stmt); + count = stmt.executeUpdate(); + + } catch (SQLException e) { + log.error("BOL CHUNK DAO! Unable to updated from {} to {}!", expectedStatusCode, + newStatusCode, e); + e.printStackTrace(); + } finally { + closeStatement(stmt); + closeConnection(con); + } + + return count; + } + + public Collection find(int[] surlsUniqueIDs, String[] surlsArray, String dn) + throws IllegalArgumentException { + + if (surlsUniqueIDs == null || surlsUniqueIDs.length == 0 || surlsArray == null + || surlsArray.length == 0 || dn == null) { + throw new IllegalArgumentException( + "Unable to perform the find, " + "invalid arguments: surlsUniqueIDs=" + surlsUniqueIDs + + " surlsArray=" + surlsArray + " dn=" + dn); + } + return find(surlsUniqueIDs, surlsArray, dn, true); + } + + public Collection find(int[] surlsUniqueIDs, String[] surlsArray) + throws IllegalArgumentException { + + if (surlsUniqueIDs == null || surlsUniqueIDs.length == 0 || surlsArray == null + || surlsArray.length == 0) { + throw new IllegalArgumentException("Unable to perform the find, " + + "invalid arguments: surlsUniqueIDs=" + surlsUniqueIDs + " surlsArray=" + surlsArray); + } + return find(surlsUniqueIDs, surlsArray, null, false); + } + + private synchronized Collection find(int[] surlsUniqueIDs, String[] surlsArray, + String dn, boolean withDn) throws IllegalArgumentException { + + if ((withDn && dn == null) || surlsUniqueIDs == null || surlsUniqueIDs.length == 0 + || surlsArray == null || surlsArray.length 
== 0) { + throw new IllegalArgumentException( + "Unable to perform the find, " + "invalid arguments: surlsUniqueIDs=" + surlsUniqueIDs + + " surlsArray=" + surlsArray + " withDn=" + withDn + " dn=" + dn); + } + + Connection con = null; + PreparedStatement find = null; + ResultSet rs = null; + Collection results = Lists.newArrayList(); + + try { + + con = getConnection(); + + // get chunks of the request + String str = "SELECT rq.ID, rq.r_token, sb.statusCode, rq.timeStamp, rq.pinLifetime, " + + "rq.deferredStartTime, rb.ID, rb.sourceSURL, rb.normalized_sourceSURL_StFN, " + + "rb.sourceSURL_uniqueID, d.isSourceADirectory, d.allLevelRecursive, d.numOfLevels " + + "FROM request_queue rq JOIN (request_BoL rb, status_BoL sb) " + + "ON (rb.request_queueID=rq.ID AND sb.request_BoLID=rb.ID) " + + "LEFT JOIN request_DirOption d ON rb.request_DirOptionID=d.ID " + + "WHERE ( rb.sourceSURL_uniqueID IN " + makeSURLUniqueIDWhere(surlsUniqueIDs) + + " AND rb.sourceSURL IN " + makeSurlString(surlsArray) + " )"; + + if (withDn) { + str += " AND rq.client_dn=\'" + dn + "\'"; + } + find = con.prepareStatement(str); + + log.trace("BOL CHUNK DAO - find method: {}", find); + rs = find.executeQuery(); + + while (rs.next()) { + + BoLChunkDataTO chunkDataTO = new BoLChunkDataTO(); + chunkDataTO.setStatus(rs.getInt("sb.statusCode")); + chunkDataTO.setLifeTime(rs.getInt("rq.pinLifetime")); + chunkDataTO.setDeferredStartTime(rs.getInt("rq.deferredStartTime")); + chunkDataTO.setRequestToken(rs.getString("rq.r_token")); + chunkDataTO.setTimeStamp(rs.getTimestamp("rq.timeStamp")); + chunkDataTO.setPrimaryKey(rs.getLong("rb.ID")); + chunkDataTO.setFromSURL(rs.getString("rb.sourceSURL")); + chunkDataTO.setNormalizedStFN(rs.getString("rb.normalized_sourceSURL_StFN")); + + int uniqueID = rs.getInt("rb.sourceSURL_uniqueID"); + if (!rs.wasNull()) { + chunkDataTO.setSurlUniqueID(new Integer(uniqueID)); + } + + chunkDataTO.setDirOption(rs.getBoolean("d.isSourceADirectory")); + 
chunkDataTO.setAllLevelRecursive(rs.getBoolean("d.allLevelRecursive")); + chunkDataTO.setNumLevel(rs.getInt("d.numOfLevels")); + + results.add(chunkDataTO); + } + + } catch (SQLException e) { + + log.error("BOL CHUNK DAO: {}", e.getMessage(), e); + e.printStackTrace(); + + } finally { + + closeResultSet(rs); + closeStatement(find); + closeConnection(con); + + } + + return results; + } + + /** + * Private method that returns the generated ID: it throws an exception in case of any problem! + */ + private int extractID(ResultSet rs) throws Exception { + + if (rs == null) { + throw new Exception("BoL CHUNK DAO! Null ResultSet!"); + } + if (rs.next()) { + return rs.getInt(1); + } + String msg = + "BoL CHUNK DAO! It was not possible to establish the assigned autoincrement primary key!"; + log.error(msg); + throw new Exception(msg); + } + + /** + * Method that returns a String containing all Surl's IDs. + */ + private String makeSURLUniqueIDWhere(int[] surlUniqueIDs) { + + StringBuilder sb = new StringBuilder("("); + for (int i = 0; i < surlUniqueIDs.length; i++) { + if (i > 0) { + sb.append(","); + } + sb.append(surlUniqueIDs[i]); + } + sb.append(")"); + return sb.toString(); + } + + /** + * Method that returns a String containing all SURLs. 
+ */ + private String makeSurlString(String[] surls) { + + StringBuilder sb = new StringBuilder("("); + int n = surls.length; + + for (int i = 0; i < n; i++) { + + SURL requestedSURL; + + try { + requestedSURL = SURL.makeSURLfromString(surls[i]); + } catch (NamespaceException e) { + log.error(e.getMessage()); + log.debug("Skip '{}' during query creation", surls[i]); + continue; + } + + sb.append("'"); + sb.append(requestedSURL.getNormalFormAsString()); + sb.append("','"); + sb.append(requestedSURL.getQueryFormAsString()); + sb.append("'"); + + if (i < (n - 1)) { + sb.append(","); + } + } + + sb.append(")"); + return sb.toString(); + } + + private String buildExplanationSet(String explanation) { + + return " sb.explanation='" + explanation + "' "; + } + + private String buildTokenWhereClause(TRequestToken requestToken) { + + return " rq.r_token='" + requestToken.toString() + "' "; + } + + private String buildSurlsWhereClause(int[] surlsUniqueIDs, String[] surls) { + + return " ( rb.sourceSURL_uniqueID IN " + makeSURLUniqueIDWhere(surlsUniqueIDs) + + " AND rb.sourceSURL IN " + makeSurlString(surls) + " ) "; + } + +} diff --git a/src/main/java/it/grid/storm/persistence/impl/mysql/PtGChunkDAOMySql.java b/src/main/java/it/grid/storm/persistence/impl/mysql/PtGChunkDAOMySql.java new file mode 100644 index 000000000..070755a5a --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/impl/mysql/PtGChunkDAOMySql.java @@ -0,0 +1,1263 @@ +/* + * + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package it.grid.storm.persistence.impl.mysql; + +import static it.grid.storm.srm.types.TRequestType.PREPARE_TO_GET; +import static it.grid.storm.srm.types.TStatusCode.SRM_ABORTED; +import static it.grid.storm.srm.types.TStatusCode.SRM_FAILURE; +import static it.grid.storm.srm.types.TStatusCode.SRM_FILE_PINNED; +import static it.grid.storm.srm.types.TStatusCode.SRM_RELEASED; +import static it.grid.storm.srm.types.TStatusCode.SRM_REQUEST_INPROGRESS; +import static it.grid.storm.srm.types.TStatusCode.SRM_SUCCESS; +import static java.sql.Statement.RETURN_GENERATED_KEYS; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Timestamp; +import java.util.Collection; +import java.util.Date; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Set; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; +import com.google.common.collect.Sets; + +import it.grid.storm.ea.StormEA; +import it.grid.storm.namespace.NamespaceDirector; +import it.grid.storm.namespace.NamespaceException; +import it.grid.storm.namespace.StoRI; +import it.grid.storm.namespace.naming.SURL; +import it.grid.storm.persistence.converter.RequestTypeConverter; +import it.grid.storm.persistence.converter.StatusCodeConverter; +import it.grid.storm.persistence.dao.AbstractDAO; +import it.grid.storm.persistence.dao.PtGChunkDAO; +import it.grid.storm.persistence.model.PtGChunkDataTO; +import it.grid.storm.persistence.model.ReducedPtGChunkDataTO; +import it.grid.storm.persistence.pool.StormDbConnectionPool; +import it.grid.storm.srm.types.InvalidTSURLAttributesException; +import it.grid.storm.srm.types.TRequestToken; +import it.grid.storm.srm.types.TSURL; 
+import it.grid.storm.srm.types.TStatusCode; + +/** + * DAO class for PtGChunkCatalog. This DAO is specifically designed to connect to a MySQL DB. The + * raw data found in those tables is pre-treated in order to turn it into the Object Model of StoRM. + * See Method comments for further info. + * + * BEWARE! DAO Adjusts for extra fields in the DB that are not present in the object model. + * + * @author EGRID ICTP + * @version 3.0 + * @date June 2005 + */ +public class PtGChunkDAOMySql extends AbstractDAO implements PtGChunkDAO { + + private static final Logger log = LoggerFactory.getLogger(PtGChunkDAOMySql.class); + + private static final String SELECT_REQUEST_WHERE_TOKEN = + "SELECT * FROM request_queue WHERE r_token=?"; + + private static final String INSERT_REQUEST = + "INSERT INTO request_queue (config_RequestTypeID,client_dn,pinLifetime,status,errstring,r_token,nbreqfiles,timeStamp) " + + "VALUES (?,?,?,?,?,?,?,?)"; + + private static final String INSERT_REQUEST_TRASNFER_PROTOCOL = + "INSERT INTO request_TransferProtocols (request_queueID,config_ProtocolsID) " + + "VALUES (?,?)"; + + private static final String INSERT_REQUEST_DIR_OPTION = + "INSERT INTO request_DirOption (isSourceADirectory,allLevelRecursive,numOfLevels) " + + "VALUES (?,?,?)"; + + private static final String INSERT_REQUEST_GET = + "INSERT INTO request_Get (request_DirOptionID,request_queueID,sourceSURL,normalized_sourceSURL_StFN,sourceSURL_uniqueID) " + + "VALUES (?,?,?,?,?)"; + + private static final String INSERT_STATUS_GET = + "INSERT INTO status_Get (request_GetID,statusCode,explanation) VALUES (?,?,?)"; + + private static final String UPDATE_REQUEST_GET_STATUS_WHERE_ID = + "UPDATE request_queue rq JOIN (status_Get sg, request_Get rg) ON (rq.ID=rg.request_queueID AND sg.request_GetID=rg.ID) " + + "SET sg.fileSize=?, sg.transferURL=?, sg.statusCode=?, sg.explanation=?, rq.pinLifetime=?, rg.normalized_sourceSURL_StFN=?, rg.sourceSURL_uniqueID=? 
" + + "WHERE rg.ID=?"; + + private static final String UPDATE_REQUEST_GET_WHERE_ID = + "UPDATE request_Get rg SET rg.normalized_sourceSURL_StFN=?, rg.sourceSURL_uniqueID=? " + + "WHERE rg.ID=?"; + + private static final String SELECT_STATUS_GET_WHERE_GET_ID = + "SELECT statusCode, transferURL FROM status_Get WHERE request_GetID=?"; + + private static final String SELECT_REQUEST_GET_PROTOCOLS_WHERE_TOKEN = + "SELECT tp.config_ProtocolsID " + + "FROM request_TransferProtocols tp JOIN request_queue rq ON tp.request_queueID=rq.ID " + + "WHERE rq.r_token=?"; + + private static final String SELECT_REQUEST_GET_WHERE_TOKEN_AND_STATUS = + "SELECT sg.statusCode, rq.pinLifetime, rg.ID, rq.timeStamp, rq.client_dn, rq.proxy, rg.sourceSURL, " + + "rg.normalized_sourceSURL_StFN, rg.sourceSURL_uniqueID, d.isSourceADirectory, " + + "d.allLevelRecursive, d.numOfLevels " + + "FROM request_queue rq JOIN (request_Get rg, status_Get sg) " + + "ON (rg.request_queueID=rq.ID AND sg.request_GetID=rg.ID) " + + "LEFT JOIN request_DirOption d ON rg.request_DirOptionID=d.ID " + + "WHERE rq.r_token=? AND sg.statusCode<>?"; + + private static final String SELECT_REQUEST_GET_WHERE_TOKEN = + "SELECT sg.statusCode, rg.ID, rg.sourceSURL, rg.normalized_sourceSURL_StFN, rg.sourceSURL_uniqueID " + + "FROM request_queue rq JOIN (request_Get rg, status_Get sg) " + + "ON (rg.request_queueID=rq.ID AND sg.request_GetID=rg.ID) " + "WHERE rq.r_token=?"; + + private static final String UPDATE_STATUS_GET_WHERE_REQUEST_GET_ID_IS = + "UPDATE status_Get SET statusCode=?, explanation=? WHERE request_GetID=?"; + + private static final String COUNT_REQUEST_ON_SURL_WITH_STATUS = + "SELECT COUNT(rg.ID) FROM status_Get sg JOIN request_Get rg " + + "ON (sg.request_GetID=rg.ID) WHERE rg.sourceSURL_uniqueID=? 
AND sg.statusCode=?"; + + private static final String SELECT_EXPIRED_REQUESTS = + "SELECT rg.sourceSURL , rg.sourceSURL_uniqueID " + + "FROM request_Get rg JOIN (status_Get sg, request_queue rq) ON sg.request_GetID=rg.ID AND rg.request_queueID=rq.ID " + + "WHERE sg.statusCode=?" + + " AND UNIX_TIMESTAMP(NOW())-UNIX_TIMESTAMP(rq.timeStamp) >= rq.pinLifetime"; + + private static final String UPDATE_STATUS_OF_EXPIRED_REQUESTS = + "UPDATE status_Get sg JOIN (request_Get rg, request_queue rq) ON sg.request_GetID=rg.ID AND rg.request_queueID=rq.ID " + + "SET sg.statusCode=? " + + "WHERE sg.statusCode=? AND UNIX_TIMESTAMP(NOW())-UNIX_TIMESTAMP(rq.timeStamp) >= rq.pinLifetime "; + + private static final String SELECT_PTG_PINNED_SURLS = + "SELECT rg.sourceSURL , rg.sourceSURL_uniqueID FROM " + + "request_Get rg JOIN (status_Get sg, request_queue rq) ON sg.request_GetID=rg.ID AND rg.request_queueID=rq.ID " + + "WHERE sg.statusCode=?" + + " AND UNIX_TIMESTAMP(NOW())-UNIX_TIMESTAMP(rq.timeStamp) < rq.pinLifetime "; + + private static final String SELECT_BOL_PINNED_SURLS = + "SELECT rb.sourceSURL , rb.sourceSURL_uniqueID FROM " + + "request_BoL rb JOIN (status_BoL sb, request_queue rq) ON sb.request_BoLID=rb.ID AND rb.request_queueID=rq.ID " + + "WHERE sb.statusCode=?" 
+ + " AND UNIX_TIMESTAMP(NOW())-UNIX_TIMESTAMP(rq.timeStamp) < rq.pinLifetime "; + + private static PtGChunkDAOMySql instance; + + public static synchronized PtGChunkDAO getInstance() { + if (instance == null) { + instance = new PtGChunkDAOMySql(); + } + return instance; + } + + private final RequestTypeConverter requestTypeConverter; + private final StatusCodeConverter statusCodeConverter; + + private PtGChunkDAOMySql() { + + super(StormDbConnectionPool.getInstance()); + requestTypeConverter = RequestTypeConverter.getInstance(); + statusCodeConverter = StatusCodeConverter.getInstance(); + } + + /** + * Method used to add a new record to the DB: the supplied PtGChunkDataTO gets its primaryKey + * changed to the one assigned by the DB. + * + * The supplied PtGChunkData is used to fill in only the DB table where file specific info gets + * recorded: it does _not_ add a new request! So if spurious data is supplied, it will just stay + * there because of a lack of a parent request! + */ + public synchronized void addChild(PtGChunkDataTO to) { + + Connection con = null; + PreparedStatement id = null; + ResultSet rsid = null; + + try { + + // WARNING!!!! We are forced to run a query to get the ID of the request, + // which should NOT be so + // because the corresponding request object should have been changed with + // the extra field! However, it is not possible + // at the moment to perform such chage because of strict deadline and the + // change could wreak havoc + // the code. So we are forced to make this query!!! 
+ + con = getManagedConnection(); + id = con.prepareStatement(SELECT_REQUEST_WHERE_TOKEN); + id.setString(1, to.requestToken()); + log.debug("PTG CHUNK DAO: addChild; {}", id); + rsid = id.executeQuery(); + + if (rsid.next()) { + + int requestId = rsid.getInt("ID"); + int id_s = fillPtGTables(con, to, requestId); + con.commit(); + to.setPrimaryKey(id_s); + + } else { + log.error("Unable to find queued request for token {}", to.requestToken()); + con.rollback(); + } + + } catch (SQLException e) { + log.error("PTG CHUNK DAO: unable to complete addChild! " + "PtGChunkDataTO: {}; error: {}", + to, e.getMessage(), e); + try { + con.rollback(); + } catch (SQLException e1) { + e1.printStackTrace(); + } + } finally { + closeResultSet(rsid); + closeStatement(id); + closeConnection(con); + } + } + + /** + * Method used to add a new record to the DB: the supplied PtGChunkDataTO gets its primaryKey + * changed to the one assigned by the DB. The client_dn must also be supplied as a String. + * + * The supplied PtGChunkData is used to fill in all the DB tables where file specific info gets + * recorded: it _adds_ a new request! + */ + public synchronized void addNew(PtGChunkDataTO to, String clientDn) { + + Connection con = null; + ResultSet rsNew = null; + PreparedStatement addNew = null; + PreparedStatement addProtocols = null; + + try { + + con = getManagedConnection(); + + addNew = con.prepareStatement(INSERT_REQUEST, RETURN_GENERATED_KEYS); + addNew.setString(1, requestTypeConverter.toDB(PREPARE_TO_GET)); + addNew.setString(2, clientDn); + addNew.setInt(3, to.lifeTime()); + addNew.setInt(4, statusCodeConverter.toDB(SRM_REQUEST_INPROGRESS)); + addNew.setString(5, "New PtG Request resulting from srmCopy invocation."); + addNew.setString(6, to.requestToken()); + addNew.setInt(7, 1); // number of requested files set to 1! 
+ addNew.setTimestamp(8, new Timestamp(new Date().getTime())); + log.trace("PTG CHUNK DAO: addNew; {}", addNew); + addNew.execute(); + + rsNew = addNew.getGeneratedKeys(); + + if (!rsNew.next()) { + log.error("Unable to insert new request"); + con.rollback(); + return; + } + int idNew = rsNew.getInt(1); + + // add protocols... + addProtocols = con.prepareStatement(INSERT_REQUEST_TRASNFER_PROTOCOL); + for (Iterator i = to.protocolList().iterator(); i.hasNext();) { + addProtocols.setInt(1, idNew); + addProtocols.setString(2, i.next()); + log.trace("PTG CHUNK DAO: addNew; {}", addProtocols); + addProtocols.execute(); + } + + // addChild... + int id = fillPtGTables(con, to, idNew); + + // end transaction! + con.commit(); + + // update primary key reading the generated key + to.setPrimaryKey(id); + + } catch (SQLException e) { + log.error("PTG CHUNK DAO: Rolling back! Unable to complete addNew! " + + "PtGChunkDataTO: {}; error: {}", to, e.getMessage(), e); + try { + con.rollback(); + } catch (SQLException e1) { + e1.printStackTrace(); + } + } finally { + closeResultSet(rsNew); + closeStatement(addNew); + closeStatement(addProtocols); + closeConnection(con); + } + } + + /** + * To be used inside a transaction + * + * @param to + * @param requestQueueID + * @return + * @throws SQLException + * @throws Exception + */ + private synchronized int fillPtGTables(Connection con, PtGChunkDataTO to, int requestQueueID) + throws SQLException { + + ResultSet rsDo = null; + ResultSet rsG = null; + ResultSet rsS = null; + PreparedStatement addDirOption = null; + PreparedStatement addGet = null; + PreparedStatement addChild = null; + + try { + + // first fill in TDirOption + addDirOption = con.prepareStatement(INSERT_REQUEST_DIR_OPTION, RETURN_GENERATED_KEYS); + addDirOption.setBoolean(1, to.dirOption()); + addDirOption.setBoolean(2, to.allLevelRecursive()); + addDirOption.setInt(3, to.numLevel()); + log.trace("PTG CHUNK DAO: addNew; {}", addDirOption); + addDirOption.execute(); + + 
rsDo = addDirOption.getGeneratedKeys(); + + if (!rsDo.next()) { + throw new SQLException("Unable to get dir_option id"); + } + int idDo = rsDo.getInt(1); + + // second fill in request_Get... sourceSURL and TDirOption! + addGet = con.prepareStatement(INSERT_REQUEST_GET, RETURN_GENERATED_KEYS); + addGet.setInt(1, idDo); + addGet.setInt(2, requestQueueID); + addGet.setString(3, to.fromSURL()); + addGet.setString(4, to.normalizedStFN()); + addGet.setInt(5, to.surlUniqueID()); + log.trace("PTG CHUNK DAO: addNew; {}", addGet); + addGet.execute(); + + rsG = addGet.getGeneratedKeys(); + if (!rsG.next()) { + throw new SQLException("Unable to get request_get id"); + } + int idG = rsG.getInt(1); + + // third fill in status_Get... + addChild = con.prepareStatement(INSERT_STATUS_GET, RETURN_GENERATED_KEYS); + addChild.setInt(1, idG); + addChild.setInt(2, to.status()); + addChild.setString(3, to.errString()); + log.trace("PTG CHUNK DAO: addNew; {}", addChild); + addChild.execute(); + + return idG; + + } finally { + closeResultSet(rsDo); + closeResultSet(rsG); + closeResultSet(rsS); + closeStatement(addDirOption); + closeStatement(addGet); + closeStatement(addChild); + } + } + + /** + * Method used to save the changes made to a retrieved PtGChunkDataTO, back into the MySQL DB. + * + * Only the fileSize, transferURL, statusCode and explanation, of status_Get table are written to + * the DB. Likewise for the request pinLifetime. + * + * In case of any error, an error message gets logged but no exception is thrown. 
+ */ + public synchronized void update(PtGChunkDataTO to) { + + Connection con = null; + PreparedStatement updateFileReq = null; + + try { + + con = getConnection(); + updateFileReq = con.prepareStatement(UPDATE_REQUEST_GET_STATUS_WHERE_ID); + updateFileReq.setLong(1, to.fileSize()); + updateFileReq.setString(2, to.turl()); + updateFileReq.setInt(3, to.status()); + updateFileReq.setString(4, to.errString()); + updateFileReq.setInt(5, to.lifeTime()); + updateFileReq.setString(6, to.normalizedStFN()); + updateFileReq.setInt(7, to.surlUniqueID()); + updateFileReq.setLong(8, to.primaryKey()); + // execute update + log.trace("PTG CHUNK DAO: update method; {}", updateFileReq); + updateFileReq.executeUpdate(); + + } catch (SQLException e) { + log.error("PtG CHUNK DAO: Unable to complete update! {}", e.getMessage(), e); + e.printStackTrace(); + } finally { + closeStatement(updateFileReq); + closeConnection(con); + } + } + + /** + * Updates the request_Get represented by the received ReducedPtGChunkDataTO by setting its + * normalized_sourceSURL_StFN and sourceSURL_uniqueID + * + * @param chunkTO + */ + public synchronized void updateIncomplete(ReducedPtGChunkDataTO chunkTO) { + + Connection con = null; + PreparedStatement update = null; + + try { + con = getConnection(); + update = con.prepareStatement(UPDATE_REQUEST_GET_WHERE_ID); + update.setString(1, chunkTO.normalizedStFN()); + update.setInt(2, chunkTO.surlUniqueID()); + update.setLong(3, chunkTO.primaryKey()); + log.trace("PtG CHUNK DAO - update incomplete: {}", update); + update.executeUpdate(); + } catch (SQLException e) { + log.error("PtG CHUNK DAO: Unable to complete update incomplete! {}", e.getMessage(), e); + } finally { + closeStatement(update); + closeConnection(con); + } + } + + /** + * TODO WARNING! THIS IS A WORK IN PROGRESS!!! + * + * Method used to refresh the PtGChunkDataTO information from the MySQL DB. + * + * In this first version, only the statusCode and the TURL are reloaded from the DB. 
TODO The next + * version must contains all the information related to the Chunk! + * + * In case of any error, an error messagge gets logged but no exception is thrown. + */ + + public synchronized PtGChunkDataTO refresh(long primaryKey) { + + Connection con = null; + PreparedStatement find = null; + ResultSet rs = null; + PtGChunkDataTO chunkDataTO = null; + + try { + + con = getConnection(); + find = con.prepareStatement(SELECT_STATUS_GET_WHERE_GET_ID); + find.setLong(1, primaryKey); + log.trace("PTG CHUNK DAO: refresh status method; {}", find); + rs = find.executeQuery(); + + while (rs.next()) { + chunkDataTO = new PtGChunkDataTO(); + chunkDataTO.setStatus(rs.getInt("sg.statusCode")); + chunkDataTO.setTurl(rs.getString("sg.transferURL")); + } + return chunkDataTO; + + } catch (SQLException e) { + + log.error("PTG CHUNK DAO: {}", e.getMessage(), e); + e.printStackTrace(); + return null; + + } finally { + closeResultSet(rs); + closeStatement(find); + closeConnection(con); + } + } + + /** + * Method that queries the MySQL DB to find all entries matching the supplied TRequestToken. The + * Collection contains the corresponding PtGChunkDataTO objects. + * + * An initial simple query establishes the list of protocols associated with the request. A second + * complex query establishes all chunks associated with the request, by properly joining + * request_queue, request_Get, status_Get and request_DirOption. The considered fields are: + * + * (1) From status_Get: the ID field which becomes the TOs primary key, and statusCode. + * + * (2) From request_Get: sourceSURL + * + * (3) From request_queue: pinLifetime + * + * (4) From request_DirOption: isSourceADirectory, alLevelRecursive, numOfLevels + * + * In case of any error, a log gets written and an empty collection is returned. No exception is + * thrown. + * + * NOTE! Chunks in SRM_ABORTED status are NOT returned! 
+ */ + public synchronized Collection find(TRequestToken requestToken) { + + Connection con = null; + PreparedStatement findProtocols = null; + PreparedStatement findRequest = null; + ResultSet rsProtocols = null; + ResultSet rsRequest = null; + Collection results = Lists.newArrayList(); + + try { + + con = getManagedConnection(); + findProtocols = con.prepareStatement(SELECT_REQUEST_GET_PROTOCOLS_WHERE_TOKEN); + + List protocols = Lists.newArrayList(); + findProtocols.setString(1, requestToken.getValue()); + log.trace("PTG CHUNK DAO: find method; {}", findProtocols); + rsProtocols = findProtocols.executeQuery(); + while (rsProtocols.next()) { + protocols.add(rsProtocols.getString("tp.config_ProtocolsID")); + } + + findRequest = con.prepareStatement(SELECT_REQUEST_GET_WHERE_TOKEN_AND_STATUS); + findRequest.setString(1, requestToken.getValue()); + findRequest.setInt(2, statusCodeConverter.toDB(SRM_ABORTED)); + log.trace("PTG CHUNK DAO: find method; {}", findRequest); + rsRequest = findRequest.executeQuery(); + + PtGChunkDataTO chunkDataTO; + while (rsRequest.next()) { + chunkDataTO = new PtGChunkDataTO(); + chunkDataTO.setStatus(rsRequest.getInt("sg.statusCode")); + chunkDataTO.setRequestToken(requestToken.getValue()); + chunkDataTO.setPrimaryKey(rsRequest.getLong("rg.ID")); + chunkDataTO.setFromSURL(rsRequest.getString("rg.sourceSURL")); + chunkDataTO.setNormalizedStFN(rsRequest.getString("rg.normalized_sourceSURL_StFN")); + int uniqueID = rsRequest.getInt("rg.sourceSURL_uniqueID"); + if (!rsRequest.wasNull()) { + chunkDataTO.setSurlUniqueID(new Integer(uniqueID)); + } + chunkDataTO.setClientDN(rsRequest.getString("rq.client_dn")); + + /** + * This code is only for the 1.3.18. This is a workaround to get FQANs using the proxy field + * on request_queue. The FE use the proxy field of request_queue to insert a single FQAN + * string containing all FQAN separeted by the "#" char. The proxy is a BLOB, hence it has + * to be properly conveted in string. 
+ */ + java.sql.Blob blob = rsRequest.getBlob("rq.proxy"); + if (!rsRequest.wasNull() && blob != null) { + byte[] bdata = blob.getBytes(1, (int) blob.length()); + chunkDataTO.setVomsAttributes(new String(bdata)); + } + chunkDataTO.setTimeStamp(rsRequest.getTimestamp("rq.timeStamp")); + chunkDataTO.setLifeTime(rsRequest.getInt("rq.pinLifetime")); + chunkDataTO.setDirOption(rsRequest.getBoolean("d.isSourceADirectory")); + chunkDataTO.setAllLevelRecursive(rsRequest.getBoolean("d.allLevelRecursive")); + chunkDataTO.setNumLevel(rsRequest.getInt("d.numOfLevels")); + chunkDataTO.setProtocolList(protocols); + results.add(chunkDataTO); + } + con.commit(); + } catch (SQLException e) { + log.error("PTG CHUNK DAO: ", e.getMessage(), e); + e.printStackTrace(); + try { + con.rollback(); + } catch (SQLException e1) { + e1.printStackTrace(); + } + } finally { + closeResultSet(rsProtocols); + closeResultSet(rsRequest); + closeStatement(findProtocols); + closeStatement(findRequest); + closeConnection(con); + } + return results; + } + + /** + * Method that returns a Collection of ReducedPtGChunkDataTO associated to the given TRequestToken + * expressed as String. + */ + public synchronized Collection findReduced(TRequestToken requestToken) { + + Connection con = null; + PreparedStatement find = null; + ResultSet rs = null; + Collection results = Lists.newArrayList(); + + try { + + con = getConnection(); + find = con.prepareStatement(SELECT_REQUEST_GET_WHERE_TOKEN); + find.setString(1, requestToken.getValue()); + log.trace("PtG CHUNK DAO! 
findReduced with request token; {}", find); + rs = find.executeQuery(); + + while (rs.next()) { + ReducedPtGChunkDataTO reducedChunkDataTO = new ReducedPtGChunkDataTO(); + reducedChunkDataTO.setStatus(rs.getInt("sg.statusCode")); + reducedChunkDataTO.setPrimaryKey(rs.getLong("rg.ID")); + reducedChunkDataTO.setFromSURL(rs.getString("rg.sourceSURL")); + reducedChunkDataTO.setNormalizedStFN(rs.getString("rg.normalized_sourceSURL_StFN")); + int uniqueID = rs.getInt("rg.sourceSURL_uniqueID"); + if (!rs.wasNull()) { + reducedChunkDataTO.setSurlUniqueID(uniqueID); + } + results.add(reducedChunkDataTO); + } + + } catch (SQLException e) { + log.error("PTG CHUNK DAO: {}", e.getMessage(), e); + e.printStackTrace(); + } finally { + closeResultSet(rs); + closeStatement(find); + closeConnection(con); + } + return results; + } + + public synchronized Collection findReduced(TRequestToken requestToken, + int[] surlsUniqueIDs, String[] surlsArray) { + + Connection con = null; + PreparedStatement find = null; + ResultSet rs = null; + Collection results = Lists.newArrayList(); + + try { + + String str = + "SELECT sg.statusCode, rg.ID, rg.sourceSURL, rg.normalized_sourceSURL_StFN, rg.sourceSURL_uniqueID " + + "FROM request_queue rq JOIN (request_Get rg, status_Get sg) " + + "ON (rg.request_queueID=rq.ID AND sg.request_GetID=rg.ID) " + + "WHERE rq.r_token=? AND ( rg.sourceSURL_uniqueID IN " + + makeSURLUniqueIDWhere(surlsUniqueIDs) + " AND rg.sourceSURL IN " + + makeSurlString(surlsArray) + " ) "; + + con = getConnection(); + find = con.prepareStatement(str); + find.setString(1, requestToken.getValue()); + log.trace("PtG CHUNK DAO! 
findReduced with griduser+surlarray; {}", find); + rs = find.executeQuery(); + + while (rs.next()) { + ReducedPtGChunkDataTO chunkDataTO = new ReducedPtGChunkDataTO(); + chunkDataTO.setStatus(rs.getInt("sg.statusCode")); + chunkDataTO.setPrimaryKey(rs.getLong("rg.ID")); + chunkDataTO.setFromSURL(rs.getString("rg.sourceSURL")); + chunkDataTO.setNormalizedStFN(rs.getString("rg.normalized_sourceSURL_StFN")); + int uniqueID = rs.getInt("rg.sourceSURL_uniqueID"); + if (!rs.wasNull()) { + chunkDataTO.setSurlUniqueID(uniqueID); + } + results.add(chunkDataTO); + } + } catch (SQLException e) { + log.error("PTG CHUNK DAO: {}", e.getMessage(), e); + e.printStackTrace(); + } finally { + closeResultSet(rs); + closeStatement(find); + closeConnection(con); + } + return results; + } + + /** + * Method that returns a Collection of ReducedPtGChunkDataTO associated to the given griduser, and + * whose SURLs are contained in the supplied array of Strings. + */ + public synchronized Collection findReduced(String griduser, + int[] surlUniqueIDs, String[] surls) { + + Connection con = null; + PreparedStatement find = null; + ResultSet rs = null; + Collection results = Lists.newArrayList(); + + try { + /* + * NOTE: we search also on the fromSurl because otherwise we lost all request_get that have + * not the uniqueID set because are not yet been used by anybody + */ + con = getConnection(); + // get reduced chunks + String str = + "SELECT sg.statusCode, rg.ID, rg.sourceSURL, rg.normalized_sourceSURL_StFN, rg.sourceSURL_uniqueID " + + "FROM request_queue rq JOIN (request_Get rg, status_Get sg) " + + "ON (rg.request_queueID=rq.ID AND sg.request_GetID=rg.ID) " + + "WHERE rq.client_dn=? AND ( rg.sourceSURL_uniqueID IN " + + makeSURLUniqueIDWhere(surlUniqueIDs) + " AND rg.sourceSURL IN " + + makeSurlString(surls) + " ) "; + find = con.prepareStatement(str); + find.setString(1, griduser); + log.trace("PtG CHUNK DAO! 
findReduced with griduser+surlarray; {}", find); + rs = find.executeQuery(); + + while (rs.next()) { + ReducedPtGChunkDataTO chunkDataTO = new ReducedPtGChunkDataTO(); + chunkDataTO.setStatus(rs.getInt("sg.statusCode")); + chunkDataTO.setPrimaryKey(rs.getLong("rg.ID")); + chunkDataTO.setFromSURL(rs.getString("rg.sourceSURL")); + chunkDataTO.setNormalizedStFN(rs.getString("rg.normalized_sourceSURL_StFN")); + int uniqueID = rs.getInt("rg.sourceSURL_uniqueID"); + if (!rs.wasNull()) { + chunkDataTO.setSurlUniqueID(uniqueID); + } + results.add(chunkDataTO); + } + } catch (SQLException e) { + log.error("PTG CHUNK DAO: {}", e.getMessage(), e); + e.printStackTrace(); + } finally { + closeResultSet(rs); + closeStatement(find); + closeConnection(con); + } + return results; + } + + /** + * Method used in extraordinary situations to signal that data retrieved from the DB was malformed + * and could not be translated into the StoRM object model. + * + * This method attempts to change the status of the request to SRM_FAILURE and record it in the + * DB. + * + * This operation could potentially fail because the source of the malformed problems could be a + * problematic DB; indeed, initially only log messagges where recorded. + * + * Yet it soon became clear that the source of malformed data were the clients and/or FE recording + * info in the DB. In these circumstances the client would see its request as being in the + * SRM_IN_PROGRESS state for ever. Hence the pressing need to inform it of the encountered + * problems. 
+ */ + public synchronized void fail(PtGChunkDataTO auxTO) { + + Connection con = null; + PreparedStatement update = null; + + try { + + con = getConnection(); + update = con.prepareStatement(UPDATE_STATUS_GET_WHERE_REQUEST_GET_ID_IS); + update.setInt(1, statusCodeConverter.toDB(SRM_FAILURE)); + update.setString(2, "Request is malformed!"); + update.setLong(3, auxTO.primaryKey()); + log.trace("PTG CHUNK DAO: signalMalformed; {}", update); + update.executeUpdate(); + + } catch (SQLException e) { + log.error("PtGChunkDAO! Unable to signal in DB that the request was " + + "malformed! Request: {}; Exception: {}", auxTO.toString(), e.toString()); + e.printStackTrace(); + } finally { + closeStatement(update); + closeConnection(con); + } + } + + /** + * Method that returns the number of Get requests on the given SURL, that are in SRM_FILE_PINNED + * state. + * + * This method is intended to be used by PtGChunkCatalog in the isSRM_FILE_PINNED method + * invocation. + * + * In case of any error, 0 is returned. + */ + // request_Get table + public synchronized int numberInSRM_FILE_PINNED(int surlUniqueID) { + + return count(surlUniqueID, SRM_FILE_PINNED); + } + + public synchronized int count(int surlUniqueID, TStatusCode status) { + + Connection con = null; + PreparedStatement find = null; + ResultSet rs = null; + int count = 0; + + try { + con = getConnection(); + find = con.prepareStatement(COUNT_REQUEST_ON_SURL_WITH_STATUS); + find.setInt(1, surlUniqueID); + find.setInt(2, statusCodeConverter.toDB(status)); + log.trace("PtG CHUNK DAO - numberInSRM_FILE_PINNED method: {}", find); + rs = find.executeQuery(); + + if (rs.next()) { + count = rs.getInt(1); + } + } catch (SQLException e) { + log.error("PtG CHUNK DAO! Unable to determine numberInSRM_FILE_PINNED! " + "Returning 0! 
{}", + e.getMessage(), e); + e.printStackTrace(); + } finally { + closeResultSet(rs); + closeStatement(find); + closeConnection(con); + } + return count; + } + + /** + * Method that updates all expired requests in SRM_FILE_PINNED state, into SRM_RELEASED. + * + * This is needed when the client forgets to invoke srmReleaseFiles(). + * + * @return + */ + public synchronized Collection transitExpiredSRM_FILE_PINNED() { + + Map expiredSurlMap = Maps.newHashMap(); + Set pinnedSurlSet = Sets.newHashSet(); + + Connection con = null; + PreparedStatement findExpired = null; + PreparedStatement updateExpired = null; + PreparedStatement findPtgPinnedSurls = null; + PreparedStatement findBolPinnedSurls = null; + ResultSet expired = null; + ResultSet ptgPinnedSurls = null; + ResultSet bolPinnedSurls = null; + + /* Find all expired SURLs */ + try { + // start transaction + con = getManagedConnection(); + + findExpired = con.prepareStatement(SELECT_EXPIRED_REQUESTS); + findExpired.setInt(1, statusCodeConverter.toDB(SRM_FILE_PINNED)); + + expired = findExpired.executeQuery(); + + while (expired.next()) { + String sourceSURL = expired.getString("rg.sourceSURL"); + Integer uniqueID = new Integer(expired.getInt("rg.sourceSURL_uniqueID")); + /* If the uniqueID is not set compute it */ + if (expired.wasNull()) { + try { + TSURL tsurl = TSURL.makeFromStringWellFormed(sourceSURL); + uniqueID = tsurl.uniqueId(); + } catch (InvalidTSURLAttributesException e) { + log.warn("PtGChunkDAO! unable to build the TSURL from {}: " + + "InvalidTSURLAttributesException {}", sourceSURL, e.getMessage(), e); + } + } + expiredSurlMap.put(sourceSURL, uniqueID); + } + + if (expiredSurlMap.isEmpty()) { + con.commit(); + log.trace( + "PtGChunkDAO! 
No chunk of PtG request was transited from SRM_FILE_PINNED to SRM_RELEASED."); + return Lists.newArrayList(); + } + + updateExpired = con.prepareStatement(UPDATE_STATUS_OF_EXPIRED_REQUESTS); + updateExpired.setInt(1, statusCodeConverter.toDB(SRM_RELEASED)); + updateExpired.setInt(2, statusCodeConverter.toDB(SRM_FILE_PINNED)); + log.trace("PtG CHUNK DAO - transitExpiredSRM_FILE_PINNED method: {}", updateExpired); + int count = updateExpired.executeUpdate(); + + if (count == 0) { + log.trace("PtGChunkDAO! No chunk of PtG request was " + + "transited from SRM_FILE_PINNED to SRM_RELEASED."); + } else { + log.info("PtGChunkDAO! {} chunks of PtG requests were transited from" + + " SRM_FILE_PINNED to SRM_RELEASED.", count); + } + + /* + * in order to enhance performance here we can check if there is any file system with tape + * (T1D0, T1D1), if there is not any we can skip the following + */ + + /* Find all not expired SURLs from PtG and BoL */ + + findPtgPinnedSurls = con.prepareStatement(SELECT_PTG_PINNED_SURLS); + findPtgPinnedSurls.setInt(1, statusCodeConverter.toDB(SRM_FILE_PINNED)); + + ptgPinnedSurls = findPtgPinnedSurls.executeQuery(); + + while (ptgPinnedSurls.next()) { + String sourceSURL = ptgPinnedSurls.getString("rg.sourceSURL"); + Integer uniqueID = new Integer(ptgPinnedSurls.getInt("rg.sourceSURL_uniqueID")); + /* If the uniqueID is not setted compute it */ + if (ptgPinnedSurls.wasNull()) { + try { + TSURL tsurl = TSURL.makeFromStringWellFormed(sourceSURL); + uniqueID = tsurl.uniqueId(); + } catch (InvalidTSURLAttributesException e) { + log.warn("PtGChunkDAO! unable to build the TSURL from {}. 
" + + "InvalidTSURLAttributesException: {}", sourceSURL, e.getMessage()); + } + } + pinnedSurlSet.add(uniqueID); + } + + // SURLs pinned by BoLs + findBolPinnedSurls = con.prepareStatement(SELECT_BOL_PINNED_SURLS); + findBolPinnedSurls.setInt(1, statusCodeConverter.toDB(SRM_SUCCESS)); + bolPinnedSurls = findBolPinnedSurls.executeQuery(); + + while (bolPinnedSurls.next()) { + String sourceSURL = bolPinnedSurls.getString("rb.sourceSURL"); + Integer uniqueID = new Integer(bolPinnedSurls.getInt("rb.sourceSURL_uniqueID")); + /* If the uniqueID is not setted compute it */ + if (bolPinnedSurls.wasNull()) { + try { + TSURL tsurl = TSURL.makeFromStringWellFormed(sourceSURL); + uniqueID = tsurl.uniqueId(); + } catch (InvalidTSURLAttributesException e) { + log.warn("PtGChunkDAO! unable to build the TSURL from {}. " + + "InvalidTSURLAttributesException: {}", sourceSURL, e.getMessage(), e); + } + } + pinnedSurlSet.add(uniqueID); + } + + con.commit(); + } catch (SQLException e) { + log.error("PtGChunkDAO! SQLException. 
{}", e.getMessage(), e); + e.printStackTrace(); + try { + con.rollback(); + } catch (SQLException e1) { + e1.printStackTrace(); + } + } finally { + closeStatement(findExpired); + closeStatement(updateExpired); + closeStatement(findPtgPinnedSurls); + closeStatement(findBolPinnedSurls); + closeResultSet(expired); + closeResultSet(ptgPinnedSurls); + closeResultSet(bolPinnedSurls); + closeConnection(con); + } + + Collection expiredSurlList = Lists.newArrayList(); + /* Remove the Extended Attribute pinned if there is not a valid SURL on it */ + TSURL surl; + for (Entry surlEntry : expiredSurlMap.entrySet()) { + if (!pinnedSurlSet.contains(surlEntry.getValue())) { + try { + surl = TSURL.makeFromStringValidate(surlEntry.getKey()); + } catch (InvalidTSURLAttributesException e) { + log.error("Invalid SURL, cannot release the pin " + "(Extended Attribute): {}", + surlEntry.getKey()); + continue; + } + expiredSurlList.add(surl); + StoRI stori; + try { + stori = NamespaceDirector.getNamespace().resolveStoRIbySURL(surl); + } catch (Throwable e) { + log.error("Invalid SURL {} cannot release the pin. {}: {}", surlEntry.getKey(), + e.getClass().getCanonicalName(), e.getMessage(), e); + continue; + } + + if (stori.getVirtualFileSystem().getStorageClassType().isTapeEnabled()) { + StormEA.removePinned(stori.getAbsolutePath()); + } + } + } + return expiredSurlList; + } + + /** + * Method that updates all chunks in SRM_FILE_PINNED state, into SRM_RELEASED. An array of long + * representing the primary key of each chunk is required: only they get the status changed + * provided their current status is SRM_FILE_PINNED. + * + * This method is used during srmReleaseFiles + * + * In case of any error nothing happens and no exception is thrown, but proper messagges get + * logged. + */ + public synchronized void transitSRM_FILE_PINNEDtoSRM_RELEASED(long[] ids) { + + String str = "UPDATE status_Get sg SET sg.statusCode=? " + + "WHERE sg.statusCode=? 
AND sg.request_GetID IN " + makeWhereString(ids); + + Connection con = null; + PreparedStatement stmt = null; + try { + + con = getConnection(); + stmt = con.prepareStatement(str); + stmt.setInt(1, statusCodeConverter.toDB(SRM_RELEASED)); + stmt.setInt(2, statusCodeConverter.toDB(SRM_FILE_PINNED)); + log.trace("PtG CHUNK DAO - transitSRM_FILE_PINNEDtoSRM_RELEASED: {}", stmt); + int count = stmt.executeUpdate(); + if (count == 0) { + log.trace("PtG CHUNK DAO! No chunk of PtG request was " + + "transited from SRM_FILE_PINNED to SRM_RELEASED."); + } else { + log.info("PtG CHUNK DAO! {} chunks of PtG requests were transited " + + "from SRM_FILE_PINNED to SRM_RELEASED.", count); + } + } catch (SQLException e) { + log.error( + "PtG CHUNK DAO! Unable to transit chunks" + " from SRM_FILE_PINNED to SRM_RELEASED! {}", + e.getMessage(), e); + e.printStackTrace(); + } finally { + closeStatement(stmt); + closeConnection(con); + } + } + + /** + * @param ids + * @param token + */ + public synchronized void transitSRM_FILE_PINNEDtoSRM_RELEASED(long[] ids, TRequestToken token) { + + if (token == null) { + transitSRM_FILE_PINNEDtoSRM_RELEASED(ids); + return; + } + + /* + * If a request token has been specified, only the related Get requests have to be released. + * This is done adding the r.r_token="..." clause in the where subquery. + */ + String str = "UPDATE " + + "status_Get sg JOIN (request_Get rg, request_queue rq) ON sg.request_GetID=rg.ID AND rg.request_queueID=rq.ID " + + "SET sg.statusCode=? " + "WHERE sg.statusCode=? 
AND rq.r_token='" + token.getValue() + + "' AND rg.ID IN " + makeWhereString(ids); + + Connection con = null; + PreparedStatement stmt = null; + try { + con = getConnection(); + stmt = con.prepareStatement(str); + stmt.setInt(1, statusCodeConverter.toDB(SRM_RELEASED)); + stmt.setInt(2, statusCodeConverter.toDB(SRM_FILE_PINNED)); + log.trace("PtG CHUNK DAO - transitSRM_FILE_PINNEDtoSRM_RELEASED: {}", stmt); + int count = stmt.executeUpdate(); + if (count == 0) { + log.trace("PtG CHUNK DAO! No chunk of PtG request was" + + " transited from SRM_FILE_PINNED to SRM_RELEASED."); + } else { + log.info("PtG CHUNK DAO! {} chunks of PtG requests were transited from " + + "SRM_FILE_PINNED to SRM_RELEASED.", count); + } + } catch (SQLException e) { + log.error( + "PtG CHUNK DAO! Unable to transit chunks from " + "SRM_FILE_PINNED to SRM_RELEASED! {}", + e.getMessage(), e); + } finally { + closeStatement(stmt); + closeConnection(con); + } + } + + public synchronized void updateStatus(TRequestToken requestToken, int[] surlUniqueIDs, + String[] surls, TStatusCode statusCode, String explanation) { + + String str = "UPDATE " + + "status_Get sg JOIN (request_Get rg, request_queue rq) ON sg.request_GetID=rg.ID AND rg.request_queueID=rq.ID " + + "SET sg.statusCode=? , sg.explanation=? " + "WHERE rq.r_token='" + requestToken.toString() + + "' AND ( rg.sourceSURL_uniqueID IN " + makeSURLUniqueIDWhere(surlUniqueIDs) + + " AND rg.sourceSURL IN " + makeSurlString(surls) + " ) "; + + Connection con = null; + PreparedStatement stmt = null; + + try { + con = getConnection(); + stmt = con.prepareStatement(str); + stmt.setInt(1, statusCodeConverter.toDB(statusCode)); + stmt.setString(2, (explanation != null ? explanation : "")); + log.trace("PtG CHUNK DAO - updateStatus: {}", stmt); + int count = stmt.executeUpdate(); + if (count == 0) { + log.trace("PtG CHUNK DAO! No chunk of PtG request was updated to {}.", statusCode); + } else { + log.info("PtG CHUNK DAO! 
{} chunks of PtG requests were updated to {}.", count, statusCode); + } + } catch (SQLException e) { + log.error("PtG CHUNK DAO! Unable to updated to {}! {}", statusCode, e.getMessage(), e); + } finally { + closeStatement(stmt); + closeConnection(con); + } + } + + public synchronized void updateStatusOnMatchingStatus(TRequestToken requestToken, + TStatusCode expectedStatusCode, TStatusCode newStatusCode, String explanation) { + + if (requestToken == null || requestToken.getValue().trim().isEmpty() || explanation == null) { + throw new IllegalArgumentException("Unable to perform the updateStatusOnMatchingStatus, " + + "invalid arguments: requestToken=" + requestToken + " explanation=" + explanation); + } + doUpdateStatusOnMatchingStatus(requestToken, null, null, expectedStatusCode, newStatusCode, + explanation, true, false, true); + } + + private synchronized void doUpdateStatusOnMatchingStatus(TRequestToken requestToken, + int[] surlUniqueIDs, String[] surls, TStatusCode expectedStatusCode, + TStatusCode newStatusCode, String explanation, boolean withRequestToken, boolean withSurls, + boolean withExplanation) throws IllegalArgumentException { + + if ((withRequestToken && requestToken == null) || (withExplanation && explanation == null) + || (withSurls && (surlUniqueIDs == null || surls == null))) { + + throw new IllegalArgumentException("Unable to perform the doUpdateStatusOnMatchingStatus, " + + "invalid arguments: withRequestToken=" + withRequestToken + " requestToken=" + + requestToken + " withSurls=" + withSurls + " surlUniqueIDs=" + surlUniqueIDs + " surls=" + + surls + " withExplaination=" + withExplanation + " explanation=" + explanation); + } + + String str = "UPDATE status_Get sg JOIN (request_Get rg, request_queue rq) " + + "ON sg.request_GetID=rg.ID AND rg.request_queueID=rq.ID " + "SET sg.statusCode=? "; + if (withExplanation) { + str += " , " + buildExpainationSet(explanation); + } + str += " WHERE sg.statusCode=? 
"; + if (withRequestToken) { + str += " AND " + buildTokenWhereClause(requestToken); + } + if (withSurls) { + str += " AND " + buildSurlsWhereClause(surlUniqueIDs, surls); + } + + Connection con = null; + PreparedStatement stmt = null; + + try { + con = getConnection(); + stmt = con.prepareStatement(str); + stmt.setInt(1, statusCodeConverter.toDB(newStatusCode)); + stmt.setInt(2, statusCodeConverter.toDB(expectedStatusCode)); + log.trace("PtG CHUNK DAO - updateStatusOnMatchingStatus: {}", stmt); + int count = stmt.executeUpdate(); + if (count == 0) { + log.trace("PtG CHUNK DAO! No chunk of PtG request was updated " + "from {} to {}.", + expectedStatusCode, newStatusCode); + } else { + log.debug("PtG CHUNK DAO! {} chunks of PtG requests were updated " + "from {} to {}.", + count, expectedStatusCode, newStatusCode); + } + } catch (SQLException e) { + log.error("PtG CHUNK DAO! Unable to updated from {} to {}! {}", expectedStatusCode, + newStatusCode, e.getMessage(), e); + e.printStackTrace(); + } finally { + closeStatement(stmt); + closeConnection(con); + } + } + + /** + * Method that returns a String containing all IDs. + */ + private String makeWhereString(long[] rowids) { + + StringBuilder sb = new StringBuilder("("); + int n = rowids.length; + for (int i = 0; i < n; i++) { + sb.append(rowids[i]); + if (i < (n - 1)) { + sb.append(","); + } + } + sb.append(")"); + return sb.toString(); + } + + /** + * Method that returns a String containing all SURL's IDs. + */ + private String makeSURLUniqueIDWhere(int[] surlUniqueIDs) { + + StringBuilder sb = new StringBuilder("("); + for (int i = 0; i < surlUniqueIDs.length; i++) { + if (i > 0) { + sb.append(","); + } + sb.append(surlUniqueIDs[i]); + } + sb.append(")"); + return sb.toString(); + } + + /** + * Method that returns a String containing all SURLs. 
+ */ + private String makeSurlString(String[] surls) { + + StringBuilder sb = new StringBuilder("("); + int n = surls.length; + + for (int i = 0; i < n; i++) { + + SURL requestedSURL; + + try { + requestedSURL = SURL.makeSURLfromString(surls[i]); + } catch (NamespaceException e) { + log.error(e.getMessage()); + log.debug("Skip '{}' during query creation", surls[i]); + continue; + } + + sb.append("'"); + sb.append(requestedSURL.getNormalFormAsString()); + sb.append("','"); + sb.append(requestedSURL.getQueryFormAsString()); + sb.append("'"); + + if (i < (n - 1)) { + sb.append(","); + } + } + + sb.append(")"); + return sb.toString(); + } + + private String buildExpainationSet(String explanation) { + + return " sg.explanation='" + explanation + "' "; + } + + private String buildTokenWhereClause(TRequestToken requestToken) { + + return " rq.r_token='" + requestToken.toString() + "' "; + } + + private String buildSurlsWhereClause(int[] surlsUniqueIDs, String[] surls) { + + return " ( rg.sourceSURL_uniqueID IN " + makeSURLUniqueIDWhere(surlsUniqueIDs) + + " AND rg.sourceSURL IN " + makeSurlString(surls) + " ) "; + } + +} diff --git a/src/main/java/it/grid/storm/persistence/impl/mysql/PtPChunkDAOMySql.java b/src/main/java/it/grid/storm/persistence/impl/mysql/PtPChunkDAOMySql.java new file mode 100644 index 000000000..48eaceb0f --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/impl/mysql/PtPChunkDAOMySql.java @@ -0,0 +1,798 @@ +/* + * + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package it.grid.storm.persistence.impl.mysql; + +import static it.grid.storm.catalogs.ChunkDAOUtils.buildInClauseForArray; +import static it.grid.storm.srm.types.TStatusCode.SRM_ABORTED; +import static it.grid.storm.srm.types.TStatusCode.SRM_FAILURE; +import static it.grid.storm.srm.types.TStatusCode.SRM_FILE_LIFETIME_EXPIRED; +import static it.grid.storm.srm.types.TStatusCode.SRM_REQUEST_INPROGRESS; +import static it.grid.storm.srm.types.TStatusCode.SRM_SPACE_AVAILABLE; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.util.Collection; +import java.util.List; +import java.util.Map; + +import org.apache.commons.lang.StringUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.base.Preconditions; +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; + +import it.grid.storm.namespace.NamespaceException; +import it.grid.storm.namespace.naming.SURL; +import it.grid.storm.persistence.converter.StatusCodeConverter; +import it.grid.storm.persistence.dao.AbstractDAO; +import it.grid.storm.persistence.dao.PtPChunkDAO; +import it.grid.storm.persistence.model.PtPChunkDataTO; +import it.grid.storm.persistence.model.ReducedPtPChunkDataTO; +import it.grid.storm.persistence.pool.StormDbConnectionPool; +import it.grid.storm.srm.types.TRequestToken; +import it.grid.storm.srm.types.TStatusCode; + +/** + * DAO class for PtPChunkCatalog. This DAO is specifically designed to connect to a MySQL DB. The + * raw data found in those tables is pre-treated in order to turn it into the Object Model of StoRM. + * See Method comments for further info. BEWARE! DAO Adjusts for extra fields in the DB that are not + * present in the object model. 
+ * + * @author EGRID ICTP + * @version 2.0 + * @date June 2005 + */ +public class PtPChunkDAOMySql extends AbstractDAO implements PtPChunkDAO { + + private static final Logger log = LoggerFactory.getLogger(PtPChunkDAOMySql.class); + + private static final String UPDATE_REQUEST_PUT_WHERE_ID_IS = "UPDATE " + + "request_queue rq JOIN (status_Put sp, request_Put rp) ON " + + "(rq.ID=rp.request_queueID AND sp.request_PutID=rp.ID) " + + "SET sp.transferURL=?, sp.statusCode=?, sp.explanation=?, rq.pinLifetime=?, rq.fileLifetime=?, " + + "rq.config_FileStorageTypeID=?, rq.config_OverwriteID=?, " + + "rp.normalized_targetSURL_StFN=?, rp.targetSURL_uniqueID=? " + "WHERE rp.ID=?"; + + private static final String UPDATE_REDUCED_REQUEST_PUT_WHERE_ID_IS = + "UPDATE request_Put SET normalized_targetSURL_StFN=?, targetSURL_uniqueID=? " + "WHERE ID=?"; + + private static final String SELECT_REQUEST_PROTOCOLS_WHERE_TOKEN_IS = + "SELECT tp.config_ProtocolsID " + + "FROM request_TransferProtocols tp JOIN request_queue rq ON tp.request_queueID=rq.ID " + + "WHERE rq.r_token=?"; + + private static final String SELECT_FULL_REQUEST_PUT_WHERE_TOKEN_AND_STATUS = + "SELECT rq.config_FileStorageTypeID, rq.config_OverwriteID, rq.timeStamp, rq.pinLifetime, rq.fileLifetime, rq.s_token, rq.client_dn, rq.proxy, rp.ID, rp.targetSURL, rp.expectedFileSize, rp.normalized_targetSURL_StFN, rp.targetSURL_uniqueID, sp.statusCode " + + "FROM request_queue rq JOIN (request_Put rp, status_Put sp) " + + "ON (rp.request_queueID=rq.ID AND sp.request_PutID=rp.ID) " + + "WHERE rq.r_token=? AND sp.statusCode<>?"; + + private static final String UPDATE_STATUS_PUT_WHERE_ID_IS = + "UPDATE status_Put sp SET sp.statusCode=?, sp.explanation=? 
WHERE sp.request_PutID=?"; + + private static final String SELECT_EXPIRED_REQUEST_PUT_WHERE_STATUS_IS = + "SELECT rp.ID, rp.targetSURL " + + "FROM status_Put sp JOIN (request_Put rp, request_queue rq) ON sp.request_PutID=rp.ID AND rp.request_queueID=rq.ID " + + "WHERE sp.statusCode=? AND UNIX_TIMESTAMP(NOW())-UNIX_TIMESTAMP(rq.timeStamp) >= rq.pinLifetime "; + + private static PtPChunkDAO instance; + + public static synchronized PtPChunkDAO getInstance() { + if (instance == null) { + instance = new PtPChunkDAOMySql(); + } + return instance; + } + + private StatusCodeConverter statusCodeConverter; + + private PtPChunkDAOMySql() { + + super(StormDbConnectionPool.getInstance()); + statusCodeConverter = StatusCodeConverter.getInstance(); + } + + /** + * Method used to save the changes made to a retrieved PtPChunkDataTO, back into the MySQL DB. + * Only the transferURL, statusCode and explanation, of status_Put table get written to the DB. + * Likewise for the pinLifetime and fileLifetime of request_queue. In case of any error, an error + * message gets logged but no exception is thrown. + */ + public synchronized void update(PtPChunkDataTO to) { + + Connection con = null; + PreparedStatement updatePut = null; + try { + con = getConnection(); + updatePut = con.prepareStatement(UPDATE_REQUEST_PUT_WHERE_ID_IS); + + updatePut.setString(1, to.transferURL()); + updatePut.setInt(2, to.status()); + updatePut.setString(3, to.errString()); + updatePut.setInt(4, to.pinLifetime()); + updatePut.setInt(5, to.fileLifetime()); + updatePut.setString(6, to.fileStorageType()); + updatePut.setString(7, to.overwriteOption()); + updatePut.setString(8, to.normalizedStFN()); + updatePut.setInt(9, to.surlUniqueID()); + updatePut.setLong(10, to.primaryKey()); + // run updateStatusPut... + log.trace("PtP CHUNK DAO - update method: {}", updatePut); + updatePut.executeUpdate(); + } catch (SQLException e) { + log.error("PtP CHUNK DAO: Unable to complete update! 
{}", e.getMessage(), e); + e.printStackTrace(); + } finally { + closeStatement(updatePut); + closeConnection(con); + } + } + + /** + * Updates the request_Put represented by the received ReducedPtPChunkDataTO by setting its + * normalized_targetSURL_StFN and targetSURL_uniqueID + * + * @param chunkTO + */ + public synchronized void updateIncomplete(ReducedPtPChunkDataTO chunkTO) { + + Connection con = null; + PreparedStatement stmt = null; + + try { + con = getConnection(); + stmt = con.prepareStatement(UPDATE_REDUCED_REQUEST_PUT_WHERE_ID_IS); + stmt.setString(1, chunkTO.normalizedStFN()); + stmt.setInt(2, chunkTO.surlUniqueID()); + stmt.setLong(3, chunkTO.primaryKey()); + log.trace("PtP CHUNK DAO - update incomplete: {}", stmt); + stmt.executeUpdate(); + } catch (SQLException e) { + log.error("PtP CHUNK DAO: Unable to complete update incomplete! {}", e.getMessage(), e); + e.printStackTrace(); + } finally { + closeStatement(stmt); + closeConnection(con); + } + } + + /** + * Method that queries the MySQL DB to find all entries matching the supplied TRequestToken. The + * Collection contains the corresponding PtPChunkDataTO objects. An initial simple query + * establishes the list of protocols associated with the request. A second complex query + * establishes all chunks associated with the request, by properly joining request_queue, + * request_Put and status_Put. The considered fields are: (1) From status_Put: the ID field which + * becomes the TOs primary key, and statusCode. (2) From request_Put: targetSURL and + * expectedFileSize. (3) From request_queue: pinLifetime, fileLifetime, config_FileStorageTypeID, + * s_token, config_OverwriteID. In case of any error, a log gets written and an empty collection + * is returned. No exception is returned. NOTE! Chunks in SRM_ABORTED status are NOT returned! 
+ * This is important because this method is intended to be used by the Feeders to fetch all chunks + * in the request, and aborted chunks should not be picked up for processing! + */ + public synchronized Collection find(TRequestToken requestToken) { + + Connection con = null; + PreparedStatement findProtocols = null; + PreparedStatement findRequest = null; + ResultSet rsProtocols = null; + ResultSet rsRequest = null; + + Collection results = Lists.newArrayList(); + + try { + + con = getManagedConnection(); + findProtocols = con.prepareStatement(SELECT_REQUEST_PROTOCOLS_WHERE_TOKEN_IS); + + findProtocols.setString(1, requestToken.getValue()); + + log.trace("PtP CHUNK DAO - find method: {}", findProtocols); + rsProtocols = findProtocols.executeQuery(); + + List protocols = Lists.newArrayList(); + while (rsProtocols.next()) { + protocols.add(rsProtocols.getString("tp.config_ProtocolsID")); + } + + // get chunks of the request + findRequest = con.prepareStatement(SELECT_FULL_REQUEST_PUT_WHERE_TOKEN_AND_STATUS); + findRequest.setString(1, requestToken.getValue()); + findRequest.setInt(2, statusCodeConverter.toDB(SRM_ABORTED)); + log.trace("PtP CHUNK DAO - find method: {}", findRequest); + rsRequest = findRequest.executeQuery(); + + while (rsRequest.next()) { + PtPChunkDataTO chunkDataTO = new PtPChunkDataTO(); + chunkDataTO.setFileStorageType(rsRequest.getString("rq.config_FileStorageTypeID")); + chunkDataTO.setOverwriteOption(rsRequest.getString("rq.config_OverwriteID")); + chunkDataTO.setTimeStamp(rsRequest.getTimestamp("rq.timeStamp")); + chunkDataTO.setPinLifetime(rsRequest.getInt("rq.pinLifetime")); + chunkDataTO.setFileLifetime(rsRequest.getInt("rq.fileLifetime")); + chunkDataTO.setSpaceToken(rsRequest.getString("rq.s_token")); + chunkDataTO.setClientDN(rsRequest.getString("rq.client_dn")); + + /** + * This code is only for the 1.3.18. This is a workaround to get FQANs using the proxy field + * on request_queue. 
The FE use the proxy field of request_queue to insert a single FQAN + * string containing all FQAN separated by the "#" char. The proxy is a BLOB, hence it has + * to be properly converted in string. + */ + java.sql.Blob blob = rsRequest.getBlob("rq.proxy"); + if (!rsRequest.wasNull() && blob != null) { + byte[] bdata = blob.getBytes(1, (int) blob.length()); + chunkDataTO.setVomsAttributes(new String(bdata)); + } + chunkDataTO.setPrimaryKey(rsRequest.getLong("rp.ID")); + chunkDataTO.setToSURL(rsRequest.getString("rp.targetSURL")); + + chunkDataTO.setNormalizedStFN(rsRequest.getString("rp.normalized_targetSURL_StFN")); + int uniqueID = rsRequest.getInt("rp.targetSURL_uniqueID"); + if (!rsRequest.wasNull()) { + chunkDataTO.setSurlUniqueID(Integer.valueOf(uniqueID)); + } + + chunkDataTO.setExpectedFileSize(rsRequest.getLong("rp.expectedFileSize")); + chunkDataTO.setProtocolList(protocols); + chunkDataTO.setRequestToken(requestToken.getValue()); + chunkDataTO.setStatus(rsRequest.getInt("sp.statusCode")); + results.add(chunkDataTO); + } + con.commit(); + } catch (SQLException e) { + log.error("PTP CHUNK DAO: {}", e.getMessage(), e); + e.printStackTrace(); + try { + con.rollback(); + } catch (SQLException e1) { + e1.printStackTrace(); + } + } finally { + closeResultSet(rsProtocols); + closeResultSet(rsRequest); + closeStatement(findProtocols); + closeStatement(findRequest); + closeConnection(con); + } + return results; + } + + /** + * Method used in extraordinary situations to signal that data retrieved from the DB was malformed + * and could not be translated into the StoRM object model. This method attempts to change the + * status of the chunk to SRM_FAILURE and record it in the DB, in the status_Put table. This + * operation could potentially fail because the source of the malformed problems could be a + * problematic DB; indeed, initially only log messages were recorded. 
Yet it soon became clear + * that the source of malformed data were actually the clients themselves and/or FE recording in + * the DB. In these circumstances the client would find its request as being in the + * SRM_IN_PROGRESS state for ever. Hence the pressing need to inform it of the encountered + * problems. + */ + public synchronized int fail(PtPChunkDataTO auxTO) { + + Connection con = null; + PreparedStatement signal = null; + int updated = 0; + + try { + con = getConnection(); + signal = con.prepareStatement(UPDATE_STATUS_PUT_WHERE_ID_IS); + signal.setInt(1, statusCodeConverter.toDB(SRM_FAILURE)); + signal.setString(2, "This chunk of the request is malformed!"); + signal.setLong(3, auxTO.primaryKey()); + log.trace("PtP CHUNK DAO - signalMalformedPtPChunk method: {}", signal); + updated = signal.executeUpdate(); + } catch (SQLException e) { + log.error( + "PtPChunkDAO! Unable to signal in DB that a chunk of " + + "the request was malformed! Request: {}; Error: {}", + auxTO.toString(), e.getMessage(), e); + e.printStackTrace(); + updated = 0; + } finally { + closeStatement(signal); + closeConnection(con); + } + return updated; + } + + /** + * Method that retrieves all expired requests in SRM_SPACE_AVAILABLE state. 
+ * + * @return a Map containing the ID of the request as key and the relative SURL as value + */ + public synchronized Map getExpiredSRM_SPACE_AVAILABLE() { + + return getExpired(SRM_SPACE_AVAILABLE); + } + + public synchronized Map getExpired(TStatusCode status) { + + Map expiredRequests = Maps.newHashMap(); + + Connection con = null; + PreparedStatement find = null; + ResultSet rs = null; + + try { + + con = getConnection(); + find = con.prepareStatement(SELECT_EXPIRED_REQUEST_PUT_WHERE_STATUS_IS); + find.setInt(1, statusCodeConverter.toDB(status)); + log.trace("PtP CHUNK DAO - getExpiredSRM_SPACE_AVAILABLE: {}", find); + rs = find.executeQuery(); + while (rs.next()) { + expiredRequests.put(rs.getLong("rp.ID"), rs.getString("rp.targetSURL")); + } + + } catch (SQLException e) { + + log.error("PtPChunkDAO! Unable to select expired " + + "SRM_SPACE_AVAILABLE chunks of PtP requests. {}", e.getMessage(), e); + e.printStackTrace(); + + } finally { + + closeResultSet(rs); + closeStatement(find); + closeConnection(con); + } + return expiredRequests; + } + + /** + * Method that updates chunks in SRM_SPACE_AVAILABLE state, into SRM_FILE_LIFETIME_EXPIRED. An + * array of Long representing the primary key of each chunk is required. This is needed when the + * client forgets to invoke srmPutDone(). In case of any error or exception, the returned int + * value will be zero or less than the input List size. + * + * @param the list of the request id to update + * + * @return The number of the updated records into the db + */ + public synchronized int transitExpiredSRM_SPACE_AVAILABLEtoSRM_FILE_LIFETIME_EXPIRED( + Collection ids) { + + Preconditions.checkNotNull(ids, "Invalid list of id"); + + String querySQL = "UPDATE status_Put sp " + + "JOIN (request_Put rp, request_queue rq) ON sp.request_PutID=rp.ID AND rp.request_queueID=rq.ID " + + "SET sp.statusCode=?, sp.explanation=? " + + "WHERE sp.statusCode=? 
AND UNIX_TIMESTAMP(NOW())-UNIX_TIMESTAMP(rq.timeStamp) >= rq.pinLifetime "; + + if (!ids.isEmpty()) { + querySQL += "AND rp.ID IN (" + StringUtils.join(ids.toArray(), ',') + ")"; + } + + Connection con = null; + PreparedStatement stmt = null; + int count = 0; + + try { + con = getConnection(); + stmt = con.prepareStatement(querySQL); + stmt.setInt(1, statusCodeConverter.toDB(SRM_FILE_LIFETIME_EXPIRED)); + stmt.setString(2, "Expired pinLifetime"); + stmt.setInt(3, statusCodeConverter.toDB(SRM_SPACE_AVAILABLE)); + + log.trace("PtP CHUNK DAO - transit SRM_SPACE_AVAILABLE to SRM_FILE_LIFETIME_EXPIRED: {}", + stmt); + + count = stmt.executeUpdate(); + + } catch (SQLException e) { + log.error("PtPChunkDAO! Unable to transit chunks from " + + "SRM_SPACE_AVAILABLE to SRM_FILE_LIFETIME_EXPIRED! {}", e.getMessage(), e); + e.printStackTrace(); + } finally { + closeStatement(stmt); + closeConnection(con); + } + log.trace("PtPChunkDAO! {} chunks of PtP requests were transited " + + "from SRM_SPACE_AVAILABLE to SRM_FILE_LIFETIME_EXPIRED.", count); + return count; + } + + public synchronized int transitLongTimeInProgressRequestsToStatus(long expirationTime, TStatusCode status, String explanation) { + + String sql = "UPDATE request_queue rq, request_Put rp, status_Put sp " + + "SET rq.status=?, sp.statusCode=?, sp.explanation=? " + + "WHERE rq.ID = rp.request_queueID and rp.ID = sp.request_PutID " + + "AND rq.status=? AND rq.timeStamp <= DATE_SUB(CURRENT_TIMESTAMP(), INTERVAL ? 
SECOND)"; + + Connection con = null; + PreparedStatement stmt = null; + int count = 0; + + try { + con = getConnection(); + stmt = con.prepareStatement(sql); + stmt.setInt(1, statusCodeConverter.toDB(status)); + stmt.setInt(2, statusCodeConverter.toDB(status)); + stmt.setString(3, explanation); + stmt.setInt(4, statusCodeConverter.toDB(SRM_REQUEST_INPROGRESS)); + stmt.setLong(5, expirationTime); + log.trace("PtP CHUNK DAO - transit SRM_REQUEST_INPROGRESS to {}: {}", status, stmt); + count = stmt.executeUpdate(); + + } catch (SQLException e) { + log.error("PtPChunkDAO! Unable to transit chunks from " + + "SRM_REQUEST_INPROGRESS to SRM_FAILURE! {}", e.getMessage(), e); + e.printStackTrace(); + } finally { + closeStatement(stmt); + closeConnection(con); + } + return count; + } + + public synchronized int updateStatus(Collection ids, TStatusCode fromStatus, + TStatusCode toStatus, String explanation) { + + Preconditions.checkNotNull(ids, "Invalid list of id"); + + if (ids.isEmpty()) { + return 0; + } + + String querySQL = "UPDATE request_queue rq, request_Put rp, status_Put sp " + + "SET rq.status=?, sp.statusCode=?, sp.explanation=? " + + "WHERE rq.ID = rp.request_queueID and rp.ID = sp.request_PutID " + + "AND rq.status=? AND rq.ID IN (" + buildInClauseForArray(ids.size()) + ")"; + + Connection con = null; + PreparedStatement stmt = null; + int count = 0; + + try { + con = getConnection(); + stmt = con.prepareStatement(querySQL); + stmt.setInt(1, statusCodeConverter.toDB(toStatus)); + stmt.setInt(2, statusCodeConverter.toDB(toStatus)); + stmt.setString(3, explanation); + stmt.setInt(4, statusCodeConverter.toDB(fromStatus)); + int i = 5; + for (Long id : ids) { + stmt.setLong(i, id); + i++; + } + log.trace("PtP CHUNK DAO - transit SRM_REQUEST_INPROGRESS to SRM_FAILURE: {}", stmt); + count = stmt.executeUpdate(); + + } catch (SQLException e) { + log.error("PtPChunkDAO! Unable to transit chunks from " + + "SRM_REQUEST_INPROGRESS to SRM_FAILURE! 
{}", e.getMessage(), e); + e.printStackTrace(); + } finally { + closeStatement(stmt); + closeConnection(con); + } + log.trace("PtPChunkDAO! {} chunks of PtP requests were transited " + + "from SRM_REQUEST_INPROGRESS to SRM_FAILURE.", count); + return count; + } + + public synchronized int updateStatus(TRequestToken requestToken, int[] surlsUniqueIDs, + String[] surls, TStatusCode statusCode, String explanation) { + + if (requestToken == null || requestToken.getValue().trim().isEmpty() || explanation == null) { + throw new IllegalArgumentException("Unable to perform the updateStatus, " + + "invalid arguments: requestToken=" + requestToken + " explanation=" + explanation); + } + return doUpdateStatus(requestToken, surlsUniqueIDs, surls, statusCode, explanation, true, true); + } + + private int doUpdateStatus(TRequestToken requestToken, int[] surlsUniqueIDs, String[] surls, + TStatusCode statusCode, String explanation, boolean withRequestToken, + boolean withExplaination) throws IllegalArgumentException { + + if ((withRequestToken && requestToken == null) || (withExplaination && explanation == null)) { + throw new IllegalArgumentException( + "Unable to perform the updateStatus, " + "invalid arguments: withRequestToken=" + + withRequestToken + " requestToken=" + requestToken + " withExplaination=" + + withExplaination + " explaination=" + explanation); + } + + String str = + "UPDATE status_Put sp JOIN (request_Put rp, request_queue rq) ON sp.request_PutID=rp.ID AND " + + "rp.request_queueID=rq.ID " + "SET sp.statusCode=? 
"; + if (withExplaination) { + str += " , " + buildExpainationSet(explanation); + } + str += " WHERE "; + if (withRequestToken) { + str += buildTokenWhereClause(requestToken) + " AND "; + } + str += " ( rp.targetSURL_uniqueID IN " + makeSURLUniqueIDWhere(surlsUniqueIDs) + + " AND rp.targetSURL IN " + makeSurlString(surls) + " ) "; + + Connection con = null; + PreparedStatement stmt = null; + int count = 0; + + try { + con = getConnection(); + stmt = con.prepareStatement(str); + stmt.setInt(1, statusCodeConverter.toDB(statusCode)); + + log.trace("PTP CHUNK DAO - updateStatus: {}", stmt); + count = stmt.executeUpdate(); + if (count == 0) { + log.trace("PTP CHUNK DAO! No chunk of PTP request was updated to {}.", statusCode); + } else { + log.info("PTP CHUNK DAO! {} chunks of PTP requests were updated " + "to {}.", count, + statusCode); + } + } catch (SQLException e) { + log.error("PTP CHUNK DAO! Unable to updated from to {}! {}", statusCode, e.getMessage(), e); + e.printStackTrace(); + } finally { + closeStatement(stmt); + closeConnection(con); + } + return count; + } + + public synchronized int updateStatusOnMatchingStatus(TRequestToken requestToken, + TStatusCode expectedStatusCode, TStatusCode newStatusCode, String explanation) { + + if (requestToken == null || requestToken.getValue().trim().isEmpty() || explanation == null) { + throw new IllegalArgumentException("Unable to perform the updateStatusOnMatchingStatus, " + + "invalid arguments: requestToken=" + requestToken + " explanation=" + explanation); + } + return doUpdateStatusOnMatchingStatus(requestToken, null, null, expectedStatusCode, + newStatusCode, explanation, true, false, true); + } + + public synchronized int updateStatusOnMatchingStatus(TRequestToken requestToken, + int[] surlsUniqueIDs, String[] surls, TStatusCode expectedStatusCode, + TStatusCode newStatusCode) { + + if (requestToken == null || requestToken.getValue().trim().isEmpty() || surlsUniqueIDs == null + || surls == null || 
surlsUniqueIDs.length == 0 || surls.length == 0 + || surlsUniqueIDs.length != surls.length) { + throw new IllegalArgumentException("Unable to perform the updateStatusOnMatchingStatus, " + + "invalid arguments: requestToken=" + requestToken + "surlsUniqueIDs=" + surlsUniqueIDs + + " surls=" + surls); + } + return doUpdateStatusOnMatchingStatus(requestToken, surlsUniqueIDs, surls, expectedStatusCode, + newStatusCode, null, true, true, false); + } + + private int doUpdateStatusOnMatchingStatus(TRequestToken requestToken, int[] surlsUniqueIDs, + String[] surls, TStatusCode expectedStatusCode, TStatusCode newStatusCode, String explanation, + boolean withRequestToken, boolean withSurls, boolean withExplanation) { + + if ((withRequestToken && requestToken == null) || (withExplanation && explanation == null) + || (withSurls && (surlsUniqueIDs == null || surls == null))) { + throw new IllegalArgumentException("Unable to perform the doUpdateStatusOnMatchingStatus, " + + "invalid arguments: withRequestToken=" + withRequestToken + " requestToken=" + + requestToken + " withSurls=" + withSurls + " surlsUniqueIDs=" + surlsUniqueIDs + + " surls=" + surls + " withExplaination=" + withExplanation + " explanation=" + + explanation); + } + + String str = "UPDATE " + + "status_Put sp JOIN (request_Put rp, request_queue rq) ON sp.request_PutID=rp.ID AND rp.request_queueID=rq.ID " + + "SET sp.statusCode=? "; + if (withExplanation) { + str += " , " + buildExpainationSet(explanation); + } + str += " WHERE sp.statusCode=? 
"; + if (withRequestToken) { + str += " AND " + buildTokenWhereClause(requestToken); + } + if (withSurls) { + str += " AND " + buildSurlsWhereClause(surlsUniqueIDs, surls); + } + + Connection con = null; + PreparedStatement stmt = null; + int count = 0; + + try { + con = getConnection(); + stmt = con.prepareStatement(str); + stmt.setInt(1, statusCodeConverter.toDB(newStatusCode)); + stmt.setInt(2, statusCodeConverter.toDB(expectedStatusCode)); + log.trace("PTP CHUNK DAO - updateStatusOnMatchingStatus: {}", stmt); + count = stmt.executeUpdate(); + if (count == 0) { + log.trace("PTP CHUNK DAO! No chunk of PTP request was updated " + "from {} to {}.", + expectedStatusCode, newStatusCode); + } else { + log.debug("PTP CHUNK DAO! {} chunks of PTP requests were updated " + "from {} to {}.", + count, expectedStatusCode, newStatusCode); + } + } catch (SQLException e) { + log.error("PTP CHUNK DAO! Unable to updated from {} to {}! Error: {}", expectedStatusCode, + newStatusCode, e.getMessage(), e); + e.printStackTrace(); + } finally { + closeStatement(stmt); + closeConnection(con); + } + return count; + } + + public Collection find(int[] surlsUniqueIDs, String[] surlsArray, String dn) { + + if (surlsUniqueIDs == null || surlsUniqueIDs.length == 0 || surlsArray == null + || surlsArray.length == 0 || dn == null) { + throw new IllegalArgumentException( + "Unable to perform the find, " + "invalid arguments: surlsUniqueIDs=" + surlsUniqueIDs + + " surlsArray=" + surlsArray + " dn=" + dn); + } + return find(surlsUniqueIDs, surlsArray, dn, true); + } + + private synchronized Collection find(int[] surlsUniqueIDs, String[] surlsArray, + String dn, boolean withDn) throws IllegalArgumentException { + + if ((withDn && dn == null) || surlsUniqueIDs == null || surlsUniqueIDs.length == 0 + || surlsArray == null || surlsArray.length == 0) { + throw new IllegalArgumentException( + "Unable to perform the find, " + "invalid arguments: surlsUniqueIDs=" + surlsUniqueIDs + + " surlsArray=" + 
surlsArray + " withDn=" + withDn + " dn=" + dn); + } + + Connection con = null; + PreparedStatement find = null; + ResultSet rs = null; + + try { + // get chunks of the request + String str = + "SELECT rq.ID, rq.r_token, rq.config_FileStorageTypeID, rq.config_OverwriteID, rq.timeStamp, rq.pinLifetime, rq.fileLifetime, " + + "rq.s_token, rq.client_dn, rq.proxy, rp.ID, rp.targetSURL, rp.expectedFileSize, rp.normalized_targetSURL_StFN, rp.targetSURL_uniqueID, " + + "sp.statusCode " + "FROM request_queue rq JOIN (request_Put rp, status_Put sp) " + + "ON (rp.request_queueID=rq.ID AND sp.request_PutID=rp.ID) " + + "WHERE ( rp.targetSURL_uniqueID IN " + makeSURLUniqueIDWhere(surlsUniqueIDs) + + " AND rp.targetSURL IN " + makeSurlString(surlsArray) + " )"; + + if (withDn) { + str += " AND rq.client_dn=\'" + dn + "\'"; + } + + con = getConnection(); + find = con.prepareStatement(str); + + List list = Lists.newArrayList(); + + log.trace("PtP CHUNK DAO - find method: {}", find); + rs = find.executeQuery(); + + while (rs.next()) { + + PtPChunkDataTO chunkDataTO = new PtPChunkDataTO(); + chunkDataTO.setFileStorageType(rs.getString("rq.config_FileStorageTypeID")); + chunkDataTO.setOverwriteOption(rs.getString("rq.config_OverwriteID")); + chunkDataTO.setTimeStamp(rs.getTimestamp("rq.timeStamp")); + chunkDataTO.setPinLifetime(rs.getInt("rq.pinLifetime")); + chunkDataTO.setFileLifetime(rs.getInt("rq.fileLifetime")); + chunkDataTO.setSpaceToken(rs.getString("rq.s_token")); + chunkDataTO.setClientDN(rs.getString("rq.client_dn")); + + /** + * This code is only for the 1.3.18. This is a workaround to get FQANs using the proxy field + * on request_queue. The FE use the proxy field of request_queue to insert a single FQAN + * string containing all FQAN separated by the "#" char. The proxy is a BLOB, hence it has + * to be properly converted in string. 
+ */ + java.sql.Blob blob = rs.getBlob("rq.proxy"); + if (!rs.wasNull() && blob != null) { + byte[] bdata = blob.getBytes(1, (int) blob.length()); + chunkDataTO.setVomsAttributes(new String(bdata)); + } + chunkDataTO.setPrimaryKey(rs.getLong("rp.ID")); + chunkDataTO.setToSURL(rs.getString("rp.targetSURL")); + + chunkDataTO.setNormalizedStFN(rs.getString("rp.normalized_targetSURL_StFN")); + int uniqueID = rs.getInt("rp.targetSURL_uniqueID"); + if (!rs.wasNull()) { + chunkDataTO.setSurlUniqueID(Integer.valueOf(uniqueID)); + } + + chunkDataTO.setExpectedFileSize(rs.getLong("rp.expectedFileSize")); + chunkDataTO.setRequestToken(rs.getString("rq.r_token")); + chunkDataTO.setStatus(rs.getInt("sp.statusCode")); + list.add(chunkDataTO); + } + return list; + } catch (SQLException e) { + log.error("PTP CHUNK DAO: {}", e.getMessage(), e); + /* return empty Collection! */ + return Lists.newArrayList(); + } finally { + closeResultSet(rs); + closeStatement(find); + closeConnection(con); + } + } + + private String buildExpainationSet(String explanation) { + + return " sp.explanation='" + explanation + "' "; + } + + private String buildTokenWhereClause(TRequestToken requestToken) { + + return " rq.r_token='" + requestToken.toString() + "' "; + } + + private String buildSurlsWhereClause(int[] surlsUniqueIDs, String[] surls) { + + return " ( rp.targetSURL_uniqueID IN " + makeSURLUniqueIDWhere(surlsUniqueIDs) + + " AND rp.targetSURL IN " + makeSurlString(surls) + " ) "; + } + + /** + * Method that returns a String containing all Surl's IDs. + */ + private String makeSURLUniqueIDWhere(int[] surlUniqueIDs) { + + StringBuilder sb = new StringBuilder("("); + for (int i = 0; i < surlUniqueIDs.length; i++) { + if (i > 0) { + sb.append(","); + } + sb.append(surlUniqueIDs[i]); + } + sb.append(")"); + return sb.toString(); + } + + /** + * Method that returns a String containing all Surls. 
+ */ + private String makeSurlString(String[] surls) { + + StringBuilder sb = new StringBuilder("("); + int n = surls.length; + + for (int i = 0; i < n; i++) { + + SURL requestedSURL; + + try { + requestedSURL = SURL.makeSURLfromString(surls[i]); + } catch (NamespaceException e) { + log.error(e.getMessage(), e); + log.debug("Skip '{}' during query creation", surls[i]); + continue; + } + + sb.append("'"); + sb.append(requestedSURL.getNormalFormAsString()); + sb.append("','"); + sb.append(requestedSURL.getQueryFormAsString()); + sb.append("'"); + + if (i < (n - 1)) { + sb.append(","); + } + } + + sb.append(")"); + return sb.toString(); + } + +} diff --git a/src/main/java/it/grid/storm/persistence/impl/mysql/RequestSummaryDAOMySql.java b/src/main/java/it/grid/storm/persistence/impl/mysql/RequestSummaryDAOMySql.java new file mode 100644 index 000000000..a6b3c9a51 --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/impl/mysql/RequestSummaryDAOMySql.java @@ -0,0 +1,920 @@ +/* + * + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +package it.grid.storm.persistence.impl.mysql; + +import static it.grid.storm.srm.types.TRequestType.EMPTY; +import static it.grid.storm.srm.types.TRequestType.PREPARE_TO_GET; +import static it.grid.storm.srm.types.TRequestType.PREPARE_TO_PUT; +import static it.grid.storm.srm.types.TStatusCode.SRM_ABORTED; +import static it.grid.storm.srm.types.TStatusCode.SRM_FAILURE; +import static it.grid.storm.srm.types.TStatusCode.SRM_REQUEST_INPROGRESS; +import static it.grid.storm.srm.types.TStatusCode.SRM_REQUEST_QUEUED; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.util.Collection; +import java.util.Iterator; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.collect.Lists; + +import it.grid.storm.config.Configuration; +import it.grid.storm.persistence.converter.RequestTypeConverter; +import it.grid.storm.persistence.converter.StatusCodeConverter; +import it.grid.storm.persistence.dao.AbstractDAO; +import it.grid.storm.persistence.dao.RequestSummaryDAO; +import it.grid.storm.persistence.model.RequestSummaryDataTO; +import it.grid.storm.persistence.pool.StormDbConnectionPool; +import it.grid.storm.srm.types.TRequestToken; +import it.grid.storm.srm.types.TRequestType; +import it.grid.storm.srm.types.TStatusCode; + +/** + * DAO class for RequestSummaryCatalog. This DAO is specifically designed to connect to a MySQL DB. + * + * @author EGRID ICTP + * @version 3.0 + * @date May 2005 + */ +public class RequestSummaryDAOMySql extends AbstractDAO implements RequestSummaryDAO { + + private static final Logger log = LoggerFactory.getLogger(RequestSummaryDAOMySql.class); + + private static final String SELECT_REQUEST_WHERE_STATUS_WITH_LIMIT = + "SELECT ID, config_RequestTypeID, r_token, timeStamp, client_dn, proxy " + + "FROM request_queue WHERE status=? 
LIMIT ?"; + + private static final String UPDATE_REQUEST_STATUS_WHERE_ID_IN = + "UPDATE request_queue SET status=?, errstring=? WHERE ID IN ?"; + + private static final String UPDATE_REQUEST_STATUS_WHERE_ID_IS = + "UPDATE request_queue SET status=?, errstring=? WHERE ID=?"; + + private static final String UPDATE_REQUEST_STATUS_WHERE_TOKEN_IS = + "UPDATE request_queue SET status=?, errstring=? WHERE r_token=?"; + + private static final String UPDATE_REQUEST_STATUS_WHERE_TOKEN_AND_STATUS_ARE = + "UPDATE request_queue SET status=?, errstring=? WHERE r_token=? AND status=?"; + + private static final String UPDATE_REQUEST_STATUS_AND_PINLIFETIME_WHERE_TOKEN_IS = + "UPDATE request_queue " + + "SET status=?, errstring=?, pinLifetime=pinLifetime+(UNIX_TIMESTAMP()-UNIX_TIMESTAMP(timeStamp)) " + + "WHERE r_token=?"; + + private static final String SELECT_REQUEST_WHERE_TOKEN_IS = + "SELECT ID, config_RequestTypeID from request_queue WHERE r_token=?"; + + private static final String SELECT_FULL_REQUEST_WHERE_TOKEN_IS = + "SELECT * from request_queue WHERE r_token=?"; + + private static final String SELECT_REQUEST_WHERE_TOKEN_AND_STATUS = + "SELECT ID, config_RequestTypeID FROM request_queue WHERE r_token=? AND status=?"; + + private static final String UPDATE_REQUEST_GET_STATUS_WHERE_ID_IS = "UPDATE status_Get s " + + "JOIN (request_queue r, request_Get t) ON (s.request_GetID=t.ID AND t.request_queueID=r.ID) " + + "SET s.statusCode=?, s.explanation=? WHERE r.ID=?"; + + private static final String UPDATE_REQUEST_PUT_STATUS_WHERE_ID_IS = "UPDATE status_Put s " + + "JOIN (request_queue r, request_Put t) ON (s.request_PutID=t.ID AND t.request_queueID=r.ID) " + + "SET s.statusCode=?, s.explanation=? WHERE r.ID=?"; + + private static final String UPDATE_REQUEST_BOL_STATUS_WHERE_ID_IS = "UPDATE status_BoL s " + + "JOIN (request_queue r, request_BoL t) ON (s.request_BoLID=t.ID AND t.request_queueID=r.ID) " + + "SET s.statusCode=?, s.explanation=? 
WHERE r.ID=?"; + + private static final String UPDATE_REQUEST_GET_STATUS_WHERE_ID_IS_AND_SURL_IN = "UPDATE status_Get s " + + "JOIN (request_queue r, request_Get t) ON (s.request_GetID=t.ID AND t.request_queueID=r.ID) " + + "SET s.statusCode=?, s.explanation=? WHERE r.ID=? AND sourceSURL IN ?"; + + private static final String UPDATE_REQUEST_PUT_STATUS_WHERE_ID_IS_AND_SURL_IN = "UPDATE status_Put s " + + "JOIN (request_queue r, request_Put t) ON (s.request_PutID=t.ID AND t.request_queueID=r.ID) " + + "SET s.statusCode=?, s.explanation=? WHERE r.ID=? AND targetSURL IN ?"; + + private static final String UPDATE_REQUEST_BOL_STATUS_WHERE_ID_IS_AND_SURL_IN = "UPDATE status_BoL s " + + "JOIN (request_queue r, request_BoL t) ON (s.request_BoLID=t.ID AND t.request_queueID=r.ID) " + + "SET s.statusCode=?, s.explanation=? WHERE r.ID=? AND sourceSURL IN ?"; + + private static final String SELECT_PURGEABLE_REQUESTS_WITH_LIMIT = + "SELECT ID, r_token FROM request_queue " + + "WHERE UNIX_TIMESTAMP(NOW()) - UNIX_TIMESTAMP(timeStamp) > ? AND status <> ? AND status <> ? LIMIT ?"; + + private static final String COUNT_PURGEABLE_REQUESTS = "SELECT count(*) FROM request_queue " + + "WHERE UNIX_TIMESTAMP(NOW()) - UNIX_TIMESTAMP(timeStamp) > ? AND status <> ? AND status <> ? 
"; + + private static final String DELETE_REQUEST_WHERE_ID_IN = + "DELETE FROM request_queue WHERE ID in ?"; + + private static final String DELETE_ORPHANS_DIR_OPTION = + "DELETE request_DirOption FROM request_DirOption " + + " LEFT JOIN request_Get ON request_DirOption.ID = request_Get.request_DirOptionID" + + " LEFT JOIN request_BoL ON request_DirOption.ID = request_BoL.request_DirOptionID " + + " LEFT JOIN request_Copy ON request_DirOption.ID = request_Copy.request_DirOptionID" + + " WHERE request_Copy.request_DirOptionID IS NULL AND" + + " request_Get.request_DirOptionID IS NULL AND" + + " request_BoL.request_DirOptionID IS NULL;"; + + private static RequestSummaryDAO instance; + + private final StatusCodeConverter statusCodeConverter; + private final RequestTypeConverter requestTypeConverter; + private final int MAX_FETCHED_REQUESTS = Configuration.getInstance().getPickingMaxBatchSize(); + + public static synchronized RequestSummaryDAO getInstance() { + if (instance == null) { + instance = new RequestSummaryDAOMySql(); + } + return instance; + } + + private RequestSummaryDAOMySql() { + super(StormDbConnectionPool.getInstance()); + statusCodeConverter = StatusCodeConverter.getInstance(); + requestTypeConverter = RequestTypeConverter.getInstance(); + } + + /** + * Method that retrieves requests in the SRM_REQUEST_QUEUED status: retrieved requests are limited + * to the number specified by the Configuration method getPicker2MaxBatchSize. All retrieved + * requests get their global status transited to SRM_REQUEST_INPROGRESS. A Collection of + * RequestSummaryDataTO is returned: if none are found, an empty collection is returned. + */ + public synchronized Collection fetchNewRequests(int limit) { + + Connection con = null; + PreparedStatement fetch = null; + PreparedStatement update = null; + ResultSet fetched = null; + Collection results = Lists.newArrayList(); + int howMuch = limit > MAX_FETCHED_REQUESTS ? 
MAX_FETCHED_REQUESTS : limit; + + try { + con = getManagedConnection(); + + // get id, request type, request token and client_DN of newly added + // requests, which must be in SRM_REQUEST_QUEUED state + fetch = con.prepareStatement(SELECT_REQUEST_WHERE_STATUS_WITH_LIMIT); + fetch.setInt(1, statusCodeConverter.toDB(SRM_REQUEST_QUEUED)); + fetch.setInt(2, howMuch); + fetched = fetch.executeQuery(); + + Collection rowids = Lists.newArrayList(); + + while (fetched.next()) { + long id = fetched.getLong("ID"); + rowids.add(Long.valueOf(id)); + RequestSummaryDataTO aux = new RequestSummaryDataTO(); + aux.setPrimaryKey(id); + aux.setRequestType(fetched.getString("config_RequestTypeID")); + aux.setRequestToken(fetched.getString("r_token")); + aux.setClientDN(fetched.getString("client_dn")); + aux.setTimestamp(fetched.getTimestamp("timeStamp")); + + /** + * This code is only for the 1.3.18. This is a workaround to get FQANs using the proxy field + * on request_queue. The FE use the proxy field of request_queue to insert a single FQAN + * string containing all FQAN separated by the "#" char. The proxy is a BLOB, hence it has + * to be properly converted in string. + */ + java.sql.Blob blob = fetched.getBlob("proxy"); + if (blob != null) { + byte[] bdata = blob.getBytes(1, (int) blob.length()); + aux.setVomsAttributes(new String(bdata)); + } + + results.add(aux); + } + + // transit state from SRM_REQUEST_QUEUED to SRM_REQUEST_INPROGRESS + if (!results.isEmpty()) { + update = con.prepareStatement(UPDATE_REQUEST_STATUS_WHERE_ID_IN); + update.setInt(1, statusCodeConverter.toDB(SRM_REQUEST_INPROGRESS)); + update.setString(2, "Request handled!"); + update.setString(3, makeWhereString(rowids)); + log.trace("REQUEST SUMMARY DAO - findNew: executing {}", update); + update.executeUpdate(); + } + + // commit and finish transaction + con.commit(); + + } catch (SQLException e) { + log.error("REQUEST SUMMARY DAO - findNew: Unable to complete picking. " + + "Error: {}. 
Rolling back!", e.getMessage(), e); + e.printStackTrace(); + try { + con.rollback(); + } catch (SQLException e1) { + e1.printStackTrace(); + } + + } finally { + closeResultSet(fetched); + closeStatement(fetch); + closeStatement(update); + closeConnection(con); + } + + return results; + } + + /** + * Method used to signal in the DB that a request failed: the status of the request identified by + * the primary key index is transited to SRM_FAILURE, with the supplied explanation String. The + * supplied index is the primary key of the global request. In case of any error, nothing gets + * done and no exception is thrown, but proper error messages get logged. + */ + public void failRequest(long requestId, String explanation) { + + Connection con = null; + PreparedStatement ps = null; + try { + con = getConnection(); + ps = con.prepareStatement(UPDATE_REQUEST_STATUS_WHERE_ID_IS); + ps.setInt(1, statusCodeConverter.toDB(SRM_FAILURE)); + ps.setString(2, explanation); + ps.setLong(3, requestId); + log.trace("REQUEST SUMMARY DAO! failRequest executing: {}", ps); + ps.executeUpdate(); + } catch (SQLException e) { + log.error("REQUEST SUMMARY DAO! Unable to transit request identified by " + + "ID {} to SRM_FAILURE! Error: {}", requestId, e.getMessage(), e); + e.printStackTrace(); + } finally { + closeStatement(ps); + closeConnection(con); + } + } + + /** + * Method used to signal in the DB that a PtGRequest failed. The global status transits to + * SRM_FAILURE, as well as that of each chunk associated to the request. The supplied explanation + * string is used both for the global status as well as for each individual chunk. The supplied + * index is the primary key of the global request. In case of any error, nothing gets done and no + * exception is thrown, but proper error messages get logged. 
+ */ + public void failPtGRequest(long requestId, String explanation) { + + Connection con = null; + PreparedStatement updateReq = null; + PreparedStatement updateChunk = null; + + int failCode = statusCodeConverter.toDB(SRM_FAILURE); + try { + // start transaction + con = getManagedConnection(); + + // update global status + updateReq = con.prepareStatement(UPDATE_REQUEST_STATUS_WHERE_ID_IS); + updateReq.setInt(1, failCode); + updateReq.setString(2, explanation); + updateReq.setLong(3, requestId); + log.trace("REQUEST SUMMARY DAO! failPtGRequest executing: {}", updateReq); + updateReq.executeUpdate(); + + // update each chunk status + updateChunk = con.prepareStatement(UPDATE_REQUEST_GET_STATUS_WHERE_ID_IS); + updateChunk.setInt(1, failCode); + updateChunk.setString(2, explanation); + updateChunk.setLong(3, requestId); + log.trace("REQUEST SUMMARY DAO! failPtGRequest executing: {}", updateChunk); + updateChunk.executeUpdate(); + + // commit and finish transaction + con.commit(); + } catch (SQLException e) { + log.error( + "REQUEST SUMMARY DAO! Unable to transit PtG request identified " + + "by ID {} to SRM_FAILURE! Error: {}\nRolling back...", + requestId, e.getMessage(), e); + e.printStackTrace(); + try { + con.rollback(); + } catch (SQLException e1) { + e1.printStackTrace(); + } + } finally { + closeStatement(updateReq); + closeStatement(updateChunk); + closeConnection(con); + } + } + + /** + * Method used to signal in the DB that a PtPRequest failed. The global status transits to + * SRM_FAILURE, as well as that of each chunk associated to the request. The supplied explanation + * string is used both for the global status as well as for each individual chunk. The supplied + * index is the primary key of the global request. In case of any error, nothing gets done and no + * exception is thrown, but proper error messagges get logged. 
+ */ + public void failPtPRequest(long requestId, String explanation) { + + Connection con = null; + PreparedStatement updateReq = null; + PreparedStatement updateChunk = null; + int failCode = statusCodeConverter.toDB(SRM_FAILURE); + try { + // start transaction + con = getManagedConnection(); + + // update global status + updateReq = con.prepareStatement(UPDATE_REQUEST_STATUS_WHERE_ID_IS); + updateReq.setInt(1, failCode); + updateReq.setString(2, explanation); + updateReq.setLong(3, requestId); + log.trace("REQUEST SUMMARY DAO! failPtPRequest executing: {}", updateReq); + updateReq.executeUpdate(); + + // update each chunk status + updateChunk = con.prepareStatement(UPDATE_REQUEST_PUT_STATUS_WHERE_ID_IS); + updateChunk.setInt(1, failCode); + updateChunk.setString(2, explanation); + updateChunk.setLong(3, requestId); + log.trace("REQUEST SUMMARY DAO! failPtPRequest executing: {}", updateChunk); + updateChunk.executeUpdate(); + + // commit and finish transaction + con.commit(); + + } catch (SQLException e) { + log.error( + "REQUEST SUMMARY DAO! Unable to transit PtP request identified " + + "by ID {} to SRM_FAILURE! Error: {}\nRolling back...", + requestId, e.getMessage(), e); + try { + con.rollback(); + } catch (SQLException e1) { + e1.printStackTrace(); + } + } finally { + closeStatement(updateReq); + closeStatement(updateChunk); + closeConnection(con); + } + } + + /** + * Method used to update the global status of the request identified by the RequestToken rt. It + * gets updated the supplied status, with the supplied explanation String. If the supplied request + * token does not exist, nothing happens. 
+ */ + public void updateGlobalStatus(TRequestToken requestToken, TStatusCode status, + String explanation) { + + Connection con = null; + PreparedStatement update = null; + try { + con = getConnection(); + update = con.prepareStatement(UPDATE_REQUEST_STATUS_WHERE_TOKEN_IS); + update.setInt(1, statusCodeConverter.toDB(status)); + update.setString(2, explanation); + update.setString(3, requestToken.getValue()); + log.trace("REQUEST SUMMARY DAO - updateGlobalStatus: executing {}", update); + update.executeUpdate(); + } catch (SQLException e) { + log.error("REQUEST SUMMARY DAO: {}", e.getMessage(), e); + e.printStackTrace(); + } finally { + closeStatement(update); + closeConnection(con); + } + } + + public void updateGlobalStatusOnMatchingGlobalStatus(TRequestToken requestToken, + TStatusCode expectedStatusCode, TStatusCode newStatusCode, String explanation) { + + Connection con = null; + PreparedStatement update = null; + try { + con = getConnection(); + update = con.prepareStatement(UPDATE_REQUEST_STATUS_WHERE_TOKEN_AND_STATUS_ARE); + update.setInt(1, statusCodeConverter.toDB(newStatusCode)); + update.setString(2, explanation); + update.setString(3, requestToken.getValue()); + update.setInt(4, statusCodeConverter.toDB(expectedStatusCode)); + log.trace("REQUEST SUMMARY DAO - updateGlobalStatusOnMatchingGlobalStatus: executing {}", + update); + update.executeUpdate(); + } catch (SQLException e) { + log.error("REQUEST SUMMARY DAO: {}", e.getMessage(), e); + } finally { + closeStatement(update); + closeConnection(con); + } + } + + /** + * Method used to update the global status of the request identified by the RequestToken rt. It + * gets updated the supplied status, with the supplied explanation String and pin and file + * lifetimes are updated in order to start the countdown from now. If the supplied request token + * does not exist, nothing happens. 
+ */ + public void updateGlobalStatusPinFileLifetime(TRequestToken requestToken, TStatusCode status, + String explanation) { + + Connection con = null; + PreparedStatement update = null; + + try { + con = getConnection(); + update = con.prepareStatement(UPDATE_REQUEST_STATUS_AND_PINLIFETIME_WHERE_TOKEN_IS); + update.setInt(1, statusCodeConverter.toDB(status)); + update.setString(2, explanation); + update.setString(3, requestToken.getValue()); + log.trace("REQUEST SUMMARY DAO - updateGlobalStatus: executing {}", update); + update.executeUpdate(); + + } catch (SQLException e) { + log.error("REQUEST SUMMARY DAO: {}", e.getMessage(), e); + e.printStackTrace(); + } finally { + closeStatement(update); + closeConnection(con); + } + } + + /** + * Method used to transit the status of a request that is in SRM_REQUEST_QUEUED state, to + * SRM_ABORTED. All files associated with the request will also get their status changed to + * SRM_ABORTED. If the supplied token is null, or not found, or not in the SRM_REQUEST_QUEUED + * state, then nothing happens. 
+ */ + public void abortRequest(TRequestToken requestToken) { + + Connection con = null; + PreparedStatement update = null; + PreparedStatement query = null; + ResultSet rs = null; + + try { + con = getManagedConnection(); + + query = con.prepareStatement(SELECT_REQUEST_WHERE_TOKEN_AND_STATUS); + query.setString(1, requestToken.getValue()); + query.setInt(2, statusCodeConverter.toDB(SRM_REQUEST_QUEUED)); + log.trace("REQUEST SUMMARY DAO - abortRequest - {}", query); + rs = query.executeQuery(); + + if (rs.next()) { + long id = rs.getLong("ID"); + String type = rs.getString("config_RequestTypeID"); + update = con.prepareStatement(UPDATE_REQUEST_STATUS_WHERE_ID_IS); + update.setInt(1, statusCodeConverter.toDB(SRM_ABORTED)); + update.setString(2, "User aborted request!"); + update.setLong(3, id); + log.trace("REQUEST SUMMARY DAO - abortRequest - {}", update); + update.executeUpdate(); + + // update single chunk file statuses + TRequestType rtyp = requestTypeConverter.toSTORM(type); + if (EMPTY.equals(rtyp)) { + log.error("REQUEST SUMMARY DAO - Unable to complete abortRequest: " + + "could not update file statuses because the request type could " + + "not be translated from the DB!"); + con.rollback(); + } else { + if (PREPARE_TO_GET.equals(rtyp)) { + update = con.prepareStatement(UPDATE_REQUEST_GET_STATUS_WHERE_ID_IS); + } else if (PREPARE_TO_PUT.equals(rtyp)) { + update = con.prepareStatement(UPDATE_REQUEST_PUT_STATUS_WHERE_ID_IS); + } else { + update = con.prepareStatement(UPDATE_REQUEST_BOL_STATUS_WHERE_ID_IS); + } + update.setInt(1, statusCodeConverter.toDB(SRM_ABORTED)); + update.setString(2, "User aborted request!"); + update.setLong(3, id); + log.trace("REQUEST SUMMARY DAO - abortRequest - {}", update); + update.executeUpdate(); + con.commit(); + } + } else { + con.rollback(); + } + } catch (SQLException e) { + + log.error("REQUEST SUMMARY DAO - abortRequest: {}", e.getMessage(), e); + e.printStackTrace(); + + } finally { + closeResultSet(rs); + 
closeStatement(update); + closeStatement(query); + closeConnection(con); + } + } + + /** + * Method used to transit the status of a request that is in SRM_REQUEST_INPROGRESS state, to + * SRM_ABORTED. All files associated with the request will also get their status changed to + * SRM_ABORTED. If the supplied token is null, or not found, or not in the SRM_REQUEST_INPROGRESS + * state, then nothing happens. + */ + public void abortInProgressRequest(TRequestToken rt) { + + Connection con = null; + PreparedStatement updateReq = null; + PreparedStatement updateChunk = null; + PreparedStatement query = null; + ResultSet rs = null; + + try { + con = getManagedConnection(); + + query = con.prepareStatement(SELECT_REQUEST_WHERE_TOKEN_AND_STATUS); + query.setString(1, rt.getValue()); + query.setInt(2, statusCodeConverter.toDB(SRM_REQUEST_INPROGRESS)); + log.trace("REQUEST SUMMARY DAO - abortInProgressRequest - {}", query); + rs = query.executeQuery(); + + if (rs.next()) { + // token found... + // get ID + long id = rs.getLong("ID"); + String type = rs.getString("config_RequestTypeID"); + // update global request status + updateReq = con.prepareStatement(UPDATE_REQUEST_STATUS_WHERE_ID_IS); + updateReq.setInt(1, statusCodeConverter.toDB(SRM_ABORTED)); + updateReq.setString(2, "User aborted request!"); + updateReq.setLong(3, id); + log.trace("REQUEST SUMMARY DAO - abortInProgressRequest - {}", updateReq); + updateReq.executeUpdate(); + + // update single chunk file statuses + TRequestType rtyp = requestTypeConverter.toSTORM(type); + if (EMPTY.equals(rtyp)) { + log.error("REQUEST SUMMARY DAO - Unable to complete abortRequest: " + + "could not update file statuses because the request type could " + + "not be translated from the DB!"); + con.rollback(); + } else { + if (PREPARE_TO_GET.equals(rtyp)) { + updateChunk = con.prepareStatement(UPDATE_REQUEST_GET_STATUS_WHERE_ID_IS); + } else if (PREPARE_TO_PUT.equals(rtyp)) { + updateChunk = 
con.prepareStatement(UPDATE_REQUEST_PUT_STATUS_WHERE_ID_IS); + } else { + updateChunk = con.prepareStatement(UPDATE_REQUEST_BOL_STATUS_WHERE_ID_IS); + } + } + updateChunk.setInt(1, statusCodeConverter.toDB(SRM_ABORTED)); + updateChunk.setString(2, "User aborted request!"); + updateChunk.setLong(3, id); + log.trace("REQUEST SUMMARY DAO - abortInProgressRequest - {}", updateChunk); + updateChunk.executeUpdate(); + } else { + con.rollback(); + } + } catch (SQLException e) { + log.error("REQUEST SUMMARY DAO - abortInProgressRequest: {}", e.getMessage(), e); + e.printStackTrace(); + try { + con.rollback(); + } catch (SQLException e1) { + e1.printStackTrace(); + } + } finally { + closeResultSet(rs); + closeStatement(query); + closeStatement(updateReq); + closeStatement(updateChunk); + closeConnection(con); + } + } + + /** + * Method used to transit the status of chunks of a request that is in SRM_REQUEST_INPROGRESS + * state, to SRM_ABORTED. If the supplied token is null, or not found, or not in the + * SRM_REQUEST_INPROGRESS state, then nothing happens. 
+ */ + public void abortChunksOfInProgressRequest(TRequestToken requestToken, Collection surls) { + + Connection con = null; + PreparedStatement update = null; + PreparedStatement query = null; + ResultSet rs = null; + + try { + con = getManagedConnection(); + query = con.prepareStatement(SELECT_REQUEST_WHERE_TOKEN_AND_STATUS); + query.setString(1, requestToken.getValue()); + query.setInt(2, statusCodeConverter.toDB(SRM_REQUEST_INPROGRESS)); + log.trace("REQUEST SUMMARY DAO - abortChunksOfInProgressRequest - {}", query); + rs = query.executeQuery(); + + if (rs.next()) { + long id = rs.getLong("ID"); + String type = rs.getString("config_RequestTypeID"); + // update single chunk file statuses + TRequestType rtyp = requestTypeConverter.toSTORM(type); + if (EMPTY.equals(rtyp)) { + log.error("REQUEST SUMMARY DAO - Unable to complete abortRequest: " + + "could not update file statuses because the request type could " + + "not be translated from the DB!"); + con.rollback(); + } else { + if (PREPARE_TO_GET.equals(rtyp)) { + update = con.prepareStatement(UPDATE_REQUEST_GET_STATUS_WHERE_ID_IS_AND_SURL_IN); + } else if (PREPARE_TO_PUT.equals(rtyp)) { + update = con.prepareStatement(UPDATE_REQUEST_PUT_STATUS_WHERE_ID_IS_AND_SURL_IN); + } else { + update = con.prepareStatement(UPDATE_REQUEST_BOL_STATUS_WHERE_ID_IS_AND_SURL_IN); + } + } + + update.setInt(1, statusCodeConverter.toDB(SRM_ABORTED)); + update.setString(2, "User aborted request!"); + update.setLong(3, id); + update.setString(4, makeInString(surls)); + log.trace("REQUEST SUMMARY DAO - abortChunksOfInProgressRequest - {}", update); + update.executeUpdate(); + con.commit(); + } + } catch (SQLException e) { + log.error("REQUEST SUMMARY DAO - abortChunksOfInProgressRequest: {}", e.getMessage(), e); + e.printStackTrace(); + try { + con.rollback(); + } catch (SQLException e1) { + e1.printStackTrace(); + } + } finally { + closeResultSet(rs); + closeStatement(query); + closeStatement(update); + closeConnection(con); + } + } + 
+ /** + * Private method that returns a String of all SURLS in the collection of String. + */ + private String makeInString(Collection c) { + + StringBuilder sb = new StringBuilder("("); + for (Iterator i = c.iterator(); i.hasNext();) { + sb.append(i.next()); + if (i.hasNext()) { + sb.append(","); + } + } + sb.append(")"); + return sb.toString(); + } + + /** + * Method that returns the config_RequestTypeID field present in request_queue table, for the + * request with the specified request token rt. In case of any error, the empty String "" is + * returned. + */ + public TRequestType getRequestType(TRequestToken requestToken) { + + Connection con = null; + PreparedStatement query = null; + ResultSet rs = null; + TRequestType result = EMPTY; + + try { + con = getConnection(); + query = con.prepareStatement(SELECT_REQUEST_WHERE_TOKEN_IS); + query.setString(1, requestToken.getValue()); + log.trace("REQUEST SUMMARY DAO - typeOf - {}", query); + rs = query.executeQuery(); + if (rs.next()) { + result = TRequestType.valueOf(rs.getString("config_RequestTypeID")); + } + } catch (SQLException e) { + log.error("REQUEST SUMMARY DAO - typeOf - {}", e.getMessage(), e); + e.printStackTrace(); + } finally { + closeResultSet(rs); + closeStatement(query); + closeConnection(con); + } + return result; + } + + /** + * Method that returns the config_RequestTypeID field present in request_queue table, for the + * request with the specified request token rt. In case of any error, the empty String "" is + * returned. 
+ */ + public RequestSummaryDataTO find(TRequestToken requestToken) { + + Connection con = null; + PreparedStatement query = null; + ResultSet rs = null; + RequestSummaryDataTO to = null; + + try { + con = getConnection(); + query = con.prepareStatement(SELECT_FULL_REQUEST_WHERE_TOKEN_IS); + query.setString(1, requestToken.getValue()); + rs = query.executeQuery(); + + if (rs.first()) { + to = new RequestSummaryDataTO(); + to.setPrimaryKey(rs.getLong("ID")); + to.setRequestType(rs.getString("config_RequestTypeID")); + to.setClientDN(rs.getString("client_dn")); + to.setUserToken(rs.getString("u_token")); + to.setRetrytime(rs.getInt("retrytime")); + to.setPinLifetime(rs.getInt("pinLifetime")); + to.setSpaceToken(rs.getString("s_token")); + to.setStatus(rs.getInt("status")); + to.setErrstring(rs.getString("errstring")); + to.setRequestToken(rs.getString("r_token")); + to.setRemainingTotalTime(rs.getInt("remainingTotalTime")); + to.setFileLifetime(rs.getInt("fileLifetime")); + to.setNbreqfiles(rs.getInt("nbreqfiles")); + to.setNumOfCompleted(rs.getInt("numOfCompleted")); + to.setNumOfWaiting(rs.getInt("numOfWaiting")); + to.setNumOfFailed(rs.getInt("numOfFailed")); + to.setTimestamp(rs.getTimestamp("timeStamp")); + + java.sql.Blob blob = rs.getBlob("proxy"); + if (blob != null) { + byte[] bdata = blob.getBytes(1, (int) blob.length()); + to.setVomsAttributes(new String(bdata)); + } + to.setDeferredStartTime(rs.getInt("deferredStartTime")); + to.setRemainingDeferredStartTime(rs.getInt("remainingDeferredStartTime")); + + if (rs.next()) { + log.warn("More than a row matches token {}", requestToken); + } + } + } catch (SQLException e) { + log.error("REQUEST SUMMARY DAO - find - {}", e.getMessage(), e); + e.printStackTrace(); + } finally { + closeResultSet(rs); + closeStatement(query); + closeConnection(con); + } + return to; + } + + /** + * Method that purges expired requests: it only removes up to a fixed value of expired requests at + * a time. 
The value is configured and obtained from the configuration property getPurgeBatchSize. + * A List of Strings with the request tokens removed is returned. In order to completely remove + * all expired requests, simply keep invoking this method until an empty List is returned. This + * batch processing is needed because there could be millions of expired requests which are likely + * to result in out-of-memory problems. Notice that in case of errors only error messages get + * logged. An empty List is also returned. + */ + public Collection purgeExpiredRequests(long expiredRequestTime, int purgeSize) { + + Connection con = null; + PreparedStatement fetch = null; + PreparedStatement deleteReq = null; + PreparedStatement deleteOrphans = null; + ResultSet rs = null; + Collection requestTokens = Lists.newArrayList(); + + try { + // start transaction + con = getManagedConnection(); + + fetch = con.prepareStatement(SELECT_PURGEABLE_REQUESTS_WITH_LIMIT); + fetch.setLong(1, expiredRequestTime); + fetch.setInt(2, statusCodeConverter.toDB(SRM_REQUEST_QUEUED)); + fetch.setInt(3, statusCodeConverter.toDB(SRM_REQUEST_INPROGRESS)); + fetch.setInt(4, purgeSize); + log.trace("REQUEST SUMMARY DAO - purgeExpiredRequests - {}", fetch); + rs = fetch.executeQuery(); + + Collection ids = Lists.newArrayList(); + + while (rs.next()) { + requestTokens.add(rs.getString("r_token")); + ids.add(new Long(rs.getLong("ID"))); + } + + if (!ids.isEmpty()) { + // REMOVE BATCH OF EXPIRED REQUESTS! 
+ + deleteReq = con.prepareStatement(DELETE_REQUEST_WHERE_ID_IN); + deleteReq.setString(1, makeWhereString(ids)); + log.trace("REQUEST SUMMARY DAO - purgeExpiredRequests - {}", deleteReq); + + int deleted = deleteReq.executeUpdate(); + if (deleted > 0) { + log.info("REQUEST SUMMARY DAO - purgeExpiredRequests - Deleted {} expired requests.", + deleted); + } else { + log.trace("REQUEST SUMMARY DAO - purgeExpiredRequests - No deleted expired requests."); + } + + deleteOrphans = con.prepareStatement(DELETE_ORPHANS_DIR_OPTION); + log.trace("REQUEST SUMMARY DAO - purgeExpiredRequests - {}", deleteOrphans); + deleted = deleteOrphans.executeUpdate(); + + if (deleted > 0) { + log.info("REQUEST SUMMARY DAO - purgeExpiredRequests - Deleted {} " + + "DirOption related to expired requests.", deleted); + } else { + log.trace("REQUEST SUMMARY DAO - purgeExpiredRequests - No Deleted " + + "DirOption related to expired requests."); + } + } + // commit and finish transaction + con.commit(); + + } catch (SQLException e) { + log.error("REQUEST SUMMARY DAO - purgeExpiredRequests - Rolling back because of error: {}", + e.getMessage(), e); + try { + con.rollback(); + } catch (SQLException e1) { + e1.printStackTrace(); + } + } finally { + closeResultSet(rs); + closeStatement(fetch); + closeStatement(deleteReq); + closeStatement(deleteOrphans); + closeConnection(con); + } + return requestTokens; + } + + /** + * Retrieve the total number of expired requests. 
+ * + * @return + */ + public int getNumberExpired() { + + int rowCount = 0; + + Connection con = null; + PreparedStatement ps = null; + ResultSet rs = null; + + try { + // start transaction + con = getConnection(); + + ps = con.prepareStatement(COUNT_PURGEABLE_REQUESTS); + ps.setLong(1, Configuration.getInstance().getExpiredRequestTime()); + ps.setInt(2, statusCodeConverter.toDB(SRM_REQUEST_QUEUED)); + ps.setInt(3, statusCodeConverter.toDB(SRM_REQUEST_INPROGRESS)); + + log.trace("REQUEST SUMMARY DAO - Number of expired requests: {}", ps); + rs = ps.executeQuery(); + + // Get the number of rows from the result set + if (rs.next()) { + rowCount = rs.getInt(1); + } + log.debug("Nr of expired requests is: {}", rowCount); + + } catch (SQLException e) { + log.error("REQUEST SUMMARY DAO - purgeExpiredRequests - Rolling back because of error: {}", + e.getMessage(), e); + e.printStackTrace(); + } finally { + closeResultSet(rs); + closeStatement(ps); + closeConnection(con); + } + + return rowCount; + + } + + /** + * Private method that returns a String of all IDs retrieved by the last SELECT. 
+ */ + private String makeWhereString(Collection rowids) { + + StringBuilder sb = new StringBuilder("("); + for (Iterator i = rowids.iterator(); i.hasNext();) { + sb.append(i.next()); + if (i.hasNext()) { + sb.append(","); + } + } + sb.append(")"); + return sb.toString(); + } + +} diff --git a/src/main/java/it/grid/storm/catalogs/surl/SURLStatusDAO.java b/src/main/java/it/grid/storm/persistence/impl/mysql/SURLStatusDAOMySql.java similarity index 59% rename from src/main/java/it/grid/storm/catalogs/surl/SURLStatusDAO.java rename to src/main/java/it/grid/storm/persistence/impl/mysql/SURLStatusDAOMySql.java index e70e3b23b..2dcd0bea1 100644 --- a/src/main/java/it/grid/storm/catalogs/surl/SURLStatusDAO.java +++ b/src/main/java/it/grid/storm/persistence/impl/mysql/SURLStatusDAOMySql.java @@ -1,10 +1,29 @@ -package it.grid.storm.catalogs.surl; +package it.grid.storm.persistence.impl.mysql; + +import static it.grid.storm.srm.types.TStatusCode.SRM_SPACE_AVAILABLE; +import static it.grid.storm.srm.types.TStatusCode.SRM_SUCCESS; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.collect.Maps; import it.grid.storm.catalogs.PtPChunkCatalog; import it.grid.storm.catalogs.RequestSummaryCatalog; -import it.grid.storm.catalogs.StatusCodeConverter; -import it.grid.storm.catalogs.StoRMDataSource; import it.grid.storm.griduser.GridUserInterface; +import it.grid.storm.persistence.converter.StatusCodeConverter; +import it.grid.storm.persistence.dao.AbstractDAO; +import it.grid.storm.persistence.dao.SURLStatusDAO; +import it.grid.storm.persistence.pool.StormDbConnectionPool; import it.grid.storm.srm.types.InvalidTSURLAttributesException; import it.grid.storm.srm.types.TRequestToken; import 
it.grid.storm.srm.types.TRequestType; @@ -14,41 +33,46 @@ import it.grid.storm.synchcall.surl.ExpiredTokenException; import it.grid.storm.synchcall.surl.UnknownTokenException; -import java.sql.Connection; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.Statement; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; +public class SURLStatusDAOMySql extends AbstractDAO implements SURLStatusDAO { -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; + public static final Logger LOGGER = LoggerFactory.getLogger(SURLStatusDAOMySql.class); -public class SURLStatusDAO { + private static SURLStatusDAO instance; - public static final Logger LOGGER = LoggerFactory - .getLogger(SURLStatusDAO.class); + public static synchronized SURLStatusDAO getInstance() { + if (instance == null) { + instance = new SURLStatusDAOMySql(); + } + return instance; + } + + private final StatusCodeConverter converter; + private final RequestSummaryCatalog requestSummaryCatalog; + private final PtPChunkCatalog ptpChunkCatalog; + + private SURLStatusDAOMySql() { + super(StormDbConnectionPool.getInstance()); + converter = StatusCodeConverter.getInstance(); + requestSummaryCatalog = RequestSummaryCatalog.getInstance(); + ptpChunkCatalog = PtPChunkCatalog.getInstance(); + } - public boolean abortActivePtGsForSURL(GridUserInterface user, TSURL surl, - String explanation) { + public boolean abortActivePtGsForSURL(GridUserInterface user, TSURL surl, String explanation) { surlSanityChecks(surl); - PreparedStatement stat = null; Connection con = null; + PreparedStatement stat = null; + int updateCount = 0; try { con = getConnection(); - String query = "UPDATE status_Get sg " - + "JOIN (request_Get rg, request_queue rq) " - + "ON sg.request_GetID=rg.ID AND rg.request_queueID=rq.ID " - + "SET sg.statusCode=20, rq.status=20, sg.explanation=? " - + "WHERE rg.sourceSURL = ? 
and rg.sourceSURL_uniqueID = ? " - + "AND (sg.statusCode=22 OR sg.statusCode=17) "; + String query = "UPDATE status_Get sg JOIN (request_Get rg, request_queue rq) " + + "ON sg.request_GetID=rg.ID AND rg.request_queueID=rq.ID " + + "SET sg.statusCode=20, rq.status=20, sg.explanation=? " + + "WHERE rg.sourceSURL = ? and rg.sourceSURL_uniqueID = ? " + + "AND (sg.statusCode=22 OR sg.statusCode=17) "; if (user != null) { query += "AND rq.client_dn = ?"; @@ -63,43 +87,39 @@ public boolean abortActivePtGsForSURL(GridUserInterface user, TSURL surl, stat.setString(4, user.getDn()); } - final int updateCount = stat.executeUpdate(); - LOGGER.debug("abortActivePtGsForSURL: surl={}, numOfAbortedRequests={}", - surl, updateCount); - - return (updateCount != 0); + updateCount = stat.executeUpdate(); + LOGGER.debug("abortActivePtGsForSURL: surl={}, numOfAbortedRequests={}", surl, updateCount); } catch (SQLException e) { - String msg = String.format("abortActivePtGsForSURL: SQL error: %s", - e.getMessage()); - LOGGER.error(msg, e); - throw new RuntimeException(msg, e); + String msg = String.format("abortActivePtGsForSURL: SQL error: %s", e.getMessage()); + LOGGER.error(msg, e); } finally { - closeStatetement(stat); + + closeStatement(stat); closeConnection(con); } + return (updateCount != 0); } - public boolean abortActivePtPsForSURL(GridUserInterface user, TSURL surl, - String explanation) { + public boolean abortActivePtPsForSURL(GridUserInterface user, TSURL surl, String explanation) { surlSanityChecks(surl); - PreparedStatement stat = null; Connection con = null; + PreparedStatement stat = null; + int updateCount = 0; try { con = getConnection(); - String query = "UPDATE status_Put sp " - + "JOIN (request_Put rp, request_queue rq) " - + "ON sp.request_PutID=rp.ID AND rp.request_queueID=rq.ID " - + "SET sp.statusCode=20, rq.status=20, sp.explanation=? " - + "WHERE rp.targetSURL = ? and rp.targetSURL_uniqueID = ? 
" - + "AND (sp.statusCode=24 OR sp.statusCode=17)"; + String query = "UPDATE status_Put sp JOIN (request_Put rp, request_queue rq) " + + "ON sp.request_PutID=rp.ID AND rp.request_queueID=rq.ID " + + "SET sp.statusCode=20, rq.status=20, sp.explanation=? " + + "WHERE rp.targetSURL = ? and rp.targetSURL_uniqueID = ? " + + "AND (sp.statusCode=24 OR sp.statusCode=17)"; if (user != null) { query += "AND rq.client_dn = ?"; @@ -114,36 +134,30 @@ public boolean abortActivePtPsForSURL(GridUserInterface user, TSURL surl, stat.setString(4, user.getDn()); } - final int updateCount = stat.executeUpdate(); + updateCount = stat.executeUpdate(); - LOGGER.debug("abortActivePtPsForSURL: surl={}, numOfAbortedRequests={}", - surl, updateCount); - - return (updateCount != 0); + LOGGER.debug("abortActivePtPsForSURL: surl={}, numOfAbortedRequests={}", surl, updateCount); } catch (SQLException e) { - String msg = String.format("abortActivePtPsForSURL: SQL error: %s", - e.getMessage()); - LOGGER.error(msg, e); - throw new RuntimeException(msg, e); + String msg = String.format("abortActivePtPsForSURL: SQL error: %s", e.getMessage()); + LOGGER.error(msg, e); } finally { - closeStatetement(stat); + + closeStatement(stat); closeConnection(con); } - + return (updateCount != 0); } - private Map buildStatusMap(ResultSet rs) - throws SQLException { + private Map buildStatusMap(ResultSet rs) throws SQLException { if (rs == null) { throw new IllegalArgumentException("rs cannot be null"); } Map statusMap = new HashMap(); - StatusCodeConverter converter = StatusCodeConverter.getInstance(); while (rs.next()) { TSURL surl = surlFromString(rs.getString(1)); TStatusCode sc = converter.toSTORM(rs.getInt(2)); @@ -155,42 +169,8 @@ private Map buildStatusMap(ResultSet rs) } - private void closeConnection(Connection conn) { - - if (conn != null) { - try { - conn.close(); - } catch (SQLException e) { - LOGGER.error("Error closing connection: {}.", e.getMessage(), e); - } - } - } - - private void 
closeResultSet(ResultSet rs) { - - if (rs != null) { - - try { - rs.close(); - } catch (SQLException e) { - LOGGER.error("Error closing result set: {}", e.getMessage(), e); - } - } - } - - private void closeStatetement(Statement stat) { - - if (stat != null) { - try { - stat.close(); - } catch (SQLException e) { - LOGGER.error("Error closing statement: {}.", e.getMessage(), e); - } - } - } - - private Map filterSURLStatuses( - Map statuses, List surls) { + private Map filterSURLStatuses(Map statuses, + List surls) { if (surls == null) { return statuses; @@ -209,8 +189,8 @@ private Map filterSURLStatuses( // Add a failure state for the surls that were // requested but are not linked to the token for (TSURL s : surlsCopy) { - statuses.put(s, new TReturnStatus(TStatusCode.SRM_FAILURE, - "SURL not linked to passed request token.")); + statuses.put(s, + new TReturnStatus(TStatusCode.SRM_FAILURE, "SURL not linked to passed request token.")); } return statuses; @@ -223,47 +203,40 @@ private Map getBoLSURLStatuses(TRequestToken token) { ResultSet rs = null; PreparedStatement stat = null; Connection con = null; + Map result = null; try { con = getConnection(); String query = "SELECT rb.sourceSURL, sb.statusCode " - + "FROM request_queue rq JOIN (request_BoL rb, status_BoL sb) " - + "ON (rb.request_queueID = rq.ID AND sb.request_BoLID = rb.ID)" - + "WHERE ( rq.r_token = ? )"; + + "FROM request_queue rq JOIN (request_BoL rb, status_BoL sb) " + + "ON (rb.request_queueID = rq.ID AND sb.request_BoLID = rb.ID)" + + "WHERE ( rq.r_token = ? 
)"; stat = con.prepareStatement(query); stat.setString(1, token.getValue()); rs = stat.executeQuery(); - return buildStatusMap(rs); + result = buildStatusMap(rs); } catch (SQLException e) { - String msg = String.format("getBoLSURLStatuses: SQL error: %s", - e.getMessage()); - + String msg = String.format("getBoLSURLStatuses: SQL error: %s", e.getMessage()); LOGGER.error(msg, e); - throw new RuntimeException(msg, e); + result = Maps.newHashMap(); } finally { - closeStatetement(stat); + closeResultSet(rs); + closeStatement(stat); closeConnection(con); } + return result; } - private Connection getConnection() throws SQLException { - - if (StoRMDataSource.getInstance() == null) { - throw new IllegalStateException("SToRM Data source not initialized!"); - } - return StoRMDataSource.getInstance().getConnection(); - } - - public Map getPinnedSURLsForUser( - GridUserInterface user, List surls) { + public Map getPinnedSURLsForUser(GridUserInterface user, + List surls) { if (user == null) { throw new NullPointerException("getPinnedSURLsForUser: null user!"); @@ -272,23 +245,22 @@ public Map getPinnedSURLsForUser( ResultSet rs = null; PreparedStatement stat = null; Connection con = null; - - StatusCodeConverter converter = StatusCodeConverter.getInstance(); + Map result = Maps.newHashMap(); try { con = getConnection(); String query = "SELECT rg.sourceSURL, rg.sourceSURL_uniqueID, sg.statusCode " - + "FROM request_queue rq JOIN (request_Get rg, status_Get sg) " - + "ON (rg.request_queueID=rq.ID AND sg.request_GetID=rg.ID) " - + "WHERE ( sg.statusCode = 22 and rq.client_dn = ? )"; + + "FROM request_queue rq JOIN (request_Get rg, status_Get sg) " + + "ON (rg.request_queueID=rq.ID AND sg.request_GetID=rg.ID) " + + "WHERE ( sg.statusCode = 22 and rq.client_dn = ? 
)"; stat = con.prepareStatement(query); stat.setString(1, user.getDn()); rs = stat.executeQuery(); - Map statusMap = new HashMap(); + Map statusMap = Maps.newHashMap(); while (rs.next()) { @@ -298,22 +270,25 @@ public Map getPinnedSURLsForUser( } - return filterSURLStatuses(statusMap, surls); + result = filterSURLStatuses(statusMap, surls); } catch (SQLException e) { - String msg = String.format("getPinnedSURLsForUser: SQL error: %s", - e.getMessage()); + + String msg = String.format("getPinnedSURLsForUser: SQL error: %s", e.getMessage()); LOGGER.error(msg, e); - throw new RuntimeException(msg, e); + } finally { - closeStatetement(stat); + + closeStatement(stat); closeResultSet(rs); closeConnection(con); } + + return result; } - public Map getPinnedSURLsForUser( - GridUserInterface user, TRequestToken token, List surls) { + public Map getPinnedSURLsForUser(GridUserInterface user, + TRequestToken token, List surls) { userSanityChecks(user); tokenSanityChecks(token); @@ -323,22 +298,22 @@ public Map getPinnedSURLsForUser( PreparedStatement stat = null; Connection con = null; - StatusCodeConverter converter = StatusCodeConverter.getInstance(); + Map result = Maps.newHashMap(); try { con = getConnection(); String query = "SELECT rg.sourceSURL, rg.sourceSURL_uniqueID, sg.statusCode " - + "FROM request_queue rq JOIN (request_Get rg, status_Get sg) " - + "ON (rg.request_queueID=rq.ID AND sg.request_GetID=rg.ID) " - + "WHERE ( sg.statusCode = 22 and rq.client_dn = ? and rq.r_token = ? )"; + + "FROM request_queue rq JOIN (request_Get rg, status_Get sg) " + + "ON (rg.request_queueID=rq.ID AND sg.request_GetID=rg.ID) " + + "WHERE ( sg.statusCode = 22 and rq.client_dn = ? and rq.r_token = ? 
)"; stat = con.prepareStatement(query); stat.setString(1, user.getDn()); stat.setString(2, token.getValue()); rs = stat.executeQuery(); - Map statusMap = new HashMap(); + Map statusMap = Maps.newHashMap(); while (rs.next()) { @@ -348,18 +323,20 @@ public Map getPinnedSURLsForUser( } - return filterSURLStatuses(statusMap, surls); + result = filterSURLStatuses(statusMap, surls); } catch (SQLException e) { - String msg = String.format("getPinnedSURLsForUser: SQL error: %s", - e.getMessage()); + + String msg = String.format("getPinnedSURLsForUser: SQL error: %s", e.getMessage()); LOGGER.error(msg, e); - throw new RuntimeException(msg, e); + } finally { - closeStatetement(stat); + + closeStatement(stat); closeResultSet(rs); closeConnection(con); } + return result; } private Map getPtGSURLStatuses(TRequestToken token) { @@ -370,30 +347,34 @@ private Map getPtGSURLStatuses(TRequestToken token) { PreparedStatement stat = null; Connection con = null; + Map result = Maps.newHashMap(); + try { con = getConnection(); String query = "SELECT rg.sourceSURL, sg.statusCode " - + "FROM request_queue rq JOIN (request_Get rg, status_Get sg) " - + "ON (rg.request_queueID = rq.ID AND sg.request_GetID=rg.ID) " - + "WHERE ( rq.r_token = ? )"; + + "FROM request_queue rq JOIN (request_Get rg, status_Get sg) " + + "ON (rg.request_queueID = rq.ID AND sg.request_GetID=rg.ID) " + + "WHERE ( rq.r_token = ? 
)"; stat = con.prepareStatement(query); stat.setString(1, token.getValue()); rs = stat.executeQuery(); - return buildStatusMap(rs); + result = buildStatusMap(rs); } catch (SQLException e) { - String msg = String.format("getPtGSURLStatuses: SQL error: %s", - e.getMessage()); + + String msg = String.format("getPtGSURLStatuses: SQL error: %s", e.getMessage()); LOGGER.error(msg, e); - throw new RuntimeException(msg, e); + } finally { - closeStatetement(stat); + + closeStatement(stat); closeResultSet(rs); closeConnection(con); } + return result; } private Map getPtPSURLStatuses(TRequestToken token) { @@ -403,39 +384,39 @@ private Map getPtPSURLStatuses(TRequestToken token) { ResultSet rs = null; PreparedStatement stat = null; Connection con = null; + Map result = Maps.newHashMap(); try { con = getConnection(); String query = "SELECT rp.targetSURL, sp.statusCode " - + "FROM request_queue rq JOIN (request_Put rp, status_Put sp) " - + "ON (rp.request_queueID = rq.ID AND sp.request_PutID = rp.ID)" - + "WHERE ( rq.r_token = ? )"; + + "FROM request_queue rq JOIN (request_Put rp, status_Put sp) " + + "ON (rp.request_queueID = rq.ID AND sp.request_PutID = rp.ID)" + + "WHERE ( rq.r_token = ? 
)"; stat = con.prepareStatement(query); stat.setString(1, token.getValue()); rs = stat.executeQuery(); - return buildStatusMap(rs); + result = buildStatusMap(rs); } catch (SQLException e) { - String msg = String.format("getPtPSURLStatuses: SQL error: %s", - e.getMessage()); + String msg = String.format("getPtPSURLStatuses: SQL error: %s", e.getMessage()); LOGGER.error(msg, e); - throw new RuntimeException(msg, e); } finally { - closeStatetement(stat); + + closeStatement(stat); closeResultSet(rs); closeConnection(con); } - + return result; } public Map getSURLStatuses(TRequestToken token) { - TRequestType rt = RequestSummaryCatalog.getInstance().typeOf(token); + TRequestType rt = requestSummaryCatalog.typeOf(token); if (rt.isEmpty()) throw new UnknownTokenException(token.getValue()); @@ -444,41 +425,38 @@ public Map getSURLStatuses(TRequestToken token) { throw new ExpiredTokenException(token.getValue()); switch (rt) { - case PREPARE_TO_GET: - return getPtGSURLStatuses(token); + case PREPARE_TO_GET: + return getPtGSURLStatuses(token); - case PREPARE_TO_PUT: - return getPtPSURLStatuses(token); + case PREPARE_TO_PUT: + return getPtPSURLStatuses(token); - case BRING_ON_LINE: - return getBoLSURLStatuses(token); + case BRING_ON_LINE: + return getBoLSURLStatuses(token); - default: - String msg = String.format("Invalid request type for token %s: %s", - token, rt.toString()); - throw new IllegalArgumentException(msg); + default: + String msg = String.format("Invalid request type for token %s: %s", token, rt.toString()); + throw new IllegalArgumentException(msg); } } - public Map getSURLStatuses(TRequestToken token, - List surls) { + public Map getSURLStatuses(TRequestToken token, List surls) { - TRequestType rt = RequestSummaryCatalog.getInstance().typeOf(token); + TRequestType rt = requestSummaryCatalog.typeOf(token); switch (rt) { - case PREPARE_TO_GET: - return filterSURLStatuses(getPtGSURLStatuses(token), surls); + case PREPARE_TO_GET: + return 
filterSURLStatuses(getPtGSURLStatuses(token), surls); - case PREPARE_TO_PUT: - return filterSURLStatuses(getPtPSURLStatuses(token), surls); + case PREPARE_TO_PUT: + return filterSURLStatuses(getPtPSURLStatuses(token), surls); - case BRING_ON_LINE: - return filterSURLStatuses(getBoLSURLStatuses(token), surls); + case BRING_ON_LINE: + return filterSURLStatuses(getBoLSURLStatuses(token), surls); - default: - String msg = String.format("Invalid request type for token %s: %s", - token, rt.toString()); - throw new IllegalArgumentException(msg); + default: + String msg = String.format("Invalid request type for token %s: %s", token, rt.toString()); + throw new IllegalArgumentException(msg); } } @@ -487,9 +465,8 @@ public int markSURLsReadyForRead(TRequestToken token, List surls) { tokenSanityChecks(token); surlSanityChecks(surls); - // I am not reimplementing the whole catalog functions - return PtPChunkCatalog.getInstance().updateFromPreviousStatus(token, surls, - TStatusCode.SRM_SPACE_AVAILABLE, TStatusCode.SRM_SUCCESS); + // I am not re-implementing the whole catalog functions + return ptpChunkCatalog.updateFromPreviousStatus(token, surls, SRM_SPACE_AVAILABLE, SRM_SUCCESS); } @@ -534,25 +511,25 @@ public void releaseSURL(TSURL surl) { try { con = getConnection(); - String query = "UPDATE status_Get sg " - + "JOIN (request_Get rg, request_queue rq) " - + "ON sg.request_GetID=rg.ID AND rg.request_queueID=rq.ID " - + "SET sg.statusCode=21" - + "WHERE (sg.statusCode=22 OR sg.statusCode=0) " - + "AND rg.sourceSURL = ? and rg.sourceSURL_uniqueID = ?"; + String query = "UPDATE status_Get sg JOIN (request_Get rg, request_queue rq) " + + "ON sg.request_GetID=rg.ID AND rg.request_queueID=rq.ID SET sg.statusCode=21" + + "WHERE (sg.statusCode=22 OR sg.statusCode=0) " + + "AND rg.sourceSURL = ? 
and rg.sourceSURL_uniqueID = ?"; stat = con.prepareStatement(query); stat.setString(1, surl.getSURLString()); stat.setInt(2, surl.uniqueId()); stat.executeUpdate(); + } catch (SQLException e) { + String msg = String.format("releaseSURL: SQL error: %s", e.getMessage()); LOGGER.error(msg, e); - throw new RuntimeException(msg, e); } finally { - closeStatetement(stat); + + closeStatement(stat); closeConnection(con); } } @@ -568,14 +545,11 @@ public void releaseSURLs(GridUserInterface user, List surls) { try { con = getConnection(); - String query = "UPDATE status_Get sg " - + "JOIN (request_Get rg, request_queue rq) " - + "ON sg.request_GetID=rg.ID AND rg.request_queueID=rq.ID " - + "SET sg.statusCode=21 " - + "WHERE (sg.statusCode=22 OR sg.statusCode=0) " - + "AND rg.sourceSURL_uniqueID IN (" + quoteSURLUniqueIDs(surls) + ") " - + "AND rg.sourceSURL IN (" + quoteSURLList(surls) + ") " - + "AND rq.client_dn = ?"; + String query = "UPDATE status_Get sg JOIN (request_Get rg, request_queue rq) " + + "ON sg.request_GetID=rg.ID AND rg.request_queueID=rq.ID SET sg.statusCode=21 " + + "WHERE (sg.statusCode=22 OR sg.statusCode=0) AND rg.sourceSURL_uniqueID IN (" + + quoteSURLUniqueIDs(surls) + ") AND rg.sourceSURL IN (" + quoteSURLList(surls) + + ") AND rq.client_dn = ?"; stat = con.prepareStatement(query); stat.setString(1, user.getDn()); @@ -584,15 +558,15 @@ public void releaseSURLs(GridUserInterface user, List surls) { LOGGER.debug("releaseSURLs: released {} surls", releasedSURLsCount); } catch (SQLException e) { + String msg = String.format("releaseSURLs: SQL error: %s", e.getMessage()); LOGGER.error(msg, e); - throw new RuntimeException(msg, e); } finally { - closeStatetement(stat); + + closeStatement(stat); closeConnection(con); } - } public void releaseSURLs(List surls) { @@ -605,24 +579,23 @@ public void releaseSURLs(List surls) { try { con = getConnection(); - String query = "UPDATE status_Get sg " - + "JOIN (request_Get rg, request_queue rq) " - + "ON 
sg.request_GetID=rg.ID AND rg.request_queueID=rq.ID " - + "SET sg.statusCode=21 " - + "WHERE (sg.statusCode=22 OR sg.statusCode=0) " - + "AND rg.sourceSURL_uniqueID IN (" + quoteSURLUniqueIDs(surls) + ") " - + "AND rg.sourceSURL IN (" + quoteSURLList(surls) + ")"; + String query = "UPDATE status_Get sg JOIN (request_Get rg, request_queue rq) " + + "ON sg.request_GetID=rg.ID AND rg.request_queueID=rq.ID SET sg.statusCode=21 " + + "WHERE (sg.statusCode=22 OR sg.statusCode=0) AND rg.sourceSURL_uniqueID IN (" + + quoteSURLUniqueIDs(surls) + ") AND rg.sourceSURL IN (" + quoteSURLList(surls) + + ")"; stat = con.prepareStatement(query); stat.executeUpdate(); } catch (SQLException e) { + String msg = String.format("releaseSURLs: SQL error: %s", e.getMessage()); LOGGER.error(msg, e); - throw new RuntimeException(msg, e); } finally { - closeStatetement(stat); + + closeStatement(stat); closeConnection(con); } } @@ -638,26 +611,24 @@ public void releaseSURLs(TRequestToken token, List surls) { try { con = getConnection(); - String query = "UPDATE status_Get sg " - + "JOIN (request_Get rg, request_queue rq) " - + "ON sg.request_GetID=rg.ID AND rg.request_queueID=rq.ID " - + "SET sg.statusCode=21 " - + "WHERE (sg.statusCode=22 OR sg.statusCode=0) " - + "AND rg.sourceSURL_uniqueID IN (" + quoteSURLUniqueIDs(surls) + ") " - + "AND rg.sourceSURL IN (" + quoteSURLList(surls) + ") " - + "AND rq.r_token = ?"; + String query = "UPDATE status_Get sg JOIN (request_Get rg, request_queue rq) " + + "ON sg.request_GetID=rg.ID AND rg.request_queueID=rq.ID SET sg.statusCode=21 " + + "WHERE (sg.statusCode=22 OR sg.statusCode=0) AND rg.sourceSURL_uniqueID IN (" + + quoteSURLUniqueIDs(surls) + ") AND rg.sourceSURL IN (" + quoteSURLList(surls) + + ") AND rq.r_token = ?"; stat = con.prepareStatement(query); stat.setString(1, token.getValue()); stat.executeUpdate(); } catch (SQLException e) { + String msg = String.format("releaseSURLs: SQL error: %s", e.getMessage()); LOGGER.error(msg, e); - throw 
new RuntimeException(msg, e); } finally { - closeStatetement(stat); + + closeStatement(stat); closeConnection(con); } } @@ -669,8 +640,7 @@ private TSURL surlFromString(String s) { return TSURL.makeFromStringWellFormed(s); } catch (InvalidTSURLAttributesException e) { - throw new IllegalArgumentException("Error creating surl from string: " - + s, e); + throw new IllegalArgumentException("Error creating surl from string: " + s, e); } } @@ -681,6 +651,7 @@ public boolean surlHasOngoingPtGs(TSURL surl) { ResultSet rs = null; PreparedStatement stat = null; Connection con = null; + boolean result = false; try { con = getConnection(); @@ -688,27 +659,30 @@ public boolean surlHasOngoingPtGs(TSURL surl) { // We basically check whether there are active requests // that have the SURL in SRM_FILE_PINNED status String query = "SELECT rq.ID, rg.ID, sg.statusCode " - + "FROM request_queue rq JOIN (request_Get rg, status_Get sg) " - + "ON (rg.request_queueID = rq.ID AND sg.request_GetID = rg.ID) " - + "WHERE ( rg.sourceSURL = ? and rg.sourceSURL_uniqueID = ? " - + "and sg.statusCode = 22 )"; + + "FROM request_queue rq JOIN (request_Get rg, status_Get sg) " + + "ON (rg.request_queueID = rq.ID AND sg.request_GetID = rg.ID) " + + "WHERE ( rg.sourceSURL = ? and rg.sourceSURL_uniqueID = ? 
" + + "and sg.statusCode = 22 )"; stat = con.prepareStatement(query); stat.setString(1, surl.getSURLString()); stat.setInt(2, surl.uniqueId()); rs = stat.executeQuery(); - return rs.next(); + result = rs.next(); + } catch (SQLException e) { - String msg = String.format("surlHasOngoingPtGs: SQL error: %s", - e.getMessage()); + + String msg = String.format("surlHasOngoingPtGs: SQL error: %s", e.getMessage()); LOGGER.error(msg, e); - throw new RuntimeException(msg, e); + } finally { - closeStatetement(stat); + + closeStatement(stat); closeResultSet(rs); closeConnection(con); } + return result; } public boolean surlHasOngoingPtPs(TSURL surl, TRequestToken ptpRequestToken) { @@ -718,6 +692,7 @@ public boolean surlHasOngoingPtPs(TSURL surl, TRequestToken ptpRequestToken) { ResultSet rs = null; PreparedStatement stat = null; Connection con = null; + boolean result = false; try { @@ -725,10 +700,9 @@ public boolean surlHasOngoingPtPs(TSURL surl, TRequestToken ptpRequestToken) { // We basically check whether there are active requests // that have the SURL in SRM_SPACE_AVAILABLE status String query = "SELECT rq.ID, rp.ID, sp.statusCode " - + "FROM request_queue rq JOIN (request_Put rp, status_Put sp) " - + "ON (rp.request_queueID=rq.ID AND sp.request_PutID=rp.ID) " - + "WHERE ( rp.targetSURL = ? and rp.targetSURL_uniqueID = ? " - + "and sp.statusCode=24 )"; + + "FROM request_queue rq JOIN (request_Put rp, status_Put sp) " + + "ON (rp.request_queueID=rq.ID AND sp.request_PutID=rp.ID) " + + "WHERE ( rp.targetSURL = ? and rp.targetSURL_uniqueID = ? 
" + "and sp.statusCode=24 )"; if (ptpRequestToken != null) { query += " AND rq.r_token != ?"; @@ -743,18 +717,20 @@ public boolean surlHasOngoingPtPs(TSURL surl, TRequestToken ptpRequestToken) { } rs = stat.executeQuery(); - return rs.next(); + result = rs.next(); + } catch (SQLException e) { - String msg = String.format("surlHasOngoingPtPs: SQL error: %s", - e.getMessage()); + + String msg = String.format("surlHasOngoingPtPs: SQL error: %s", e.getMessage()); LOGGER.error(msg, e); - throw new RuntimeException(msg, e); + } finally { - closeStatetement(stat); + + closeStatement(stat); closeResultSet(rs); closeConnection(con); } - + return result; } private void surlSanityChecks(List surls) { diff --git a/src/main/java/it/grid/storm/persistence/impl/mysql/StorageSpaceDAOMySql.java b/src/main/java/it/grid/storm/persistence/impl/mysql/StorageSpaceDAOMySql.java index 8e35b51f9..b303749c9 100644 --- a/src/main/java/it/grid/storm/persistence/impl/mysql/StorageSpaceDAOMySql.java +++ b/src/main/java/it/grid/storm/persistence/impl/mysql/StorageSpaceDAOMySql.java @@ -17,14 +17,6 @@ package it.grid.storm.persistence.impl.mysql; -import it.grid.storm.griduser.GridUserInterface; -import it.grid.storm.persistence.PersistenceDirector; -import it.grid.storm.persistence.dao.AbstractDAO; -import it.grid.storm.persistence.dao.StorageSpaceDAO; -import it.grid.storm.persistence.exceptions.DataAccessException; -import it.grid.storm.persistence.model.StorageSpaceTO; -import it.grid.storm.persistence.util.helper.StorageSpaceSQLHelper; - import java.sql.Connection; import java.sql.PreparedStatement; import java.sql.ResultSet; @@ -36,604 +28,586 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -/** - * find = con.prepareStatement( - * "SELECT storm_get_filereq.rowid, storm_req.r_token, storm_get_filereq.from_surl, storm_get_filereq.lifetime, storm_get_filereq.s_token, storm_get_filereq.flags, storm_req.protocol, storm_get_filereq.actual_size, storm_get_filereq.status, 
storm_get_filereq.errstring, storm_get_filereq.pfn FROM storm_get_filereq, storm_req WHERE storm_get_filereq.r_token=storm_req.r_token AND storm_get_filereq.r_token=?" - * ); - **/ - -public class StorageSpaceDAOMySql extends AbstractDAO implements - StorageSpaceDAO { - - private static final Logger log = LoggerFactory - .getLogger(StorageSpaceDAOMySql.class); - - private StorageSpaceSQLHelper helper; - - /** - * CONSTRUCTOR - */ - public StorageSpaceDAOMySql() { - - helper = new StorageSpaceSQLHelper(PersistenceDirector.getDataBase() - .getDbmsVendor()); - } - - /** - * addStorageSpace - * - * @param ss - * StorageSpace - * @throws DataAccessException - */ - - public void addStorageSpace(StorageSpaceTO ss) throws DataAccessException { - - Connection conn = getConnection(); - PreparedStatement prepStatement = null; - - try { - prepStatement = helper.insertQuery(conn, ss); - log.info("INSERT query = {}", prepStatement.toString()); - - int res = prepStatement.executeUpdate(); - log.debug("INSERT result = {}", res); - if (res <= 0) { - log - .error("No row inserted for statement : {}", prepStatement.toString()); - throw new DataAccessException("No rows inserted for Storage Space"); - } - } catch (SQLException e) { - log.error(e.getMessage(), e); - throw new DataAccessException("Error while executing INSERT query", e); - } finally { - releaseConnection(null, prepStatement, conn); - } - } - - /** - * getStorageSpaceById - * - * @param ssId - * Long - * @return StorageSpace - * @throws DataAccessException - */ - public StorageSpaceTO getStorageSpaceById(Long ssId) - throws DataAccessException { - - throw new DataAccessException("getStorageSpaceById: Unimplemented method!"); - } - - public Collection findAll() throws DataAccessException { - - throw new DataAccessException("findAll: Unimplemented method!"); - } - - /** - * Returns a Collection of StorageSpaceTO owned by 'user' and with the - * specified alias ('spaceAlias'). 
'spaceAlias' can be NULL or empty and in - * these cases a Collection of all the StorageSpaceTO owned by 'user' is - * returned. - * - * @param owner - * VomsGridUser. - * @param spaceAlias - * String. - * @return Collection of StorageSpaceTO. - * @throws DataAccessException - */ - public Collection getStorageSpaceByOwner( - GridUserInterface owner, String spaceAlias) throws DataAccessException { - - StorageSpaceTO ssTO = null; - Collection result = new LinkedList(); - - Connection conn = getConnection(); - ResultSet res = null; - PreparedStatement prepStatement = null; - - try { - prepStatement = helper.selectBySpaceAliasQuery(conn, owner, spaceAlias); - log.debug("DB query = {}", prepStatement.toString()); - - res = prepStatement.executeQuery(); - - log.debug("query result = {}", res); - if (res.first() == false) { - log.debug("No rows found for query : {}", prepStatement.toString()); - } else { - do { - ssTO = helper.makeStorageSpaceTO(res); - result.add(ssTO); - } while (res.next()); - } - } catch (SQLException e) { - log.error(e.getMessage(), e); - throw new DataAccessException("Error while executing DB query", e); - } finally { - releaseConnection(res, prepStatement, conn); - } - return result; - } - - /** - * Returns a Collection of StorageSpaceTO owned by 'VO'. - * - * @param voname - * Vo. - * @return Collection of StorageSpaceTO. 
- * @throws DataAccessException - */ - - public Collection getStorageSpaceBySpaceType(String stype) - throws DataAccessException { - - StorageSpaceTO ssTO = null; - Collection result = new LinkedList(); - - PreparedStatement prepStatement = null; - - Connection conn = getConnection(); - ResultSet res = null; - - try { - prepStatement = helper.selectBySpaceType(conn, stype); - log.debug("DB query = {}", prepStatement.toString()); - - res = prepStatement.executeQuery(); - log.debug("query result = {}", res); - if (res.first() == false) { - log.info("No rows found for query : {}", prepStatement.toString()); - } else { - // Fetch each row from the result set - do { - ssTO = helper.makeStorageSpaceTO(res); - result.add(ssTO); - } while (res.next()); - } - } catch (SQLException e) { - log.error(e.getMessage(), e); - throw new DataAccessException("Error while executing DB query", e); - } finally { - releaseConnection(res, prepStatement, conn); - } - return result; - } - - /** - * Returns a Collection of StorageSpaceTO with the specified alias - * ('spaceAlias'). 'spaceAlias' can not be be NULL or empty. - * - * @param spaceAlias - * String. - * @return Collection of StorageSpaceTO. 
- * @throws DataAccessException - */ - public Collection getStorageSpaceByAliasOnly(String spaceAlias) - throws DataAccessException { - - StorageSpaceTO ssTO = null; - Collection result = new LinkedList(); - Connection conn = getConnection(); - ResultSet res = null; - - PreparedStatement prepStatement = null; - - try { - prepStatement = helper.selectBySpaceAliasOnlyQuery(conn, spaceAlias); - log.debug("DB query = {}" , prepStatement.toString()); - - res = prepStatement.executeQuery(); - log.debug("query result = {}" , res); - - if (res.first() == false) { - log.info("No rows found for query : {}" , prepStatement.toString()); - } else { - // Fetch each row from the result set - do { - ssTO = helper.makeStorageSpaceTO(res); - result.add(ssTO); - } while (res.next()); - } - } catch (SQLException e) { - log.error(e.getMessage(), e); - throw new DataAccessException("Error while executing DB query", e); - } finally { - releaseConnection(res, prepStatement, conn); - } - return result; - } - - /** - * getStorageSpaceByToken - * - * @param token - * TSpaceToken - * @return StorageSpace , null if not row found on that token - * @throws DataAccessException - */ - public StorageSpaceTO getStorageSpaceByToken(String token) - throws DataAccessException { - - StorageSpaceTO ssTO = null; - - Connection conn = getConnection(); - ResultSet res = null; - - PreparedStatement prepStatement = null; - try { - prepStatement = helper.selectByTokenQuery(conn, token); - log.debug("SELECT query = {}" , prepStatement.toString()); - - res = prepStatement.executeQuery(); - - log.debug("SELECT result = {}" , res); - if (res.first() == false) { - log.info("No rows found for query : {}" , prepStatement.toString()); - } else { - // take the first - ssTO = helper.makeStorageSpaceTO(res); - } - } catch (SQLException e) { - log.error(e.getMessage(), e); - throw new DataAccessException("Error while executing INSERT query", e); - } finally { - releaseConnection(res, prepStatement, conn); - } - return 
ssTO; - } - - @Override - public Collection getStorageSpaceByUnavailableUsedSpace( - long unavailableSizeValue) throws DataAccessException { - - StorageSpaceTO ssTO = null; - Collection result = new LinkedList(); - - Connection conn = getConnection(); - ResultSet res = null; - PreparedStatement prepStatement = null; - - try { - prepStatement = helper.selectByUnavailableUsedSpaceSizeQuery(conn, - unavailableSizeValue); - log.debug("SELECT query = {}" , prepStatement.toString()); - - res = prepStatement.executeQuery(); - log.debug("SELECT result = {}" , res); - if (res.first() == false) { - log.info("No rows found for query : {}" , prepStatement.toString()); - } else { - // Fetch each row from the result set - do { - ssTO = helper.makeStorageSpaceTO(res); - result.add(ssTO); - } while (res.next()); - } - } catch (SQLException e) { - log.error(e.getMessage(), e); - throw new DataAccessException("Error while executing INSERT query", e); - } finally { - releaseConnection(res, prepStatement, conn); - } - return result; - } - - @Override - public Collection getStorageSpaceByPreviousLastUpdate( - Date lastUpdateTimestamp) throws DataAccessException { - - StorageSpaceTO ssTO = null; - Collection result = new LinkedList(); - - Connection conn = getConnection(); - ResultSet res = null; - PreparedStatement prepStatement = null; - - try { - prepStatement = helper.selectByPreviousOrNullLastUpdateQuery(conn, - lastUpdateTimestamp.getTime()); - log.debug("SELECT query = {}" , prepStatement.toString()); - - res = prepStatement.executeQuery(); - log.debug("SELECT result = {}" , res); - if (res.first() == false) { - log.info("No rows found for query : {}" , prepStatement.toString()); - } else { - // Fetch each row from the result set - do { - ssTO = helper.makeStorageSpaceTO(res); - result.add(ssTO); - } while (res.next()); - } - } catch (SQLException e) { - log.error(e.getMessage(), e); - throw new DataAccessException("Error while executing INSERT query", e); - } finally { - 
releaseConnection(res, prepStatement, conn); - } - return result; - } - - /** - * removeStorageSpace - * - * @param ss - * StorageSpace - * @throws DataAccessException - */ - public void removeStorageSpace(GridUserInterface user, String spaceToken) - throws DataAccessException { - - Connection conn = getConnection(); - PreparedStatement prepStatement = null; - - try { - prepStatement = helper.removeByTokenQuery(conn, user, spaceToken); - log.debug("query = {}" , prepStatement.toString()); - - int res = prepStatement.executeUpdate(); - log.debug("Number of rows removed: {}" , res); - if (res <= 0) { - log.error("Error removing Storage Space with token = {} for " - + "user {} not found", spaceToken, user.getDn()); - - throw new DataAccessException("Storage Space with token = '" - + spaceToken + "' for user '" + user.getDn() + "' not found!"); - } - } catch (SQLException e) { - log.error(e.getMessage(), e); - throw new DataAccessException("Error while executing DELETE query", e); - } finally { - releaseConnection(null, prepStatement, conn); - } - } - - /** - * removeStorageSpace only by spaceToken - * - * @param ss - * StorageSpace - * @throws DataAccessException - */ - public void removeStorageSpace(String spaceToken) throws DataAccessException { - - Connection conn = getConnection(); - PreparedStatement prepStatement = null; - - try { - prepStatement = helper.removeByTokenQuery(conn, spaceToken); - - log.debug("query = {}" , prepStatement.toString()); - - int res = prepStatement.executeUpdate(); - log.debug("Number of rows removed: {}" , res); - - if (res <= 0) { - log.error("Error removing Storage Space with token = {}. 
Space not found", - spaceToken); - - throw new DataAccessException("Storage Space with token = '" - + spaceToken + "' not found!"); - } - } catch (SQLException e) { - log.error(e.getMessage(), e); - throw new DataAccessException("Error while executing DELETE query", e); - } finally { - releaseConnection(null, prepStatement, conn); - } - } - - /** - * - * @param ssTO - * StorageSpaceTO - * @throws DataAccessException - */ - public void updateStorageSpace(StorageSpaceTO ssTO) - throws DataAccessException { - - Connection conn = getConnection(); - PreparedStatement prepStatement = null; - - try { - prepStatement = helper.updateByAliasAndTokenQuery(conn, ssTO); - log.debug("UPDATE query = {}" , prepStatement.toString()); - - int res = prepStatement.executeUpdate(); - log.debug("UPDATE row count = {}" , res); - - if (res != 1) { - if (res < 1) { - log.error("No storage space rows updated by query : {}" - , prepStatement.toString()); - } else { - log.warn("More than a single storage space rows updated by " - + "query : {}. 
updated {} rows.", - prepStatement.toString(), res); - } - } - } catch (SQLException e) { - log.error(e.getMessage(), e); - throw new DataAccessException("Error while executing UPDATE query", e); - } finally { - releaseConnection(null, prepStatement, conn); - } - } - - /** - * - * @param ssTO - * StorageSpaceTO - * @throws DataAccessException - */ - public void updateStorageSpaceFreeSpace(StorageSpaceTO ssTO) - throws DataAccessException { - - long freeSpace = ssTO.getFreeSize(); - - Connection conn = getConnection(); - PreparedStatement prepStatement = null; - - try { - prepStatement = helper.updateFreeSpaceByTokenQuery(conn, - ssTO.getSpaceToken(), freeSpace, new Date()); - - log.debug("UPDATE query = {}" , prepStatement.toString()); - - int res = prepStatement.executeUpdate(); - log.debug("UPDATE row count = {}", res); - if (res <= 0) { - log.error("No storage space rows updated by query : {}" - , prepStatement.toString()); - } - } catch (SQLException e) { - log.error(e.getMessage(), e); - throw new DataAccessException("Error while executing UPDATE query", e); - } finally { - releaseConnection(null, prepStatement, conn); - } - } - - /** - * - * @param ssTO - * StorageSpaceTO - * @throws DataAccessException - */ - public void updateAllStorageSpace(StorageSpaceTO ssTO) - throws DataAccessException { - - Connection conn = getConnection(); - PreparedStatement prepStatement = null; - - try { - prepStatement = helper.updateByTokenQuery(conn, ssTO); - log.debug("UPDATE query = {}", prepStatement.toString()); - - int res = prepStatement.executeUpdate(); - log.debug("UPDATE row count = {}" , res); - if (res != 1) { - if (res < 1) { - log.error("No storage space rows updated by query {}" - , prepStatement.toString()); - } else { - log.warn("More than a single storage space rows updated " - + "by query : {}. 
updated {} rows" - ,prepStatement.toString(), res); - } - } - } catch (SQLException e) { - log.error(e.getMessage(), e); - throw new DataAccessException("Error while executing UPDATE query", e); - } finally { - releaseConnection(null, prepStatement, conn); - } - } - - /** - * Method used to retrieve the set of StorageTO for expired space. - * - * @param long timeInSecond - * @return Collection of transfer object - */ - public Collection getExpired(long currentTimeInSecond) - throws DataAccessException { - - StorageSpaceTO ssTO = null; - Collection result = new LinkedList(); - - Connection conn = getConnection(); - ResultSet res = null; - PreparedStatement prepStatement = null; - - try { - prepStatement = helper.selectExpiredQuery(conn, currentTimeInSecond); - log.debug("DB query = {}" , prepStatement.toString()); - - res = prepStatement.executeQuery(); - - log.debug("query result = {}" , res); - if (res.first() == false) { - log.debug("No rows found for query : {}" , prepStatement.toString()); - throw new DataAccessException("No storage space expired found at time " - + currentTimeInSecond); - } else { - // Fetch each row from the result set - do { - ssTO = helper.makeStorageSpaceTO(res); - result.add(ssTO); - } while (res.next()); - } - } catch (SQLException e) { - log.error(e.getMessage(), e); - throw new DataAccessException("Error while executing DB query", e); - } finally { - releaseConnection(res, prepStatement, conn); - } - return result; - } - - @Override - public int increaseUsedSpace(String spaceToken, long usedSpaceToAdd) - throws DataAccessException { - - Connection conn = getConnection(); - ResultSet res = null; - PreparedStatement prepStatement = null; - - int n = 0; - - try { - prepStatement = helper.increaseUsedSpaceByTokenQuery(conn, spaceToken, usedSpaceToAdd); - log.debug("DB query = {}" , prepStatement.toString()); - - n = prepStatement.executeUpdate(); - - log.debug("query result = {}" , n); - if (n == 0) { - log.debug("No rows updated for query 
: {}" , prepStatement.toString()); - throw new DataAccessException("No storage space updated!"); - } - - } catch (SQLException e) { - log.error(e.getMessage(), e); - throw new DataAccessException("Error while executing DB query", e); - } finally { - releaseConnection(res, prepStatement, conn); - } - return n; +import it.grid.storm.griduser.GridUserInterface; +import it.grid.storm.persistence.dao.AbstractDAO; +import it.grid.storm.persistence.dao.StorageSpaceDAO; +import it.grid.storm.persistence.exceptions.DataAccessException; +import it.grid.storm.persistence.model.StorageSpaceTO; +import it.grid.storm.persistence.pool.StormBeIsamConnectionPool; +import it.grid.storm.persistence.util.helper.StorageSpaceSQLHelper; + +public class StorageSpaceDAOMySql extends AbstractDAO implements StorageSpaceDAO { + + private static final Logger log = LoggerFactory.getLogger(StorageSpaceDAOMySql.class); + + private static StorageSpaceDAO instance; + + public static synchronized StorageSpaceDAO getInstance() { + if (instance == null) { + instance = new StorageSpaceDAOMySql(); + } + return instance; + } + + private StorageSpaceSQLHelper helper; + + private StorageSpaceDAOMySql() { + super(StormBeIsamConnectionPool.getInstance()); + helper = new StorageSpaceSQLHelper(); + } + + /** + * addStorageSpace + * + * @param ss StorageSpace + */ + public void addStorageSpace(StorageSpaceTO ss) { + + Connection con = null; + PreparedStatement ps = null; + int res = 0; + + try { + + con = getConnection(); + ps = helper.insertQuery(con, ss); + + log.debug("INSERT query = {}", ps); + res = ps.executeUpdate(); + log.debug("INSERT result = {}", res); + + } catch (SQLException e) { + e.printStackTrace(); + } finally { + closeStatement(ps); + closeConnection(con); + } + + if (res <= 0) { + log.error("No rows inserted for Storage Space: {}", ss.toString()); + } + } + + /** + * getStorageSpaceById + * + * @param ssId Long + * @return StorageSpace + * @throws DataAccessException + */ + public 
StorageSpaceTO getStorageSpaceById(Long ssId) throws DataAccessException { + + throw new DataAccessException("getStorageSpaceById: Unimplemented method!"); + } + + public Collection findAll() throws DataAccessException { + + throw new DataAccessException("findAll: Unimplemented method!"); + } + + /** + * Returns a Collection of StorageSpaceTO owned by 'user' and with the specified alias + * ('spaceAlias'). 'spaceAlias' can be NULL or empty and in these cases a Collection of all the + * StorageSpaceTO owned by 'user' is returned. + * + * @param owner VomsGridUser. + * @param spaceAlias String. + * @return Collection of StorageSpaceTO. + */ + public Collection getStorageSpaceByOwner(GridUserInterface owner, + String spaceAlias) { + + StorageSpaceTO ssTO = null; + Collection result = new LinkedList(); + + Connection con = null; + ResultSet res = null; + PreparedStatement ps = null; + + try { + con = getConnection(); + ps = helper.selectBySpaceAliasQuery(con, owner, spaceAlias); + + log.debug("DB query = {}", ps); + res = ps.executeQuery(); + log.debug("query result = {}", res); + + if (res.first()) { + do { + ssTO = helper.makeStorageSpaceTO(res); + result.add(ssTO); + } while (res.next()); + } else { + log.debug("No rows found for query : {}", ps); + } + + } catch (SQLException e) { + e.printStackTrace(); + } finally { + closeResultSet(res); + closeStatement(ps); + closeConnection(con); } - - @Override - public int decreaseUsedSpace(String spaceToken, long usedSpaceToRemove) - throws DataAccessException { - - Connection conn = getConnection(); - ResultSet res = null; - PreparedStatement prepStatement = null; - - int n = 0; - - try { - prepStatement = helper.decreaseUsedSpaceByTokenQuery(conn, spaceToken, usedSpaceToRemove); - log.debug("DB query = {}" , prepStatement.toString()); - - n = prepStatement.executeUpdate(); - - log.debug("query result = {}" , n); - if (n == 0) { - log.debug("No rows updated for query : {}" , prepStatement.toString()); - throw new 
DataAccessException("No storage space updated!"); - } - - } catch (SQLException e) { - log.error(e.getMessage(), e); - throw new DataAccessException("Error while executing DB query", e); - } finally { - releaseConnection(res, prepStatement, conn); - } - return n; + return result; + } + + /** + * Returns a Collection of StorageSpaceTO owned by 'VO'. + * + * @param stype. + * @return Collection of StorageSpaceTO. + */ + + public Collection getStorageSpaceBySpaceType(String stype) { + + StorageSpaceTO ssTO = null; + Collection result = new LinkedList(); + + Connection con = null; + ResultSet res = null; + PreparedStatement ps = null; + + try { + con = getConnection(); + ps = helper.selectBySpaceType(con, stype); + + log.debug("DB query = {}", ps); + res = ps.executeQuery(); + log.debug("query result = {}", res); + + if (res.first()) { + do { + ssTO = helper.makeStorageSpaceTO(res); + result.add(ssTO); + } while (res.next()); + } else { + log.info("No rows found for query : {}", ps); + } + } catch (SQLException e) { + e.printStackTrace(); + } finally { + closeResultSet(res); + closeStatement(ps); + closeConnection(con); } + return result; + } + + /** + * Returns a Collection of StorageSpaceTO with the specified alias ('spaceAlias'). 'spaceAlias' + * can not be be NULL or empty. + * + * @param spaceAlias String. + * @return Collection of StorageSpaceTO. 
+ */ + public Collection getStorageSpaceByAliasOnly(String spaceAlias) { + + StorageSpaceTO ssTO = null; + Collection result = new LinkedList(); + + Connection con = null; + ResultSet res = null; + PreparedStatement ps = null; + + try { + con = getConnection(); + ps = helper.selectBySpaceAliasOnlyQuery(con, spaceAlias); + + log.debug("DB query = {}", ps); + res = ps.executeQuery(); + log.debug("query result = {}", res); + + if (res.first()) { + do { + ssTO = helper.makeStorageSpaceTO(res); + result.add(ssTO); + } while (res.next()); + } else { + log.info("No rows found for query : {}", ps); + } + } catch (SQLException e) { + e.printStackTrace(); + } finally { + closeResultSet(res); + closeStatement(ps); + closeConnection(con); + } + return result; + } + + /** + * getStorageSpaceByToken + * + * @param token TSpaceToken + * @return StorageSpace , null if not row found on that token + */ + public StorageSpaceTO getStorageSpaceByToken(String token) { + + StorageSpaceTO ssTO = null; + + Connection con = null; + ResultSet res = null; + PreparedStatement ps = null; + + try { + con = getConnection(); + ps = helper.selectByTokenQuery(con, token); + + log.debug("SELECT query = {}", ps); + res = ps.executeQuery(); + log.debug("SELECT result = {}", res); + + if (res.first()) { + ssTO = helper.makeStorageSpaceTO(res); + } else { + log.info("No rows found for query : {}", ps); + } + } catch (SQLException e) { + e.printStackTrace(); + } finally { + closeResultSet(res); + closeStatement(ps); + closeConnection(con); + } + return ssTO; + } + + @Override + public Collection getStorageSpaceByUnavailableUsedSpace( + long unavailableSizeValue) { + + StorageSpaceTO ssTO = null; + Collection result = new LinkedList(); + + Connection con = null; + ResultSet res = null; + PreparedStatement ps = null; + + try { + con = getConnection(); + ps = helper.selectByUnavailableUsedSpaceSizeQuery(con, unavailableSizeValue); + + log.debug("SELECT query = {}", ps); + res = ps.executeQuery(); + 
log.debug("SELECT result = {}", res); + + if (res.first()) { + do { + ssTO = helper.makeStorageSpaceTO(res); + result.add(ssTO); + } while (res.next()); + } else { + log.info("No rows found for query : {}", ps); + } + } catch (SQLException e) { + e.printStackTrace(); + } finally { + closeResultSet(res); + closeStatement(ps); + closeConnection(con); + } + return result; + } + + @Override + public Collection getStorageSpaceByPreviousLastUpdate(Date lastUpdateTimestamp) { + + StorageSpaceTO ssTO = null; + Collection result = new LinkedList(); + + Connection con = null; + ResultSet res = null; + PreparedStatement ps = null; + + try { + con = getConnection(); + ps = helper.selectByPreviousOrNullLastUpdateQuery(con, lastUpdateTimestamp.getTime()); + + log.debug("SELECT query = {}", ps); + res = ps.executeQuery(); + log.debug("SELECT result = {}", res); + + if (res.first()) { + do { + ssTO = helper.makeStorageSpaceTO(res); + result.add(ssTO); + } while (res.next()); + } else { + log.info("No rows found for query : {}", ps); + } + } catch (SQLException e) { + e.printStackTrace(); + } finally { + closeResultSet(res); + closeStatement(ps); + closeConnection(con); + } + return result; + } + + /** + * removeStorageSpace + * + * @param ss StorageSpace + */ + public void removeStorageSpace(GridUserInterface user, String spaceToken) { + + Connection con = null; + PreparedStatement ps = null; + int res = 0; + + try { + con = getConnection(); + ps = helper.removeByTokenQuery(con, user, spaceToken); + log.debug("query = {}", ps); + + res = ps.executeUpdate(); + log.debug("Number of rows removed: {}", res); + + } catch (SQLException e) { + e.printStackTrace(); + } finally { + closeStatement(ps); + closeConnection(con); + } + } + + /** + * removeStorageSpace only by spaceToken + * + * @param ss StorageSpace + * @throws DataAccessException + */ + public void removeStorageSpace(String spaceToken) throws DataAccessException { + + Connection con = null; + PreparedStatement ps = null; + 
int res = 0; + + try { + con = getConnection(); + ps = helper.removeByTokenQuery(con, spaceToken); + + log.debug("query = {}", ps); + res = ps.executeUpdate(); + log.debug("Number of rows removed: {}", res); + + } catch (SQLException e) { + e.printStackTrace(); + } finally { + closeStatement(ps); + closeConnection(con); + } + } + + /** + * + * @param ssTO StorageSpaceTO + */ + public void updateStorageSpace(StorageSpaceTO ssTO) { + + Connection con = null; + PreparedStatement ps = null; + int res = 0; + + try { + con = getConnection(); + ps = helper.updateByAliasAndTokenQuery(con, ssTO); + + log.debug("UPDATE query = {}", ps); + res = ps.executeUpdate(); + log.debug("UPDATE row count = {}", res); + + if (res == 0) { + log.warn("No storage space rows updated by query : {}", ps); + } + if (res > 1) { + log.warn( + "More than a single storage space rows updated by " + "query : {}. updated {} rows.", + ps, res); + } + + } catch (SQLException e) { + e.printStackTrace(); + } finally { + closeStatement(ps); + closeConnection(con); + } + + } + + /** + * + * @param ssTO StorageSpaceTO + * @throws DataAccessException + */ + public void updateStorageSpaceFreeSpace(StorageSpaceTO ssTO) throws DataAccessException { + + long freeSpace = ssTO.getFreeSize(); + + Connection con = null; + PreparedStatement ps = null; + int res = 0; + + try { + + con = getConnection(); + ps = helper.updateFreeSpaceByTokenQuery(con, ssTO.getSpaceToken(), freeSpace, new Date()); + + log.debug("UPDATE query = {}", ps); + res = ps.executeUpdate(); + log.debug("UPDATE row count = {}", res); + + if (res <= 0) { + log.warn("No storage space rows updated by query : {}", ps); + } + } catch (SQLException e) { + log.error(e.getMessage(), e); + throw new DataAccessException("Error while executing UPDATE query", e); + } finally { + closeStatement(ps); + closeConnection(con); } + } + + /** + * + * @param ssTO StorageSpaceTO + */ + public void updateAllStorageSpace(StorageSpaceTO ssTO) { + + Connection con = null; 
+ PreparedStatement ps = null; + int res = 0; + + try { + con = getConnection(); + ps = helper.updateByTokenQuery(con, ssTO); + + log.debug("UPDATE query = {}", ps); + res = ps.executeUpdate(); + log.debug("UPDATE row count = {}", res); + + if (res == 0) { + log.warn("No storage space rows updated by query {}", ps); + } + if (res > 1) { + log.warn( + "More than a single storage space rows updated " + "by query : {}. updated {} rows", ps, + res); + } + } catch (SQLException e) { + e.printStackTrace(); + } finally { + closeStatement(ps); + closeConnection(con); } + } + + /** + * Method used to retrieve the set of StorageTO for expired space. + * + * @param long timeInSecond + * @return Collection of transfer object + */ + public Collection getExpired(long currentTimeInSecond) { + + StorageSpaceTO ssTO = null; + Collection result = new LinkedList(); + + Connection con = null; + ResultSet res = null; + PreparedStatement ps = null; + + try { + + con = getConnection(); + ps = helper.selectExpiredQuery(con, currentTimeInSecond); + + log.debug("DB query = {}", ps); + res = ps.executeQuery(); + log.debug("query result = {}", res); + + if (res.first()) { + do { + ssTO = helper.makeStorageSpaceTO(res); + result.add(ssTO); + } while (res.next()); + } else { + log.debug("No rows found for query : {}", ps); + log.debug("No storage space expired found at time " + currentTimeInSecond); + } + } catch (SQLException e) { + e.printStackTrace(); + } finally { + closeResultSet(res); + closeStatement(ps); + closeConnection(con); + } + + return result; + } + + @Override + public int increaseUsedSpace(String spaceToken, long usedSpaceToAdd) throws DataAccessException { + + Connection con = null; + ResultSet res = null; + PreparedStatement ps = null; + int n = 0; + + try { + + con = getConnection(); + ps = helper.increaseUsedSpaceByTokenQuery(con, spaceToken, usedSpaceToAdd); + + log.debug("DB query = {}", ps); + n = ps.executeUpdate(); + log.debug("query result = {}", n); + + if (n == 0) { 
+ log.debug("No storage space updated!"); + } + + } catch (SQLException e) { + e.printStackTrace(); + } finally { + closeResultSet(res); + closeStatement(ps); + closeConnection(con); + } + + return n; + } + + @Override + public int decreaseUsedSpace(String spaceToken, long usedSpaceToRemove) + throws DataAccessException { + + Connection con = null; + ResultSet res = null; + PreparedStatement ps = null; + int n = 0; + + try { + + con = getConnection(); + ps = helper.decreaseUsedSpaceByTokenQuery(con, spaceToken, usedSpaceToRemove); + + log.debug("DB query = {}", ps); + n = ps.executeUpdate(); + log.debug("query result = {}", n); + + if (n == 0) { + log.debug("No storage space updated!"); + } + + } catch (SQLException e) { + e.printStackTrace(); + } finally { + closeResultSet(res); + closeStatement(ps); + closeConnection(con); + } + + return n; + } } diff --git a/src/main/java/it/grid/storm/persistence/impl/mysql/TapeRecallDAOMySql.java b/src/main/java/it/grid/storm/persistence/impl/mysql/TapeRecallDAOMySql.java index bafab8df9..c4f3d744e 100644 --- a/src/main/java/it/grid/storm/persistence/impl/mysql/TapeRecallDAOMySql.java +++ b/src/main/java/it/grid/storm/persistence/impl/mysql/TapeRecallDAOMySql.java @@ -18,747 +18,707 @@ package it.grid.storm.persistence.impl.mysql; import static it.grid.storm.persistence.model.TapeRecallTO.RecallTaskType.valueOf; - -import com.google.common.collect.Lists; - -import it.grid.storm.persistence.PersistenceDirector; -import it.grid.storm.persistence.dao.TapeRecallDAO; -import it.grid.storm.persistence.exceptions.DataAccessException; -import it.grid.storm.persistence.model.TapeRecallTO; -import it.grid.storm.persistence.util.helper.TapeRecallMySQLHelper; -import it.grid.storm.srm.types.InvalidTRequestTokenAttributesException; -import it.grid.storm.srm.types.TRequestToken; -import it.grid.storm.tape.recalltable.model.TapeRecallStatus; +import static it.grid.storm.persistence.util.helper.TapeRecallMySQLHelper.COL_DATE; +import static 
it.grid.storm.persistence.util.helper.TapeRecallMySQLHelper.COL_DEFERRED_STARTTIME; +import static it.grid.storm.persistence.util.helper.TapeRecallMySQLHelper.COL_FILE_NAME; +import static it.grid.storm.persistence.util.helper.TapeRecallMySQLHelper.COL_FINAL_STATUS_DATE; +import static it.grid.storm.persistence.util.helper.TapeRecallMySQLHelper.COL_GROUP_TASK_ID; +import static it.grid.storm.persistence.util.helper.TapeRecallMySQLHelper.COL_IN_PROGRESS_DATE; +import static it.grid.storm.persistence.util.helper.TapeRecallMySQLHelper.COL_PIN_LIFETIME; +import static it.grid.storm.persistence.util.helper.TapeRecallMySQLHelper.COL_REQUEST_TYPE; +import static it.grid.storm.persistence.util.helper.TapeRecallMySQLHelper.COL_RETRY_ATTEMPT; +import static it.grid.storm.persistence.util.helper.TapeRecallMySQLHelper.COL_STATUS; +import static it.grid.storm.persistence.util.helper.TapeRecallMySQLHelper.COL_TASK_ID; +import static it.grid.storm.persistence.util.helper.TapeRecallMySQLHelper.COL_USER_ID; +import static it.grid.storm.persistence.util.helper.TapeRecallMySQLHelper.COL_VO_NAME; import java.sql.Connection; import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; -import java.sql.Statement; import java.sql.Timestamp; import java.util.Calendar; import java.util.Date; import java.util.GregorianCalendar; import java.util.List; +import java.util.Optional; import java.util.UUID; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -public class TapeRecallDAOMySql extends TapeRecallDAO { +import com.google.common.collect.Lists; + +import it.grid.storm.persistence.dao.AbstractDAO; +import it.grid.storm.persistence.dao.TapeRecallDAO; +import it.grid.storm.persistence.exceptions.DataAccessException; +import it.grid.storm.persistence.model.TapeRecallTO; +import it.grid.storm.persistence.pool.StormBeIsamConnectionPool; +import it.grid.storm.persistence.util.helper.TapeRecallMySQLHelper; +import 
it.grid.storm.srm.types.InvalidTRequestTokenAttributesException; +import it.grid.storm.srm.types.TRequestToken; +import it.grid.storm.tape.recalltable.model.TapeRecallStatus; + +public class TapeRecallDAOMySql extends AbstractDAO implements TapeRecallDAO { + + private static final Logger log = LoggerFactory.getLogger(TapeRecallDAOMySql.class); + + private static TapeRecallDAO instance; + + public static synchronized TapeRecallDAO getInstance() { + if (instance == null) { + instance = new TapeRecallDAOMySql(); + } + return instance; + } + + private final TapeRecallMySQLHelper sqlHelper; + + private TapeRecallDAOMySql() { + + super(StormBeIsamConnectionPool.getInstance()); + sqlHelper = new TapeRecallMySQLHelper(); + } + + @Override + public int getNumberInProgress() throws DataAccessException { + + return getNumberInProgress(null); + } + + @Override + public int getNumberInProgress(String voName) throws DataAccessException { - private static final Logger log = LoggerFactory - .getLogger(TapeRecallDAOMySql.class); - - private final TapeRecallMySQLHelper sqlHelper; - - public TapeRecallDAOMySql() { - - sqlHelper = new TapeRecallMySQLHelper(PersistenceDirector.getDataBase() - .getDbmsVendor()); - } - - @Override - public int getNumberInProgress() throws DataAccessException { - - return getNumberInProgress(null); - } - - @Override - public int getNumberInProgress(String voName) throws DataAccessException { - - Connection dbConnection = getConnection(); - int status = 0; - ResultSet res = null; - PreparedStatement prepStatement = null; - - try { - if (voName == null) { - prepStatement = sqlHelper.getQueryNumberInProgress(dbConnection); - } else { - prepStatement = sqlHelper - .getQueryNumberInProgress(dbConnection, voName); - } - - log.debug("QUERY: {}", prepStatement); - - res = prepStatement.executeQuery(); - - if (res.first()) { - status = res.getInt(1); - } - } catch (SQLException e) { - log.error(e.getMessage(), e); - } finally { - releaseConnection(res, 
prepStatement, dbConnection); - } - return status; - } - - @Override - public int getNumberQueued() throws DataAccessException { - - return getNumberQueued(null); - } - - @Override - public int getNumberQueued(String voName) throws DataAccessException { - - Connection dbConnection = getConnection(); - int status = 0; - ResultSet res = null; - PreparedStatement prepStatement = null; - - try { - if (voName == null) { - prepStatement = sqlHelper.getQueryNumberQueued(dbConnection); - } else { - prepStatement = sqlHelper.getQueryNumberQueued(dbConnection, voName); - } - - log.debug("QUERY: {}", prepStatement); - res = prepStatement.executeQuery(); - - if (res.first()) { - status = res.getInt(1); - } - } catch (SQLException e) { - log.error(e.getMessage(), e); - } finally { - releaseConnection(res, prepStatement, dbConnection); - } - return status; - } - - @Override - public int getReadyForTakeOver() throws DataAccessException { - - return getReadyForTakeOver(null); - } - - @Override - public int getReadyForTakeOver(String voName) throws DataAccessException { - - Connection dbConnection = getConnection(); - int status = 0; - ResultSet res = null; - PreparedStatement prepStatement = null; - - try { - if (voName == null) { - prepStatement = sqlHelper.getQueryReadyForTakeOver(dbConnection); - } else { - prepStatement = sqlHelper - .getQueryReadyForTakeOver(dbConnection, voName); - } - - log.debug("QUERY: {}", prepStatement); - res = prepStatement.executeQuery(); - - if (res.first()) { - status = res.getInt(1); - } - } catch (SQLException e) { - throw new DataAccessException("Error executing query: '" - + prepStatement + "' " + e.getMessage(), e); - } finally { - releaseConnection(res, prepStatement, dbConnection); - } - return status; - } - - @Override - public List getGroupTasks(UUID groupTaskId) - throws DataAccessException { - - TapeRecallTO task = null; - List taskList = Lists.newArrayList(); - - Connection dbConnection = getConnection(); - ResultSet res = null; - 
PreparedStatement prepStatement = null; - - try { - prepStatement = sqlHelper - .getQueryGetGroupTasks(dbConnection, groupTaskId); - - log.debug("QUERY: {}", prepStatement); - res = prepStatement.executeQuery(); - - if (!res.first()) { - log.error("No tasks with GroupTaskId='{}'", groupTaskId); - throw new DataAccessException( - "No recall table row retrieved executing query: '" - + prepStatement + "'"); - } - do { - task = new TapeRecallTO(); - setTaskInfo(task, res); - taskList.add(task); - } while (res.next()); - } catch (SQLException e) { - throw new DataAccessException("Error executing query: '" - + prepStatement + "' " + e.getMessage(), e); - } finally { - releaseConnection(res, prepStatement, dbConnection); - } - return taskList; - } - - @Override - public boolean existsGroupTask(UUID groupTaskId) throws DataAccessException { - - boolean response = false; - - Connection dbConnection = getConnection(); - ResultSet res = null; - PreparedStatement prepStatement = null; - - try { - prepStatement = sqlHelper - .getQueryGetGroupTasks(dbConnection, groupTaskId); - - log.debug("QUERY: {}", prepStatement); - res = prepStatement.executeQuery(); - response = res.first(); - if (!response) { - log.info("No tasks found with GroupTaskId='{}'",groupTaskId); - } - } catch (SQLException e) { - log.error(e.getMessage(), e); - throw new DataAccessException("Error executing query: '" - + prepStatement + "' " + e.getMessage(), e); - } finally { - releaseConnection(res, prepStatement, dbConnection); - } - return response; - } - - @Override - public TapeRecallTO getTask(UUID taskId, String requestToken) - throws DataAccessException { - - TapeRecallTO task; - Connection dbConnection = getConnection(); - ResultSet res = null; - PreparedStatement prepStatement = null; - - try { - prepStatement = sqlHelper.getQueryGetTask(dbConnection, taskId, - requestToken); - log.debug("QUERY: {}", prepStatement); - res = prepStatement.executeQuery(); - - if (!res.first()) { - log.error("No task 
found for requestToken={} taskId={}. Query={}", requestToken, taskId, prepStatement); - - throw new DataAccessException("No task found for requestToken=" - + requestToken + " " + "taskId=" + taskId + ". Query = " - + prepStatement); - } - task = new TapeRecallTO(); - setTaskInfo(task, res); - } catch (SQLException e) { - log.error(e.getMessage(), e); - throw new DataAccessException("Error executing query: '" - + prepStatement + "' " + e.getMessage(), e); - } finally { - releaseConnection(res, prepStatement, dbConnection); - } - return task; - } - - @Override - public boolean existsTask(UUID taskId, String requestToken) - throws DataAccessException { - - boolean response; - - Connection dbConnection = getConnection(); - ResultSet res = null; - - PreparedStatement prepStatement = null; - - try { - prepStatement = sqlHelper.getQueryGetTask(dbConnection, taskId, - requestToken); - - log.debug("QUERY: {}", prepStatement); - res = prepStatement.executeQuery(); - response = res.first(); - } catch (SQLException e) { - log.error(e.getMessage(), e); - throw new DataAccessException("Error executing query: '" - + prepStatement + "' " + e.getMessage(), e); - } finally { - releaseConnection(res, prepStatement, dbConnection); - } - return response; - } - - @Override - public UUID insertCloneTask(TapeRecallTO task, int[] statuses, - UUID proposedGroupTaskId) throws DataAccessException { - - if (task.getTaskId() == null || task.getRequestToken() == null - || task.getRequestToken().getValue().trim().isEmpty()) { - log - .error("received Task insert request with empty primary key field TaskId or RequestToken. TaskId = {}, request token = {}", task.getTaskId(), task.getRequestToken()); - throw new DataAccessException( - "Unable to create insert the task wth the provided UUID and " - + "request token using UUID-namebased algorithm. 
TaskId = " - + task.getTaskId() + " , request token = " + task.getRequestToken()); - } - Integer status = task.getStatusId(); - - Connection dbConnection = getConnection(); - PreparedStatement prepStat = null; - - try { - dbConnection.setAutoCommit(false); - } catch (SQLException e) { - log.error("Error setting autocommit to false! {}", e.getMessage()); - throw new DataAccessException("Error setting autocommit to false! " - + e.getMessage(), e); - } - - ResultSet res = null; - try { - - if (statuses == null || statuses.length == 0) { - prepStat = sqlHelper.getQueryGetGroupTaskIds(dbConnection, - task.getTaskId()); - } else { - prepStat = sqlHelper.getQueryGetGroupTaskIds(dbConnection, - task.getTaskId(), statuses); - } - log.debug("QUERY: {}", prepStat); - - res = prepStat.executeQuery(); - - if (res.first()) { - /* Take the first, but there can be more than one result */ - String uuidString = res - .getString(TapeRecallMySQLHelper.COL_GROUP_TASK_ID); - status = Integer.valueOf(res.getInt(TapeRecallMySQLHelper.COL_STATUS)); - task.setStatusId(status.intValue()); - task.setGroupTaskId(UUID.fromString(uuidString)); - Calendar calendar = new GregorianCalendar(); - try { - task.forceStatusUpdateInstants( - res.getDate(TapeRecallMySQLHelper.COL_IN_PROGRESS_DATE, calendar), - res.getDate(TapeRecallMySQLHelper.COL_FINAL_STATUS_DATE, calendar)); - } catch (IllegalArgumentException e) { - log.error("Unable to set status update timestamps on the coned task"); - } - } else { - log.debug("No task found for taskId={} Creating a new group entry", task.getTaskId()); - task.setGroupTaskId(proposedGroupTaskId); - task.setStatusId(status.intValue()); - } - - prepStat = sqlHelper.getQueryInsertTask(dbConnection, task); - if (prepStat == null) { - // this case is possible if and only if the task is null or empty - log.error("Cannot create the query because the task is null or empty."); - throw new DataAccessException( - "Cannot create the query because the task is null or empty."); - 
} - try { - log.debug("Query(insert-task)={}", prepStat); - prepStat.executeUpdate(); - commit(dbConnection); - } catch (SQLException e) { - rollback(dbConnection); - throw new DataAccessException("Error executing query : " - + prepStat + " ; " + e.getMessage(), e); - } - } catch (SQLException e) { - rollback(dbConnection); - throw new DataAccessException("Error executing query : " + " ; " - + e.getMessage(), e); - } finally { - releaseConnection(new ResultSet[] { res }, new Statement[] { prepStat }, - dbConnection); - } - return task.getGroupTaskId(); - } - - /* - * (non-Javadoc) - * - * @see it.grid.storm.persistence.dao.TapeRecallDAO#purgeCompletedTasks(int) - */ - @Override - public int purgeCompletedTasks(long expirationTime, int numTasks) throws DataAccessException { - - PreparedStatement ps = null; - Connection con = getConnection(); - - int count = 0; - boolean hasLimit = numTasks > 0; - try { - if (hasLimit) { - ps = sqlHelper.getQueryDeleteCompletedTasks(con, expirationTime, numTasks); - } else { - ps = sqlHelper.getQueryDeleteCompletedTasks(con, expirationTime); - } - - count = ps.executeUpdate(); - - } catch (SQLException e) { - throw new DataAccessException("Error executing query: " + ps, e); - } finally { - releaseConnection(null, ps, con); - } - - return count; - } - - @Override - public void setGroupTaskRetryValue(UUID groupTaskId, int value) - throws DataAccessException { - - Connection dbConnection = getConnection(); - PreparedStatement prepStatement = null; - - try { - prepStatement = sqlHelper.getQuerySetGroupTaskRetryValue(dbConnection, - groupTaskId, value); - - prepStatement.executeUpdate(); - } catch (SQLException e) { - throw new DataAccessException("Error executing query: " - + prepStatement, e); - } finally { - releaseConnection(null, prepStatement, dbConnection); - } - - } - - @Override - public TapeRecallTO takeoverTask() throws DataAccessException { - - return takeoverTask(null); - } - - @Override - public TapeRecallTO 
takeoverTask(String voName) throws DataAccessException { - - List taskList = takeoverTasksWithDoubles(1, voName); - - if (taskList.isEmpty()) { - return null; - } - return taskList.get(0); - } - - @Override - public List takeoverTasksWithDoubles(int numberOfTaks) - throws DataAccessException { - - return takeoverTasksWithDoubles(numberOfTaks, null); - } - - @Override - public List takeoverTasksWithDoubles(int numberOfTaks, - String voName) throws DataAccessException { - - Connection dbConnection = getConnection(); - - List taskList = Lists.newLinkedList(); - TapeRecallTO task = null; - ResultSet res = null; - - PreparedStatement prepStatement = null; - - try { - dbConnection.setAutoCommit(false); - } catch (SQLException e) { - log.error("Error setting autocommit to false! {}", e.getMessage()); - throw new DataAccessException("Error setting autocommit to false! " - + e.getMessage(), e); - } - try { - if (voName == null) { - prepStatement = sqlHelper.getQueryGetTakeoverTasksWithDoubles( - dbConnection, numberOfTaks); - } else { - prepStatement = sqlHelper.getQueryGetTakeoverTasksWithDoubles( - dbConnection, numberOfTaks, voName); - } - // start transaction - log.debug("QUERY: {}", prepStatement); - res = prepStatement.executeQuery(); - if (!res.first()) { - log.info("No tape recall rows ready for takeover"); - return taskList; - } - do { - task = new TapeRecallTO(); - setTaskInfo(task, res); - task.setStatus(TapeRecallStatus.IN_PROGRESS); - taskList.add(task); - } while (res.next()); - if (!taskList.isEmpty()) { - try { - prepStatement = sqlHelper.getQueryUpdateTasksStatus(dbConnection, - taskList, TapeRecallStatus.IN_PROGRESS.getStatusId(), - TapeRecallMySQLHelper.COL_IN_PROGRESS_DATE, new Date()); - } catch (IllegalArgumentException e) { - log - .error("Unable to obtain the query to update task status and set status transition timestamp. 
IllegalArgumentException: " - + e.getMessage()); - throw new DataAccessException( - "Unable to obtain the query to update task status and set status transition timestamp"); - } - prepStatement.executeUpdate(); - } - commit(dbConnection); - } catch (SQLException e) { - rollback(dbConnection); - throw new DataAccessException("Error executing query: " - + prepStatement, e); - } finally { - releaseConnection(res, prepStatement, dbConnection); - } - return taskList; - } - - @Override - public List getAllInProgressTasks(int numberOfTaks) - throws DataAccessException { - - Connection dbConnection = getConnection(); - ResultSet res = null; - List taskList = Lists.newArrayList(); - - PreparedStatement prepStatement = null; - - try { - prepStatement = sqlHelper.getQueryGetAllTasksInProgress(dbConnection, - numberOfTaks); - - log.debug("getAllInProgressTasks query: {}", prepStatement); - - res = prepStatement.executeQuery(); - - boolean emptyResultSet = true; - - while (res.next()) { - - emptyResultSet = false; - TapeRecallTO task = new TapeRecallTO(); - setTaskInfo(task, res); - taskList.add(task); - } - - if (emptyResultSet) { - - log.debug("No in progress recall tasks found."); - } - - } catch (Exception e) { - - log.error("Error executing query: {}", prepStatement, e); - throw new DataAccessException("Error executing query: " - + prepStatement, e); - - } finally { - - releaseConnection(res, prepStatement, dbConnection); - } - - return taskList; - } - - private void setTaskInfo(TapeRecallTO task, ResultSet res) - throws DataAccessException { - - if (res == null) { - throw new DataAccessException("Unable to build Task from NULL ResultSet"); - } - - String requestTokenStr = null; - Timestamp insertionInstant; - try { - requestTokenStr = res.getString(TapeRecallMySQLHelper.COL_REQUEST_TOKEN); - insertionInstant = res.getTimestamp(TapeRecallMySQLHelper.COL_DATE); - - } catch (SQLException e) { - throw new DataAccessException( - "Unable to retrieve RequestToken String from 
ResultSet. " + e); - } - try { - task - .setRequestToken(new TRequestToken(requestTokenStr, insertionInstant)); - } catch (InvalidTRequestTokenAttributesException e) { - throw new DataAccessException( - "Unable to build TRequestToken from token='" + requestTokenStr + "'. " - + e); - } - - UUID groupTaskId = null; - String groupTaskIdStr = null; - try { - groupTaskIdStr = res.getString(TapeRecallMySQLHelper.COL_GROUP_TASK_ID); - if (groupTaskIdStr != null) { - try { - groupTaskId = UUID.fromString(groupTaskIdStr); - task.setGroupTaskId(groupTaskId); - } catch (IllegalArgumentException iae) { - throw new DataAccessException( - "Unable to build UUID from GroupTaskId='" + groupTaskId + "'. " - + iae); - } - } - } catch (SQLException e) { - throw new DataAccessException( - "Unable to retrieve GroupTaskId String from ResultSet. " + e); - } - - // do not set the task ID, it is produced by the setFilename call - - try { - - task.setRequestType(valueOf(res.getString(TapeRecallMySQLHelper.COL_REQUEST_TYPE))); - task.setFileName(res.getString(TapeRecallMySQLHelper.COL_FILE_NAME)); - task.setPinLifetime(res.getInt(TapeRecallMySQLHelper.COL_PIN_LIFETIME)); - task.setStatusId(res.getInt(TapeRecallMySQLHelper.COL_STATUS)); - task.setVoName(res.getString(TapeRecallMySQLHelper.COL_VO_NAME)); - task.setUserID(res.getString(TapeRecallMySQLHelper.COL_USER_ID)); - task.setRetryAttempt(res.getInt(TapeRecallMySQLHelper.COL_RETRY_ATTEMPT)); - Calendar calendar = new GregorianCalendar(); - task.setDeferredRecallInstant(res.getTimestamp( - TapeRecallMySQLHelper.COL_DEFERRED_STARTTIME, calendar)); - task.setInsertionInstant(res.getTimestamp(TapeRecallMySQLHelper.COL_DATE, - calendar)); - try { - task.forceStatusUpdateInstants(res.getTimestamp( - TapeRecallMySQLHelper.COL_IN_PROGRESS_DATE, calendar), res - .getTimestamp(TapeRecallMySQLHelper.COL_FINAL_STATUS_DATE, calendar)); - } catch (IllegalArgumentException e) { - log.error("Unable to set status update timestamps on the coned task"); - } 
- } catch (SQLException e) { - throw new DataAccessException("Unable to getting info from ResultSet. " - + e); - } - } - - @Override - public boolean setGroupTaskStatus(UUID groupTaskId, int newStatusId, - Date timestamp) throws DataAccessException { - - PreparedStatement prepStatement = null; - Connection dbConnection = getConnection(); - - try { - dbConnection.setAutoCommit(false); - } catch (SQLException e) { - log.error("Error setting autocommit to false! {}", e.getMessage()); - throw new DataAccessException("Error setting autocommit to false! " - + e.getMessage(), e); - } - - ResultSet res = null; - boolean ret = false; - int oldStatusId = -1; - - try { - - try { - prepStatement = sqlHelper.getQueryGetGroupTasks(dbConnection, - groupTaskId); - - log.debug("QUERY: {}", prepStatement); - // retrieves the tasks of this task group - res = prepStatement.executeQuery(); - - if (!res.first()) { - log.error("No tasks with GroupTaskId='{}'", groupTaskId); - throw new DataAccessException( - "No recall table row retrieved executing query: '" - + prepStatement + "'"); - } - // verify if their stored status is equal for all - oldStatusId = res.getInt(TapeRecallMySQLHelper.COL_STATUS); - do { - int currentStatusId = res.getInt(TapeRecallMySQLHelper.COL_STATUS); - if (currentStatusId != oldStatusId) { - log.warn("The tasks with groupTaskId {} have different statuses: {} from task {} differs " - + "from expected {}", groupTaskId, currentStatusId, - res.getString(TapeRecallMySQLHelper.COL_TASK_ID), oldStatusId); - break; - } - oldStatusId = currentStatusId; - } while (res.next()); - } catch (SQLException e) { - log - .error("Unable to retrieve groupTaskId related tasks. SQLException: {}", e); - throw new DataAccessException( - "Unable to retrieve groupTaskId related tasks. 
"); - } - if (oldStatusId != newStatusId) { - // update the task status and if is a valid transition set the relative - // transition timestamp - if (!TapeRecallStatus.getRecallTaskStatus(oldStatusId).precedes( - newStatusId)) { - log - .warn("Requested the update of the status of a recall task group to status {} that is precedent " - + "to the recorded status performing the request the same...", newStatusId, oldStatusId); - } - String timestampColumn = null; - if (TapeRecallStatus.isFinalStatus(newStatusId)) { - timestampColumn = TapeRecallMySQLHelper.COL_FINAL_STATUS_DATE; - } else { - if (TapeRecallStatus.IN_PROGRESS.equals(TapeRecallStatus - .getRecallTaskStatus(newStatusId))) { - timestampColumn = TapeRecallMySQLHelper.COL_IN_PROGRESS_DATE; - } else { - log - .warn("unable to determine the status update timestamp column to use given the new statusId '{}'", newStatusId); - } - } - if (timestampColumn != null) { - try { - prepStatement = sqlHelper.getQueryUpdateGroupTaskStatus( - dbConnection, groupTaskId, newStatusId, timestampColumn, - timestamp); - } catch (IllegalArgumentException e) { - log - .error("Unable to obtain the query to update task status and set status transition timestamp. 
IllegalArgumentException: {}", e.getMessage()); - throw new DataAccessException( - "Unable to obtain the query to update task status and set status transition timestamp"); - } catch (SQLException e) { - throw new DataAccessException("Error executing query: " - + prepStatement.toString(), e); - } - } else { - try { - prepStatement = sqlHelper.getQuerySetGroupTaskStatus(dbConnection, - groupTaskId, newStatusId); - } catch (SQLException e) { - throw new DataAccessException("Error executing query: " - + prepStatement.toString(), e); - } - } - try { - if (prepStatement.executeUpdate() > 0) { - ret = true; - } - commit(dbConnection); - } catch (SQLException e) { - throw new DataAccessException("Error executing query: " - + prepStatement.toString(), e); - } - } else { - log - .warn("Skipping the status upadate operation, the status already stored is equal to the new one provided"); - } - } finally { - releaseConnection(res, prepStatement, dbConnection); - } - return ret; - } + Connection con = null; + int status = 0; + ResultSet res = null; + PreparedStatement ps = null; + + try { + con = getConnection(); + if (voName == null) { + ps = sqlHelper.getQueryNumberInProgress(con); + } else { + ps = sqlHelper.getQueryNumberInProgress(con, voName); + } + + log.debug("QUERY: {}", ps); + res = ps.executeQuery(); + + if (res.first()) { + status = res.getInt(1); + } + } catch (SQLException e) { + e.printStackTrace(); + } finally { + closeResultSet(res); + closeStatement(ps); + closeConnection(con); + } + return status; + } + + @Override + public int getNumberQueued() throws DataAccessException { + + return getNumberQueued(null); + } + + @Override + public int getNumberQueued(String voName) throws DataAccessException { + + Connection con = null; + int status = 0; + ResultSet res = null; + PreparedStatement ps = null; + + try { + + con = getConnection(); + + if (voName == null) { + ps = sqlHelper.getQueryNumberQueued(con); + } else { + ps = sqlHelper.getQueryNumberQueued(con, voName); 
+ } + + log.debug("QUERY: {}", ps); + res = ps.executeQuery(); + + if (res.first()) { + status = res.getInt(1); + } + } catch (SQLException e) { + log.error(e.getMessage(), e); + } finally { + closeResultSet(res); + closeStatement(ps); + closeConnection(con); + } + return status; + } + + @Override + public int getReadyForTakeOver() throws DataAccessException { + + return getReadyForTakeOver(null); + } + + @Override + public int getReadyForTakeOver(String voName) throws DataAccessException { + + Connection con = null; + int status = 0; + ResultSet res = null; + PreparedStatement ps = null; + + try { + + con = getConnection(); + + if (voName == null) { + ps = sqlHelper.getQueryReadyForTakeOver(con); + } else { + ps = sqlHelper.getQueryReadyForTakeOver(con, voName); + } + + log.debug("QUERY: {}", ps); + res = ps.executeQuery(); + + if (res.first()) { + status = res.getInt(1); + } + } catch (SQLException e) { + e.printStackTrace(); + } finally { + closeResultSet(res); + closeStatement(ps); + closeConnection(con); + } + return status; + } + + @Override + public List getGroupTasks(UUID groupTaskId) throws DataAccessException { + + TapeRecallTO task = null; + List taskList = Lists.newArrayList(); + + Connection con = null; + ResultSet res = null; + PreparedStatement ps = null; + + try { + + con = getConnection(); + ps = sqlHelper.getQueryGetGroupTasks(con, groupTaskId); + + log.debug("QUERY: {}", ps); + res = ps.executeQuery(); + + if (res.first()) { + do { + task = new TapeRecallTO(); + setTaskInfo(task, res); + taskList.add(task); + } while (res.next()); + } else { + log.info("No tasks with GroupTaskId='{}'", groupTaskId); + } + + } catch (SQLException e) { + e.printStackTrace(); + } finally { + closeResultSet(res); + closeStatement(ps); + closeConnection(con); + } + return taskList; + } + + @Override + public boolean existsGroupTask(UUID groupTaskId) throws DataAccessException { + + boolean response = false; + + Connection con = null; + ResultSet res = null; + 
PreparedStatement ps = null; + + try { + + con = getConnection(); + ps = sqlHelper.getQueryGetGroupTasks(con, groupTaskId); + + log.debug("QUERY: {}", ps); + res = ps.executeQuery(); + response = res.first(); + if (!response) { + log.info("No tasks found with GroupTaskId='{}'", groupTaskId); + } + } catch (SQLException e) { + e.printStackTrace(); + } finally { + closeResultSet(res); + closeStatement(ps); + closeConnection(con); + } + return response; + } + + @Override + public Optional getTask(UUID taskId, String requestToken) + throws DataAccessException { + + TapeRecallTO task = null; + Connection con = null; + ResultSet res = null; + PreparedStatement ps = null; + + try { + + con = getConnection(); + ps = sqlHelper.getQueryGetTask(con, taskId, requestToken); + log.debug("QUERY: {}", ps); + res = ps.executeQuery(); + + if (res.first()) { + task = new TapeRecallTO(); + setTaskInfo(task, res); + } else { + log.info("No task found for requestToken={} taskId={}. Query={}", requestToken, taskId, ps); + } + } catch (SQLException e) { + e.printStackTrace(); + } finally { + closeResultSet(res); + closeStatement(ps); + closeConnection(con); + } + return Optional.ofNullable(task); + } + + @Override + public boolean existsTask(UUID taskId, String requestToken) throws DataAccessException { + + boolean response = false; + + Connection con = null; + ResultSet res = null; + PreparedStatement ps = null; + + try { + con = getConnection(); + ps = sqlHelper.getQueryGetTask(con, taskId, requestToken); + + log.debug("QUERY: {}", ps); + res = ps.executeQuery(); + response = res.first(); + } catch (SQLException e) { + e.printStackTrace(); + } finally { + closeResultSet(res); + closeStatement(ps); + closeConnection(con); + } + return response; + } + + @Override + public UUID insertCloneTask(TapeRecallTO task, int[] statuses, UUID proposedGroupTaskId) + throws DataAccessException { + + if (task.getTaskId() == null || task.getRequestToken() == null + || 
task.getRequestToken().getValue().trim().isEmpty()) { + log.error( + "received Task insert request with empty primary key field TaskId or RequestToken. TaskId = {}, request token = {}", + task.getTaskId(), task.getRequestToken()); + throw new DataAccessException("Unable to create insert the task with the provided UUID and " + + "request token using UUID-namebased algorithm. TaskId = " + task.getTaskId() + + " , request token = " + task.getRequestToken()); + } + int status = task.getStatusId(); + + Connection con = null; + PreparedStatement ps = null; + ResultSet res = null; + + try { + + con = getConnection(); + + if (statuses == null || statuses.length == 0) { + ps = sqlHelper.getQueryGetGroupTaskIds(con, task.getTaskId()); + } else { + ps = sqlHelper.getQueryGetGroupTaskIds(con, task.getTaskId(), statuses); + } + log.debug("QUERY: {}", ps); + + res = ps.executeQuery(); + + if (res.first()) { + /* Take the first, but there can be more than one result */ + String uuidString = res.getString(COL_GROUP_TASK_ID); + status = res.getInt(COL_STATUS); + task.setStatusId(status); + task.setGroupTaskId(UUID.fromString(uuidString)); + Calendar calendar = new GregorianCalendar(); + try { + task.forceStatusUpdateInstants( + res.getDate(TapeRecallMySQLHelper.COL_IN_PROGRESS_DATE, calendar), + res.getDate(TapeRecallMySQLHelper.COL_FINAL_STATUS_DATE, calendar)); + } catch (IllegalArgumentException e) { + log.error("Unable to set status update timestamps on the coned task"); + } + } else { + log.debug("No task found for taskId={} Creating a new group entry", task.getTaskId()); + task.setGroupTaskId(proposedGroupTaskId); + task.setStatusId(status); + } + + ps = sqlHelper.getQueryInsertTask(con, task); + if (ps == null) { + // this case is possible if and only if the task is null or empty + log.error("Cannot create the query because the task is null or empty."); + throw new DataAccessException("Cannot create the query because the task is null or empty."); + } + 
log.debug("Query(insert-task)={}", ps); + int n = ps.executeUpdate(); + log.debug("Query(insert-task)={} exited with {}", ps, n); + + } catch (SQLException e) { + e.printStackTrace(); + } finally { + closeResultSet(res); + closeStatement(ps); + closeConnection(con); + } + return task.getGroupTaskId(); + } + + @Override + public int purgeCompletedTasks(long expirationTime, int numTasks) throws DataAccessException { + + PreparedStatement ps = null; + Connection con = null; + int count = 0; + boolean hasLimit = numTasks > 0; + + try { + + con = getConnection(); + if (hasLimit) { + ps = sqlHelper.getQueryDeleteCompletedTasks(con, expirationTime, numTasks); + } else { + ps = sqlHelper.getQueryDeleteCompletedTasks(con, expirationTime); + } + + count = ps.executeUpdate(); + + } catch (SQLException e) { + e.printStackTrace(); + } finally { + closeStatement(ps); + closeConnection(con); + } + + return count; + } + + @Override + public void setGroupTaskRetryValue(UUID groupTaskId, int value) throws DataAccessException { + + Connection con = null; + PreparedStatement ps = null; + + try { + + con = getConnection(); + ps = sqlHelper.getQuerySetGroupTaskRetryValue(con, groupTaskId, value); + ps.executeUpdate(); + + } catch (SQLException e) { + e.printStackTrace(); + } finally { + closeStatement(ps); + closeConnection(con); + } + + } + + @Override + public TapeRecallTO takeoverTask() throws DataAccessException { + + return takeoverTask(null); + } + + @Override + public TapeRecallTO takeoverTask(String voName) throws DataAccessException { + + List taskList = takeoverTasksWithDoubles(1, voName); + + if (taskList.isEmpty()) { + return null; + } + return taskList.get(0); + } + + @Override + public List takeoverTasksWithDoubles(int numberOfTaks) throws DataAccessException { + + return takeoverTasksWithDoubles(numberOfTaks, null); + } + + @Override + public List takeoverTasksWithDoubles(int numberOfTaks, String voName) + throws DataAccessException { + + List taskList = 
Lists.newLinkedList(); + TapeRecallTO task = null; + + Connection con = null; + ResultSet res = null; + PreparedStatement ps = null; + + try { + + con = getConnection(); + + if (voName == null) { + ps = sqlHelper.getQueryGetTakeoverTasksWithDoubles(con, numberOfTaks); + } else { + ps = sqlHelper.getQueryGetTakeoverTasksWithDoubles(con, numberOfTaks, voName); + } + + // start transaction + log.debug("QUERY: {}", ps); + res = ps.executeQuery(); + if (res.first()) { + do { + task = new TapeRecallTO(); + setTaskInfo(task, res); + task.setStatus(TapeRecallStatus.IN_PROGRESS); + taskList.add(task); + } while (res.next()); + if (!taskList.isEmpty()) { + try { + ps = sqlHelper.getQueryUpdateTasksStatus(con, taskList, + TapeRecallStatus.IN_PROGRESS.getStatusId(), COL_IN_PROGRESS_DATE, new Date()); + } catch (IllegalArgumentException e) { + log.error( + "Unable to obtain the query to update task status and set status transition timestamp. IllegalArgumentException: " + + e.getMessage()); + throw new DataAccessException( + "Unable to obtain the query to update task status and set status transition timestamp"); + } + ps.executeUpdate(); + } + } else { + log.info("No tape recall rows ready for takeover"); + } + + } catch (SQLException e) { + e.printStackTrace(); + } finally { + closeResultSet(res); + closeStatement(ps); + closeConnection(con); + } + return taskList; + } + + @Override + public List getAllInProgressTasks(int numberOfTaks) throws DataAccessException { + + List taskList = Lists.newArrayList(); + + Connection con = null; + PreparedStatement ps = null; + ResultSet res = null; + + try { + con = getConnection(); + ps = sqlHelper.getQueryGetAllTasksInProgress(con, numberOfTaks); + + log.debug("getAllInProgressTasks query: {}", ps); + + res = ps.executeQuery(); + + boolean emptyResultSet = true; + + while (res.next()) { + + emptyResultSet = false; + TapeRecallTO task = new TapeRecallTO(); + setTaskInfo(task, res); + taskList.add(task); + } + + if (emptyResultSet) { + + 
log.debug("No in progress recall tasks found."); + } + + } catch (SQLException e) { + + e.printStackTrace(); + + } finally { + + closeResultSet(res); + closeStatement(ps); + closeConnection(con); + } + + return taskList; + } + + private void setTaskInfo(TapeRecallTO task, ResultSet res) throws DataAccessException { + + if (res == null) { + throw new DataAccessException("Unable to build Task from NULL ResultSet"); + } + + String requestTokenStr = null; + Timestamp insertionInstant; + try { + requestTokenStr = res.getString(TapeRecallMySQLHelper.COL_REQUEST_TOKEN); + insertionInstant = res.getTimestamp(TapeRecallMySQLHelper.COL_DATE); + + } catch (SQLException e) { + throw new DataAccessException("Unable to retrieve RequestToken String from ResultSet. " + e); + } + try { + task.setRequestToken(new TRequestToken(requestTokenStr, insertionInstant)); + } catch (InvalidTRequestTokenAttributesException e) { + throw new DataAccessException( + "Unable to build TRequestToken from token='" + requestTokenStr + "'. " + e); + } + + UUID groupTaskId = null; + String groupTaskIdStr = null; + try { + groupTaskIdStr = res.getString(TapeRecallMySQLHelper.COL_GROUP_TASK_ID); + if (groupTaskIdStr != null) { + try { + groupTaskId = UUID.fromString(groupTaskIdStr); + task.setGroupTaskId(groupTaskId); + } catch (IllegalArgumentException iae) { + throw new DataAccessException( + "Unable to build UUID from GroupTaskId='" + groupTaskId + "'. " + iae); + } + } + } catch (SQLException e) { + throw new DataAccessException("Unable to retrieve GroupTaskId String from ResultSet. 
" + e); + } + + // do not set the task ID, it is produced by the setFilename call + + try { + + task.setRequestType(valueOf(res.getString(COL_REQUEST_TYPE))); + task.setFileName(res.getString(COL_FILE_NAME)); + task.setPinLifetime(res.getInt(COL_PIN_LIFETIME)); + task.setStatusId(res.getInt(COL_STATUS)); + task.setVoName(res.getString(COL_VO_NAME)); + task.setUserID(res.getString(COL_USER_ID)); + task.setRetryAttempt(res.getInt(COL_RETRY_ATTEMPT)); + Calendar calendar = new GregorianCalendar(); + task.setDeferredRecallInstant(res.getTimestamp(COL_DEFERRED_STARTTIME, calendar)); + task.setInsertionInstant(res.getTimestamp(COL_DATE, calendar)); + try { + task.forceStatusUpdateInstants(res.getTimestamp(COL_IN_PROGRESS_DATE, calendar), + res.getTimestamp(COL_FINAL_STATUS_DATE, calendar)); + } catch (IllegalArgumentException e) { + log.error("Unable to set status update timestamps on the coned task"); + } + } catch (SQLException e) { + throw new DataAccessException("Unable to getting info from ResultSet. 
" + e); + } + } + + @Override + public boolean setGroupTaskStatus(UUID groupTaskId, int newStatusId, Date timestamp) + throws DataAccessException { + + Connection con = null; + PreparedStatement ps = null; + ResultSet res = null; + + boolean ret = false; + int oldStatusId = -1; + + try { + con = getConnection(); + + ps = sqlHelper.getQueryGetGroupTasks(con, groupTaskId); + + log.debug("QUERY: {}", ps); + + // retrieves the tasks of this task group + res = ps.executeQuery(); + + if (!res.first()) { + log.error("No tasks with GroupTaskId='{}'", groupTaskId); + throw new DataAccessException( + "No recall table row retrieved executing query: '" + ps + "'"); + } + + // verify if their stored status is equal for all + oldStatusId = res.getInt(COL_STATUS); + do { + int currentStatusId = res.getInt(COL_STATUS); + if (currentStatusId != oldStatusId) { + log.warn( + "The tasks with groupTaskId {} have different statuses: {} from task {} differs " + + "from expected {}", + groupTaskId, currentStatusId, res.getString(COL_TASK_ID), oldStatusId); + break; + } + oldStatusId = currentStatusId; + } while (res.next()); + + if (oldStatusId != newStatusId) { + // update the task status and if is a valid transition set the relative transition timestamp + if (!TapeRecallStatus.getRecallTaskStatus(oldStatusId).precedes(newStatusId)) { + log.warn( + "Requested the update of the status of a recall task group to status {} that is precedent " + + "to the recorded status performing the request the same...", + newStatusId, oldStatusId); + } + String timestampColumn = null; + if (TapeRecallStatus.isFinalStatus(newStatusId)) { + timestampColumn = COL_FINAL_STATUS_DATE; + } else { + if (TapeRecallStatus.IN_PROGRESS + .equals(TapeRecallStatus.getRecallTaskStatus(newStatusId))) { + timestampColumn = COL_IN_PROGRESS_DATE; + } else { + log.warn( + "unable to determine the status update timestamp column to use given the new statusId '{}'", + newStatusId); + } + } + if (timestampColumn != null) { + ps 
= sqlHelper.getQueryUpdateGroupTaskStatus(con, groupTaskId, newStatusId, + timestampColumn, timestamp); + } else { + ps = sqlHelper.getQuerySetGroupTaskStatus(con, groupTaskId, newStatusId); + } + if (ps.executeUpdate() > 0) { + ret = true; + } + } else { + log.warn( + "Skipping the status upadate operation, the status already stored is equal to the new one provided"); + } + } catch (IllegalArgumentException | SQLException e) { + e.printStackTrace(); + } finally { + closeResultSet(res); + closeStatement(ps); + closeConnection(con); + } + return ret; + } } diff --git a/src/main/java/it/grid/storm/persistence/impl/mysql/VolatileAndJiTDAOMySql.java b/src/main/java/it/grid/storm/persistence/impl/mysql/VolatileAndJiTDAOMySql.java new file mode 100644 index 000000000..d0746adf7 --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/impl/mysql/VolatileAndJiTDAOMySql.java @@ -0,0 +1,611 @@ +/* + * + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +package it.grid.storm.persistence.impl.mysql; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Iterator; +import java.util.List; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.collect.Lists; + +import it.grid.storm.persistence.dao.AbstractDAO; +import it.grid.storm.persistence.dao.VolatileAndJiTDAO; +import it.grid.storm.persistence.model.JiTData; +import it.grid.storm.persistence.pool.StormDbConnectionPool; + +/** + * DAO class for VolatileAndJiTCatalog: it has been specifically designed for MySQL. + * + * @author EGRID ICTP + * @version 1.0 (based on old PinnedFilesDAO) + * @date November, 2006 + */ +public class VolatileAndJiTDAOMySql extends AbstractDAO implements VolatileAndJiTDAO { + + private static final Logger log = LoggerFactory.getLogger(VolatileAndJiTDAOMySql.class); + + private static VolatileAndJiTDAO instance; + + public static synchronized VolatileAndJiTDAO getInstance() { + if (instance == null) { + instance = new VolatileAndJiTDAOMySql(); + } + return instance; + } + + private VolatileAndJiTDAOMySql() { + super(StormDbConnectionPool.getInstance()); + } + + /** + * Method that inserts a new entry in the JiT table of the DB, consisting of the specified + * filename, the local user uid, the local user gid, the acl, the start time as expressed by UNIX + * epoch (seconds since 00:00:00 1 1 1970) and the number of seconds the jit must last. + * + * In the DB, the start time gets translated into DATE:TIME in order to make it more readable. + * pinLifetime remains in seconds. 
+ */ + public void addJiT(String filename, int uid, int gid, int acl, long start, long pinLifetime) { + + String sql = + "INSERT INTO jit(file,uid,gid,acl,start,pinLifetime) VALUES(?,?,?,?,FROM_UNIXTIME(?),?)"; + + Connection con = null; + PreparedStatement stmt = null; + try { + con = getConnection(); + stmt = con.prepareStatement(sql); + stmt.setString(1, filename); + stmt.setInt(2, uid); + stmt.setInt(3, gid); + stmt.setInt(4, acl); + stmt.setLong(5, start); + stmt.setLong(6, pinLifetime); + log.debug("VolatileAndJiTDAO. addJiT: {}", stmt); + stmt.execute(); + } catch (SQLException e) { + log.error("VolatileAndJiTDAO! Error in addJiT: {}", e.getMessage(), e); + e.printStackTrace(); + } finally { + closeStatement(stmt); + closeConnection(con); + } + } + + /** + * Method that inserts a new entry in the Volatile table of the DB, consisting of the specified + * filename, the start time as expressed by UNIX epoch (seconds since 00:00:00 1 1 1970), and the + * number of seconds the file must be kept for. + * + * In the DB, the start time gets translated into DATE:TIME in order to make it more readable. + * pinLifetime remains in seconds. + */ + public void addVolatile(String filename, long start, long fileLifetime) { + + String sql = "INSERT INTO volatile(file,start,fileLifetime) VALUES(?,FROM_UNIXTIME(?),?)"; + + Connection con = null; + PreparedStatement stmt = null; + try { + con = getConnection(); + stmt = con.prepareStatement(sql); + stmt.setString(1, filename); + stmt.setLong(2, start); + stmt.setLong(3, fileLifetime); + log.debug("VolatileAndJiTDAO. addVolatile: {}", stmt); + stmt.execute(); + } catch (SQLException e) { + log.error("VolatileAndJiTDAO! Error in addVolatile: {}", e.getMessage(), e); + e.printStackTrace(); + } finally { + closeStatement(stmt); + closeConnection(con); + } + } + + /** + * Checks whether the given file exists in the volatile table or not. 
+ * + * @param filename + * @return true if there is an entry for the given file in the volatile table, + * false otherwise. + */ + public boolean exists(String filename) { + + String sql = "SELECT ID FROM volatile WHERE file=? LIMIT 1"; + + Connection con = null; + PreparedStatement stmt = null; + ResultSet rs = null; + boolean result; + + try { + con = getConnection(); + stmt = con.prepareStatement(sql); + stmt.setString(1, filename); + log.debug("VolatileAndJiTDAO - existsOnVolatile - {}", stmt); + rs = stmt.executeQuery(); + + if (rs.next()) { + result = true; + } else { + result = false; + } + } catch (SQLException e) { + log.error("VolatileAndJiTDAO! Error in existsOnVolatile: {}", e.getMessage(), e); + e.printStackTrace(); + result = false; + } finally { + closeResultSet(rs); + closeStatement(stmt); + closeConnection(con); + } + return result; + } + + /** + * Method that updates an existing entry in the JiT table of the DB, consisting of the specified + * filename, the uid and gid of the local user, the acl, the start time as expressed by UNIX epoch + * (seconds since 00:00:00 1 1 1970), and the number of seconds the jit must last. + * + * In the DB, the start time gets translated into DATE:TIME in order to make it more readable. + * pinLifetime remains in seconds. + * + * This method _forces_ the update regardless of the fact that the new expiry lasts less than the + * current one! This method is intended to be used by expireJiT. + * + * Only start and pinLifetime get updated, while filename, uid, gid and acl, are used as criteria + * to select records. + */ + public void forceUpdateJiT(String filename, int uid, int acl, long start, long pinLifetime) { + + String sql = "UPDATE jit " + "SET start=FROM_UNIXTIME(?), pinLifetime=? " + + "WHERE file=? AND uid=? 
AND acl=?"; + + Connection con = null; + PreparedStatement stmt = null; + try { + con = getConnection(); + stmt = con.prepareStatement(sql); + stmt.setLong(1, start); + stmt.setLong(2, pinLifetime); + stmt.setString(3, filename); + stmt.setInt(4, uid); + stmt.setInt(5, acl); + log.debug("VolatileAndJiTDAO. forceUpdateJiT: {}", stmt); + int n = stmt.executeUpdate(); + log.debug("VolatileAndJiTDAO. {} jit entries forced updated.", n); + } catch (SQLException e) { + log.error("VolatileAndJiTDAO! Error in forceUpdateJiT: {}", e.getMessage(), e); + e.printStackTrace(); + } finally { + closeStatement(stmt); + closeConnection(con); + } + } + + /** + * Method that returns the number of entries in the catalogue, matching the given filename, uid + * and acl. + * + * Notice that in general there should be either one or none, and more should be taken as + * indication of catalogue corruption. + * + * -1 is returned if there are problems with the DB. + */ + public int numberJiT(String filename, int uid, int acl) { + + String sql = "SELECT COUNT(ID) FROM jit WHERE file=? AND uid=? AND acl=?"; + + Connection con = null; + PreparedStatement stmt = null; + ResultSet rs = null; + int n = -1; + + try { + con = getConnection(); + stmt = con.prepareStatement(sql); + stmt.setString(1, filename); + stmt.setInt(2, uid); + stmt.setInt(3, acl); + log.debug("VolatileAndJiTDAO. numberJiT: {}", stmt); + rs = stmt.executeQuery(); + + if (rs.next()) { + n = rs.getInt(1); + } else { + log.error("VolatileAndJiTDAO! Unexpected situation in numberJiT: " + "result set empty!"); + } + } catch (SQLException e) { + log.error("VolatileAndJiTDAO! Error in numberJiT: {}", e.getMessage(), e); + e.printStackTrace(); + } finally { + closeResultSet(rs); + closeStatement(stmt); + closeConnection(con); + } + return n; + } + + /** + * Method that returns the number of Volatile entries in the catalogue, for the given filename. 
+ * + * Notice that in general there should be either one or none, and more should be taken as + * indication of catalogue corruption. + * + * -1 is returned if there are problems with the DB. + */ + public int numberVolatile(String filename) { + + String sql = "SELECT COUNT(ID) FROM volatile WHERE file=?"; + + Connection con = null; + PreparedStatement stmt = null; + ResultSet rs = null; + int n = -1; + + try { + con = getConnection(); + stmt = con.prepareStatement(sql); + stmt.setString(1, filename); + log.debug("VolatileAndJiTDAO. numberVolatile: {}", stmt); + rs = stmt.executeQuery(); + if (rs.next()) { + n = rs.getInt(1); + } else { + log.error( + "VolatileAndJiTDAO! Unexpected situation in numberVolatile: " + "result set empty!"); + } + } catch (SQLException e) { + log.error("VolatileAndJiTDAO! Error in numberVolatile: {}", e.getMessage(), e); + e.printStackTrace(); + } finally { + closeResultSet(rs); + closeStatement(stmt); + closeConnection(con); + } + return n; + } + + /** + * Method that removes all entries in the JiT table of the DB, that match the specified filename. + * So this action takes place _regardless_ of the user that set up the ACL! + */ + public void removeAllJiTsOn(String filename) { + + String sql = "DELETE FROM jit WHERE file=?"; + + Connection con = null; + PreparedStatement stmt = null; + try { + con = getConnection(); + stmt = con.prepareStatement(sql); + stmt.setString(1, filename); + log.debug("VolatileAndJiTDAO. removeJiT: {}", stmt); + int n = stmt.executeUpdate(); + log.debug("VolatileAndJiTDAO. removeJiT: {} entries removed", n); + } catch (SQLException e) { + log.error("VolatileAndJiTDAO! Error in removeJiT: {}", e.getMessage(), e); + e.printStackTrace(); + } finally { + closeStatement(stmt); + closeConnection(con); + } + } + + /** + * Method used to remove all expired entries, both of pinned files and of jit ACLs. 
Also, when + * removing volatile entries, any jit entry that refers to those expired volatiles will also be + * removed. + * + * The method requires a long representing the time measured as UNIX EPOCH upon which to base the + * purging: entries are evaluated expired when compared to this date. + * + * The method returns an array of two Collections; Collection[0] contains expired volatile entries + * String PFNs, while Collection[1] contains JiTDataTO objects. Collection[1] also contains those + * entries that may not have expired yet, but since the respective Volatile is being removed they + * too must be removed automatically. + * + * WARNING! If any error occurs it gets logged, and an array of two empty Collection is returned. + * This operation is treated as a Transaction by the DB, so a Roll Back should return everything + * to its original state! + */ + public Collection[] removeExpired(long time) { + + String vol = "SELECT ID,file FROM volatile WHERE (UNIX_TIMESTAMP(start)+fileLifetime volat = Lists.newArrayList(); + Collection volatid = Lists.newArrayList(); + while (rs.next()) { + volatid.add(new Long(rs.getLong("ID"))); + volat.add(rs.getString("file")); + } + int nvolat = volatid.size(); + closeResultSet(rs); + closeStatement(stmt); + + // get list of jits + if (nvolat > 0) { + // there are expired volatile entries: adjust jit selection to include + // those SURLs too! + jit = jit + " OR file IN " + makeFileString(volat); + } + stmt = con.prepareStatement(jit); + stmt.setLong(1, time); + log.debug("VolatileAndJiTDAO. 
removeExpired: {}", stmt); + rs = stmt.executeQuery(); + + Collection track = Lists.newArrayList(); + Collection trackid = Lists.newArrayList(); + + while (rs.next()) { + trackid.add(new Long(rs.getLong("ID"))); + JiTData aux = + new JiTData(rs.getString("file"), rs.getInt("acl"), rs.getInt("uid"), rs.getInt("gid")); + track.add(aux); + } + int njit = trackid.size(); + closeResultSet(rs); + closeStatement(stmt); + + // remove entries + Collection volcol = Lists.newArrayList(); + Collection jitcol = Lists.newArrayList(); + try { + con.setAutoCommit(false); // begin transaction! + // delete volatile + int deletedvol = 0; + if (nvolat > 0) { + delvol = delvol + makeIDString(volatid); + stmt = con.prepareStatement(delvol); + log.debug("VolatileAndJiTDAO. removeExpired: {}", stmt); + deletedvol = stmt.executeUpdate(); + closeStatement(stmt); + } + // delete jits + int deletedjit = 0; + if (njit > 0) { + deljit = deljit + makeIDString(trackid); + stmt = con.prepareStatement(deljit); + log.debug("VolatileAndJiTDAO. removeExpired: {}", stmt); + deletedjit = stmt.executeUpdate(); + closeStatement(stmt); + } + con.commit(); + con.setAutoCommit(true); // end transaction! + log.debug("VolatileAndJiTDAO. Removed {} volatile catalogue entries " + + "and {} jit catalogue entries.", deletedvol, deletedjit); + volcol = volat; + jitcol = track; + } catch (SQLException e) { + log.error("VolatileAndJiTDAO! Unable to complete removeExpired... " + "rolling back! {}", + e.getMessage(), e); + con.rollback(); + closeStatement(stmt); + } + + // return collections + return new Collection[] {volcol, jitcol}; + } catch (SQLException e) { + log.error("VolatileAndJiTDAO! Unable to complete removeExpired! 
{}", e.getMessage(), e); + // in case of any failure return an array of two empty Collection + return new Collection[] {Lists.newArrayList(), Lists.newArrayList()}; + } finally { + closeResultSet(rs); + closeStatement(stmt); + closeConnection(con); + } + } + + /** + * Method that updates an existing entry in the JiT table of the DB, consisting of the specified + * filename, the uid and gid of the local user, the acl, the start time as expressed by UNIX epoch + * (seconds since 00:00:00 1 1 1970), and the number of seconds the jit must last. + * + * In the DB, the start time gets translated into DATE:TIME in order to make it more readable. + * pinLifetime remains in seconds. + * + * Entries get updated only if the new expiry calculated by adding start and pinLifetime, is + * larger than the existing one. + * + * Only start and pinLifetime get updated, while filename, uid, gid and acl, are used as criteria + * to select records. + */ + public void updateJiT(String filename, int uid, int acl, long start, long pinLifetime) { + + String sql = "UPDATE jit SET start=FROM_UNIXTIME(?), pinLifetime=? " + + "WHERE file=? AND uid=? AND acl=? AND (UNIX_TIMESTAMP(start)+pinLifetime volatileInfoOn(String filename) { + + String sql = "SELECT UNIX_TIMESTAMP(start), fileLifetime FROM volatile WHERE file=?"; + + Connection con = null; + PreparedStatement stmt = null; + ResultSet rs = null; + List aux = Lists.newArrayList(); + + try { + con = getConnection(); + stmt = con.prepareStatement(sql); + stmt.setString(1, filename); + log.debug("VolatileAndJiTDAO - infoOnVolatile - {}", stmt); + rs = stmt.executeQuery(); + if (rs.next()) { + aux.add(rs.getLong("UNIX_TIMESTAMP(start)")); + aux.add(rs.getLong("fileLifetime")); + } else { + log.debug("VolatileAndJiTDAO! infoOnVolatile did not find {}", filename); + } + } catch (SQLException e) { + log.error("VolatileAndJiTDAO! 
Error in infoOnVolatile: {}", e.getMessage(), e); + } finally { + closeResultSet(rs); + closeStatement(stmt); + closeConnection(con); + } + return aux; + } + + /** + * Method that returns a String containing all Files. + */ + private String makeFileString(Collection files) { + + StringBuilder sb = new StringBuilder("("); + for (Iterator i = files.iterator(); i.hasNext();) { + sb.append("'"); + sb.append(i.next()); + sb.append("'"); + if (i.hasNext()) { + sb.append(","); + } + } + sb.append(")"); + return sb.toString(); + } + + /** + * Method that returns a String containing all IDs. + */ + private String makeIDString(Collection rowids) { + + StringBuilder sb = new StringBuilder("("); + for (Iterator i = rowids.iterator(); i.hasNext();) { + sb.append(String.valueOf(i.next())); + if (i.hasNext()) { + sb.append(","); + } + } + sb.append(")"); + return sb.toString(); + } +} diff --git a/src/main/java/it/grid/storm/persistence/model/AnonymousFileTransferData.java b/src/main/java/it/grid/storm/persistence/model/AnonymousFileTransferData.java new file mode 100644 index 000000000..ec610312c --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/model/AnonymousFileTransferData.java @@ -0,0 +1,85 @@ +/* + * + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +package it.grid.storm.persistence.model; + +import it.grid.storm.common.types.TURLPrefix; +import it.grid.storm.persistence.exceptions.InvalidFileTransferDataAttributesException; +import it.grid.storm.persistence.exceptions.InvalidSurlRequestDataAttributesException; +import it.grid.storm.srm.types.TReturnStatus; +import it.grid.storm.srm.types.TSURL; +import it.grid.storm.srm.types.TTURL; + +/** + * @author Michele Dibenedetto + * + */ + +public abstract class AnonymousFileTransferData extends SurlMultyOperationRequestData + implements FileTransferData { + + protected TURLPrefix transferProtocols; + protected TTURL transferURL; + + public AnonymousFileTransferData(TSURL toSURL, TURLPrefix transferProtocols, TReturnStatus status, + TTURL transferURL) + throws InvalidFileTransferDataAttributesException, InvalidSurlRequestDataAttributesException { + + super(toSURL, status); + if (transferProtocols == null || transferURL == null) { + throw new InvalidFileTransferDataAttributesException(toSURL, transferProtocols, status, + transferURL); + } + this.transferProtocols = transferProtocols; + this.transferURL = transferURL; + } + + /* + * (non-Javadoc) + * + * @see it.grid.storm.catalogs.FileTransferData#getTransferProtocols() + */ + @Override + public final TURLPrefix getTransferProtocols() { + + return transferProtocols; + } + + /* + * (non-Javadoc) + * + * @see it.grid.storm.catalogs.FileTransferData#getTransferURL() + */ + @Override + public final TTURL getTransferURL() { + + return transferURL; + } + + /* + * (non-Javadoc) + * + * @see it.grid.storm.catalogs.FileTransferData#setTransferURL(it.grid.storm.srm .types.TTURL) + */ + @Override + public final void setTransferURL(final TTURL turl) { + + if (turl != null) { + transferURL = turl; + } + } +} diff --git a/src/main/java/it/grid/storm/persistence/model/AnonymousPtGData.java b/src/main/java/it/grid/storm/persistence/model/AnonymousPtGData.java new file mode 100644 index 000000000..103583402 --- /dev/null +++ 
b/src/main/java/it/grid/storm/persistence/model/AnonymousPtGData.java @@ -0,0 +1,229 @@ +/* + * + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package it.grid.storm.persistence.model; + +import it.grid.storm.common.types.TURLPrefix; +import it.grid.storm.persistence.exceptions.InvalidFileTransferDataAttributesException; +import it.grid.storm.persistence.exceptions.InvalidPtGDataAttributesException; +import it.grid.storm.persistence.exceptions.InvalidSurlRequestDataAttributesException; +import it.grid.storm.srm.types.TDirOption; +import it.grid.storm.srm.types.TLifeTimeInSeconds; +import it.grid.storm.srm.types.TReturnStatus; +import it.grid.storm.srm.types.TSURL; +import it.grid.storm.srm.types.TSizeInBytes; +import it.grid.storm.srm.types.TStatusCode; +import it.grid.storm.srm.types.TTURL; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * This class represents a PrepareToGetChunkData, that is part of a multifile PrepareToGet srm + * request. It contains data about: the requestToken, the fromSURL, the requested lifeTime of + * pinning, the TDirOption which tells whether the requested SURL is a directory and if it must be + * recursed at all levels, as well as the desired number of levels to recurse, the desired + * transferProtocols in order of preference, the fileSize, and the transferURL for the supplied + * SURL. 
+ * + * @author EGRID - ICTP Trieste + * @date March 21st, 2005 + * @version 3.0 + */ +public class AnonymousPtGData extends AnonymousFileTransferData implements PtGData { + + private static final Logger log = LoggerFactory.getLogger(AnonymousPtGData.class); + + /** requested lifetime of TURL: it is the pin time! */ + protected TLifeTimeInSeconds pinLifeTime; + /** specifies if the request regards a directory and related info */ + protected TDirOption dirOption; + /** size of file */ + protected TSizeInBytes fileSize; + + /** + * @param requestToken + * @param fromSURL + * @param lifeTime + * @param dirOption + * @param desiredProtocols + * @param fileSize + * @param status + * @param transferURL + * @throws InvalidPtGDataAttributesException + */ + public AnonymousPtGData(TSURL SURL, TLifeTimeInSeconds lifeTime, TDirOption dirOption, + TURLPrefix desiredProtocols, TSizeInBytes fileSize, TReturnStatus status, TTURL transferURL) + throws InvalidPtGDataAttributesException, InvalidFileTransferDataAttributesException, + InvalidSurlRequestDataAttributesException { + + super(SURL, desiredProtocols, status, transferURL); + if (lifeTime == null || dirOption == null || fileSize == null) { + log.debug("Invalid arguments: lifeTime={}, dirOption={}, fileSize={}", lifeTime, dirOption, + fileSize); + throw new InvalidPtGDataAttributesException(SURL, lifeTime, dirOption, desiredProtocols, + fileSize, status, transferURL); + + } + this.pinLifeTime = lifeTime; + this.dirOption = dirOption; + this.fileSize = fileSize; + } + + /* + * (non-Javadoc) + * + * @see it.grid.storm.catalogs.PtGData#getPinLifeTime() + */ + @Override + public TLifeTimeInSeconds getPinLifeTime() { + + return pinLifeTime; + } + + /* + * (non-Javadoc) + * + * @see it.grid.storm.catalogs.PtGData#getDirOption() + */ + @Override + public TDirOption getDirOption() { + + return dirOption; + } + + /* + * (non-Javadoc) + * + * @see it.grid.storm.catalogs.PtGData#getFileSize() + */ + @Override + public TSizeInBytes 
getFileSize() { + + return fileSize; + } + + /* + * (non-Javadoc) + * + * @see it.grid.storm.catalogs.PtGData#setFileSize(it.grid.storm.srm.types.TSizeInBytes ) + */ + @Override + public void setFileSize(TSizeInBytes size) { + + if (size != null) { + fileSize = size; + } + } + + /* + * (non-Javadoc) + * + * @see it.grid.storm.catalogs.PtGData#changeStatusSRM_FILE_PINNED(java.lang.String ) + */ + @Override + public void changeStatusSRM_FILE_PINNED(String explanation) { + + setStatus(TStatusCode.SRM_FILE_PINNED, explanation); + } + + /* + * (non-Javadoc) + * + * @see java.lang.Object#toString() + */ + @Override + public String toString() { + + StringBuilder builder = new StringBuilder(); + builder.append("PtGChunkData [pinLifeTime="); + builder.append(pinLifeTime); + builder.append(", dirOption="); + builder.append(dirOption); + builder.append(", fileSize="); + builder.append(fileSize); + builder.append(", transferProtocols="); + builder.append(transferProtocols); + builder.append(", SURL="); + builder.append(SURL); + builder.append(", status="); + builder.append(status); + builder.append(", transferURL="); + builder.append(transferURL); + builder.append("]"); + return builder.toString(); + } + + /* + * (non-Javadoc) + * + * @see java.lang.Object#hashCode() + */ + @Override + public int hashCode() { + + final int prime = 31; + int result = super.hashCode(); + result = prime * result + ((dirOption == null) ? 0 : dirOption.hashCode()); + result = prime * result + ((fileSize == null) ? 0 : fileSize.hashCode()); + result = prime * result + ((pinLifeTime == null) ? 
0 : pinLifeTime.hashCode()); + return result; + } + + /* + * (non-Javadoc) + * + * @see java.lang.Object#equals(java.lang.Object) + */ + @Override + public boolean equals(Object obj) { + + if (this == obj) { + return true; + } + if (!super.equals(obj)) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + AnonymousPtGData other = (AnonymousPtGData) obj; + if (dirOption == null) { + if (other.dirOption != null) { + return false; + } + } else if (!dirOption.equals(other.dirOption)) { + return false; + } + if (fileSize == null) { + if (other.fileSize != null) { + return false; + } + } else if (!fileSize.equals(other.fileSize)) { + return false; + } + if (pinLifeTime == null) { + if (other.pinLifeTime != null) { + return false; + } + } else if (!pinLifeTime.equals(other.pinLifeTime)) { + return false; + } + return true; + } + +} diff --git a/src/main/java/it/grid/storm/persistence/model/AnonymousPtPData.java b/src/main/java/it/grid/storm/persistence/model/AnonymousPtPData.java new file mode 100644 index 000000000..b2451461f --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/model/AnonymousPtPData.java @@ -0,0 +1,238 @@ +/* + * + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +package it.grid.storm.persistence.model; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import it.grid.storm.common.types.TURLPrefix; +import it.grid.storm.persistence.exceptions.InvalidFileTransferDataAttributesException; +import it.grid.storm.persistence.exceptions.InvalidPtPDataAttributesException; +import it.grid.storm.persistence.exceptions.InvalidSurlRequestDataAttributesException; +import it.grid.storm.srm.types.TFileStorageType; +import it.grid.storm.srm.types.TLifeTimeInSeconds; +import it.grid.storm.srm.types.TOverwriteMode; +import it.grid.storm.srm.types.TReturnStatus; +import it.grid.storm.srm.types.TSURL; +import it.grid.storm.srm.types.TSizeInBytes; +import it.grid.storm.srm.types.TSpaceToken; +import it.grid.storm.srm.types.TStatusCode; +import it.grid.storm.srm.types.TTURL; + +/** + * @author Michele Dibenedetto + * + */ +public class AnonymousPtPData extends AnonymousFileTransferData implements PtPData { + + private static final Logger log = LoggerFactory.getLogger(AnonymousPtPData.class); + + protected TSpaceToken spaceToken; + protected TLifeTimeInSeconds pinLifetime; + protected TLifeTimeInSeconds fileLifetime; + protected TFileStorageType fileStorageType; + protected TOverwriteMode overwriteOption; + protected TSizeInBytes expectedFileSize; + + public AnonymousPtPData(TSURL toSURL, TLifeTimeInSeconds pinLifetime, + TLifeTimeInSeconds fileLifetime, TFileStorageType fileStorageType, TSpaceToken spaceToken, + TSizeInBytes expectedFileSize, TURLPrefix transferProtocols, TOverwriteMode overwriteOption, + TReturnStatus status, TTURL transferURL) throws InvalidPtPDataAttributesException, + InvalidFileTransferDataAttributesException, InvalidSurlRequestDataAttributesException { + + super(toSURL, transferProtocols, status, transferURL); + if (pinLifetime == null || fileLifetime == null || spaceToken == null || fileStorageType == null + || expectedFileSize == null || overwriteOption == null) { + log.debug( + "Invalid arguments: 
pinLifetime={}, fileLifetime={}, " + + "spaceToken={}, fileStorageType={}, expectedFileSize={}, " + "overwriteOption={}", + pinLifetime, fileLifetime, spaceToken, fileStorageType, expectedFileSize, + overwriteOption); + throw new InvalidPtPDataAttributesException(toSURL, pinLifetime, fileLifetime, + fileStorageType, spaceToken, expectedFileSize, transferProtocols, overwriteOption, status, + transferURL); + } + this.spaceToken = spaceToken; + this.pinLifetime = pinLifetime; + this.fileLifetime = fileLifetime; + this.fileStorageType = fileStorageType; + this.expectedFileSize = expectedFileSize; + this.overwriteOption = overwriteOption; + } + + /* + * (non-Javadoc) + * + * @see it.grid.storm.catalogs.PtPData#getSpaceToken() + */ + @Override + public final TSpaceToken getSpaceToken() { + + return spaceToken; + } + + /* + * (non-Javadoc) + * + * @see it.grid.storm.catalogs.PtPData#pinLifetime() + */ + @Override + public TLifeTimeInSeconds pinLifetime() { + + return pinLifetime; + } + + /* + * (non-Javadoc) + * + * @see it.grid.storm.catalogs.PtPData#fileLifetime() + */ + @Override + public TLifeTimeInSeconds fileLifetime() { + + return fileLifetime; + } + + /* + * (non-Javadoc) + * + * @see it.grid.storm.catalogs.PtPData#fileStorageType() + */ + @Override + public TFileStorageType fileStorageType() { + + return fileStorageType; + } + + /* + * (non-Javadoc) + * + * @see it.grid.storm.catalogs.PtPData#expectedFileSize() + */ + @Override + public TSizeInBytes expectedFileSize() { + + return expectedFileSize; + } + + /* + * (non-Javadoc) + * + * @see it.grid.storm.catalogs.PtPData#overwriteOption() + */ + @Override + public TOverwriteMode overwriteOption() { + + return overwriteOption; + } + + /** + * Method that sets the status of this request to SRM_SPACE_AVAILABLE; it needs the explanation + * String which describes the situation in greater detail; if a null is passed, then an empty + * String is used as explanation. 
+ */ + @Override + public void changeStatusSRM_SPACE_AVAILABLE(String explanation) { + + setStatus(TStatusCode.SRM_SPACE_AVAILABLE, explanation); + } + + /** + * Method that sets the status of this request to SRM_DUPLICATION_ERROR; it needs the explanation + * String which describes the situation in greater detail; if a null is passed, then an empty + * String is used as explanation. + */ + @Override + public void changeStatusSRM_DUPLICATION_ERROR(String explanation) { + + setStatus(TStatusCode.SRM_DUPLICATION_ERROR, explanation); + } + + @Override + public String toString() { + + StringBuilder sb = new StringBuilder(); + sb.append("PtPChunkData\n"); + sb.append("toSURL="); + sb.append(SURL); + sb.append("; "); + sb.append("pinLifetime="); + sb.append(pinLifetime); + sb.append("; "); + sb.append("fileLifetime="); + sb.append(fileLifetime); + sb.append("; "); + sb.append("fileStorageType="); + sb.append(fileStorageType); + sb.append("; "); + sb.append("spaceToken="); + sb.append(spaceToken); + sb.append("; "); + sb.append("expectedFileSize="); + sb.append(expectedFileSize); + sb.append("; "); + sb.append("transferProtocols="); + sb.append(transferProtocols); + sb.append("; "); + sb.append("overwriteOption="); + sb.append(overwriteOption); + sb.append("; "); + sb.append("status="); + sb.append(status); + sb.append("; "); + sb.append("transferURL="); + sb.append(transferURL); + sb.append("; "); + return sb.toString(); + } + + @Override + public int hashCode() { + + int hash = 17; + hash = 37 * hash + SURL.hashCode(); + hash = 37 * hash + pinLifetime.hashCode(); + hash = 37 * hash + fileLifetime.hashCode(); + hash = 37 * hash + fileStorageType.hashCode(); + hash = 37 * hash + spaceToken.hashCode(); + hash = 37 * hash + expectedFileSize.hashCode(); + hash = 37 * hash + transferProtocols.hashCode(); + hash = 37 * hash + overwriteOption.hashCode(); + hash = 37 * hash + status.hashCode(); + hash = 37 * hash + transferURL.hashCode(); + return hash; + } + + @Override + 
public boolean equals(Object o) { + + if (o == this) { + return true; + } + if (!(o instanceof AnonymousPtPData)) { + return false; + } + AnonymousPtPData cd = (AnonymousPtPData) o; + return SURL.equals(cd.SURL) && pinLifetime.equals(cd.pinLifetime) + && fileLifetime.equals(cd.fileLifetime) && fileStorageType.equals(cd.fileStorageType) + && spaceToken.equals(cd.spaceToken) && expectedFileSize.equals(cd.expectedFileSize) + && transferProtocols.equals(cd.transferProtocols) + && overwriteOption.equals(cd.overwriteOption) && status.equals(cd.status) + && transferURL.equals(cd.transferURL); + } +} diff --git a/src/main/java/it/grid/storm/persistence/model/BoLChunkDataTO.java b/src/main/java/it/grid/storm/persistence/model/BoLChunkDataTO.java new file mode 100644 index 000000000..40ca5644d --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/model/BoLChunkDataTO.java @@ -0,0 +1,268 @@ +/* + * + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +package it.grid.storm.persistence.model; + +import it.grid.storm.common.types.TURLPrefix; +import it.grid.storm.namespace.model.Protocol; +import it.grid.storm.persistence.converter.StatusCodeConverter; +import it.grid.storm.persistence.converter.TransferProtocolListConverter; +import it.grid.storm.srm.types.TStatusCode; + +import java.sql.Timestamp; +import java.util.List; + +/** + * Class that represents a row in the Persistence Layer: this is all raw data referring to the + * BoLChunkData proper, that is, String and primitive types. + * + * Each field is initialized with default values as per SRM 2.2 specification: protocolList GSIFTP + * dirOption false status SRM_REQUEST_QUEUED + * + * All other fields are 0 if int, or a white space if String. + * + * @author CNAF + * @version 1.0 + * @date Aug 2009 + */ +public class BoLChunkDataTO { + + /* Database table request_Bol fields BEGIN */ + private long primaryKey = -1; // ID primary key of record in DB + private String fromSURL = " "; + private boolean dirOption; // initialised in constructor + private String normalizedStFN = null; + private Integer surlUniqueID = null; + /* Database table request_Get fields END */ + + private String requestToken = " "; + private int lifetime = 0; + private boolean allLevelRecursive; // initialised in constructor + private int numLevel; // initialised in constructor + private List protocolList = null; // initialised in constructor + private long filesize = 0; + private int status; // initialised in constructor + private String errString = " "; + private int deferredStartTime = -1; + private Timestamp timeStamp = null; + + public BoLChunkDataTO() { + + TURLPrefix protocolPreferences = new TURLPrefix(); + protocolPreferences.addProtocol(Protocol.GSIFTP); + this.protocolList = TransferProtocolListConverter.toDB(protocolPreferences); + this.status = StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_REQUEST_QUEUED); + this.dirOption = false; + this.allLevelRecursive = 
false; + this.numLevel = 0; + } + + public boolean getAllLevelRecursive() { + + return allLevelRecursive; + } + + public int getDeferredStartTime() { + + return deferredStartTime; + } + + public boolean getDirOption() { + + return dirOption; + } + + public String getErrString() { + + return errString; + } + + public long getFileSize() { + + return filesize; + } + + public String getFromSURL() { + + return fromSURL; + } + + public int getLifeTime() { + + return lifetime; + } + + public int getNumLevel() { + + return numLevel; + } + + public long getPrimaryKey() { + + return primaryKey; + } + + public List getProtocolList() { + + return protocolList; + } + + public String getRequestToken() { + + return requestToken; + } + + public Timestamp getTimeStamp() { + + return timeStamp; + } + + public int getStatus() { + + return status; + } + + public void setAllLevelRecursive(boolean b) { + + allLevelRecursive = b; + } + + public void setDeferredStartTime(int deferredStartTime) { + + this.deferredStartTime = deferredStartTime; + } + + public void setDirOption(boolean b) { + + dirOption = b; + } + + public void setErrString(String s) { + + errString = s; + } + + public void setFileSize(long n) { + + filesize = n; + } + + public void setFromSURL(String s) { + + fromSURL = s; + } + + public void setLifeTime(int n) { + + lifetime = n; + } + + public void setNumLevel(int n) { + + numLevel = n; + } + + public void setPrimaryKey(long n) { + + primaryKey = n; + } + + public void setProtocolList(List l) { + + if ((l != null) && (!l.isEmpty())) { + protocolList = l; + } + } + + public void setRequestToken(String s) { + + requestToken = s; + } + + public void setTimeStamp(Timestamp timeStamp) { + + this.timeStamp = timeStamp; + } + + public void setStatus(int n) { + + status = n; + } + + /** + * @param normalizedStFN the normalizedStFN to set + */ + public void setNormalizedStFN(String normalizedStFN) { + + this.normalizedStFN = normalizedStFN; + } + + /** + * @return the 
normalizedStFN + */ + public String normalizedStFN() { + + return normalizedStFN; + } + + /** + * @param surlUniqueID the sURLUniqueID to set + */ + public void setSurlUniqueID(Integer surlUniqueID) { + + this.surlUniqueID = surlUniqueID; + } + + /** + * @return the sURLUniqueID + */ + public Integer sulrUniqueID() { + + return surlUniqueID; + } + + public String toString() { + + StringBuilder sb = new StringBuilder(); + sb.append(primaryKey); + sb.append(" "); + sb.append(requestToken); + sb.append(" "); + sb.append(fromSURL); + sb.append(" "); + sb.append(normalizedStFN); + sb.append(" "); + sb.append(surlUniqueID); + sb.append(" "); + sb.append(lifetime); + sb.append(" "); + sb.append(dirOption); + sb.append(" "); + sb.append(allLevelRecursive); + sb.append(" "); + sb.append(numLevel); + sb.append(" "); + sb.append(protocolList); + sb.append(" "); + sb.append(filesize); + sb.append(" "); + sb.append(status); + sb.append(" "); + sb.append(errString); + return sb.toString(); + } +} diff --git a/src/main/java/it/grid/storm/persistence/model/BoLData.java b/src/main/java/it/grid/storm/persistence/model/BoLData.java new file mode 100644 index 000000000..1ca1fe124 --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/model/BoLData.java @@ -0,0 +1,145 @@ +/* + * + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +package it.grid.storm.persistence.model; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import it.grid.storm.common.types.TURLPrefix; +import it.grid.storm.common.types.TimeUnit; +import it.grid.storm.persistence.exceptions.InvalidBoLDataAttributesException; +import it.grid.storm.persistence.exceptions.InvalidFileTransferDataAttributesException; +import it.grid.storm.persistence.exceptions.InvalidSurlRequestDataAttributesException; +import it.grid.storm.srm.types.TDirOption; +import it.grid.storm.srm.types.TLifeTimeInSeconds; +import it.grid.storm.srm.types.TReturnStatus; +import it.grid.storm.srm.types.TSURL; +import it.grid.storm.srm.types.TSizeInBytes; +import it.grid.storm.srm.types.TTURL; + +/** + * This class represents a BringOnLineChunkData, that is part of a multifile BringOnLine srm + * request. It contains data about: the requestToken, the fromSURL, the requested lifeTime of + * pinning, the TDirOption which tells whether the requested SURL is a directory and if it must be + * recursed at all levels, as well as the desired number of levels to recurse, the desired + * transferProtocols in order of preference, the fileSize, and the transferURL for the supplied + * SURL. + * + * @author CNAF + * @version 1.0 + * @date Aug 2009 + */ +public class BoLData extends AnonymousFileTransferData { + + private static final Logger log = LoggerFactory.getLogger(BoLData.class); + + /** + * requested lifetime of TURL: it is the pin time! 
+ */ + private TLifeTimeInSeconds lifeTime; + + /** + * specifies if the request regards a directory and related info + */ + private TDirOption dirOption; + + /** + * size of file + */ + private TSizeInBytes fileSize; + + /** + * how many seconds to wait before to make the lifeTime start consuming + */ + private int deferredStartTime = 0; + + public BoLData(TSURL fromSURL, TLifeTimeInSeconds lifeTime, TDirOption dirOption, + TURLPrefix desiredProtocols, TSizeInBytes fileSize, TReturnStatus status, TTURL transferURL, + int deferredStartTime) throws InvalidFileTransferDataAttributesException, + InvalidBoLDataAttributesException, InvalidSurlRequestDataAttributesException { + + super(fromSURL, desiredProtocols, status, transferURL); + if (lifeTime == null || dirOption == null || fileSize == null) { + throw new InvalidBoLDataAttributesException(fromSURL, lifeTime, dirOption, desiredProtocols, + fileSize, status, transferURL); + } + this.lifeTime = lifeTime; + this.dirOption = dirOption; + this.fileSize = fileSize; + this.deferredStartTime = deferredStartTime; + } + + public int getDeferredStartTime() { + + return deferredStartTime; + } + + /** + * Method that returns the dirOption specified in the srm request. + */ + public TDirOption getDirOption() { + + return dirOption; + } + + /** + * Method that returns the file size for this chunk of the srm request. + */ + public TSizeInBytes getFileSize() { + + return fileSize; + } + + /** + * Method that returns the requested pin life time for this chunk of the srm request. + */ + public TLifeTimeInSeconds getLifeTime() { + + return lifeTime; + } + + public void setDeferredStartTime(int deferredStartTime) { + + this.deferredStartTime = deferredStartTime; + } + + /** + * Method used to set the size of the file corresponding to the requested SURL. If the supplied + * TSizeInByte is null, then nothing gets set! 
+ */ + public void setFileSize(TSizeInBytes size) { + + if (size != null) { + fileSize = size; + } + } + + public void setLifeTime(long lifeTimeInSeconds) { + + TLifeTimeInSeconds lifeTime; + try { + lifeTime = TLifeTimeInSeconds.make(lifeTimeInSeconds, TimeUnit.SECONDS); + } catch (IllegalArgumentException e) { + log.error(e.getMessage(), e); + return; + } + + this.lifeTime = lifeTime; + } + +} diff --git a/src/main/java/it/grid/storm/persistence/model/BoLPersistentChunkData.java b/src/main/java/it/grid/storm/persistence/model/BoLPersistentChunkData.java new file mode 100644 index 000000000..1aaf7f6df --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/model/BoLPersistentChunkData.java @@ -0,0 +1,109 @@ +/* + * + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +package it.grid.storm.persistence.model; + +import it.grid.storm.common.types.TURLPrefix; +import it.grid.storm.persistence.exceptions.InvalidBoLDataAttributesException; +import it.grid.storm.persistence.exceptions.InvalidBoLPersistentChunkDataAttributesException; +import it.grid.storm.persistence.exceptions.InvalidFileTransferDataAttributesException; +import it.grid.storm.persistence.exceptions.InvalidSurlRequestDataAttributesException; +import it.grid.storm.srm.types.TDirOption; +import it.grid.storm.srm.types.TLifeTimeInSeconds; +import it.grid.storm.srm.types.TRequestToken; +import it.grid.storm.srm.types.TReturnStatus; +import it.grid.storm.srm.types.TSURL; +import it.grid.storm.srm.types.TSizeInBytes; +import it.grid.storm.srm.types.TTURL; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * This class represents a BringOnLineChunkData, that is part of a multifile BringOnLine srm + * request. It contains data about: the requestToken, the fromSURL, the requested lifeTime of + * pinning, the TDirOption which tells whether the requested SURL is a directory and if it must be + * recursed at all levels, as well as the desired number of levels to recurse, the desired + * transferProtocols in order of preference, the fileSize, and the transferURL for the supplied + * SURL. 
+ * + * @author CNAF + * @version 1.0 + * @date Aug 2009 + */ +public class BoLPersistentChunkData extends BoLData implements PersistentChunkData { + + private static final Logger log = LoggerFactory.getLogger(BoLPersistentChunkData.class); + + /** + * long representing the primary key for the persistence layer, in the status_Put table + */ + private long primaryKey = -1; + + /** + * This is the requestToken of the multifile srm request to which this chunk belongs + */ + private final TRequestToken requestToken; + + public BoLPersistentChunkData(TRequestToken requestToken, TSURL fromSURL, + TLifeTimeInSeconds lifeTime, TDirOption dirOption, TURLPrefix desiredProtocols, + TSizeInBytes fileSize, TReturnStatus status, TTURL transferURL, int deferredStartTime) + throws InvalidBoLPersistentChunkDataAttributesException, + InvalidFileTransferDataAttributesException, InvalidBoLDataAttributesException, + InvalidSurlRequestDataAttributesException { + + super(fromSURL, lifeTime, dirOption, desiredProtocols, fileSize, status, transferURL, + deferredStartTime); + if (requestToken == null) { + log.debug("BoLPersistentChunkData: requestToken is null!"); + throw new InvalidBoLPersistentChunkDataAttributesException(requestToken, fromSURL, lifeTime, + dirOption, desiredProtocols, fileSize, status, transferURL); + } + this.requestToken = requestToken; + } + + /** + * Method that returns the requestToken of the srm request to which this chunk belongs. + */ + public TRequestToken getRequestToken() { + + return requestToken; + } + + /** + * Method used to get the primary key used in the persistence layer! + */ + @Override + public long getPrimaryKey() { + + return primaryKey; + } + + /** + * Method used to set the primary key to be used in the persistence layer! 
+ */ + public void setPrimaryKey(long l) { + + primaryKey = l; + } + + @Override + public long getIdentifier() { + + return getPrimaryKey(); + } +} diff --git a/src/main/java/it/grid/storm/persistence/model/ChunkData.java b/src/main/java/it/grid/storm/persistence/model/ChunkData.java new file mode 100644 index 000000000..fc684290e --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/model/ChunkData.java @@ -0,0 +1,10 @@ +package it.grid.storm.persistence.model; + +public interface ChunkData extends RequestData { + + /** + * Method that returns the primary key in persistence, associated with This Chunk. + */ + public long getIdentifier(); + +} diff --git a/src/main/java/it/grid/storm/persistence/model/FileTransferData.java b/src/main/java/it/grid/storm/persistence/model/FileTransferData.java new file mode 100644 index 000000000..c343f923c --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/model/FileTransferData.java @@ -0,0 +1,25 @@ +package it.grid.storm.persistence.model; + +import it.grid.storm.common.types.TURLPrefix; +import it.grid.storm.srm.types.TTURL; + +public interface FileTransferData extends SynchMultyOperationRequestData { + + /** + * Method that returns a TURLPrefix containing the transfer protocols desired for this chunk of + * the srm request. + */ + public TURLPrefix getTransferProtocols(); + + /** + * Method that returns the TURL for this chunk of the srm request. + */ + public TTURL getTransferURL(); + + /** + * Method used to set the transferURL associated to the SURL of this chunk. If TTURL is null, then + * nothing gets set! + */ + public void setTransferURL(final TTURL turl); + +} diff --git a/src/main/java/it/grid/storm/persistence/model/GUID.java b/src/main/java/it/grid/storm/persistence/model/GUID.java deleted file mode 100644 index e316630d5..000000000 --- a/src/main/java/it/grid/storm/persistence/model/GUID.java +++ /dev/null @@ -1,173 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 
2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.persistence.model; - -import java.io.Serializable; - -import java.net.InetAddress; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -//FIXME: Why isn't storm using the standard UUID class? - - -/** - * GUID Value Object. - *

- * Used to retain/generate a GUID/UUID. - *

- */ - -public class GUID implements Serializable { - - private static final long serialVersionUID = 7241176020077117264L; - - private static final Logger log = LoggerFactory.getLogger(GUID.class); - - private byte guidValue[] = new byte[16]; - - public GUID() { - buildNewGUID(); - } - - public GUID(String guidString) { - - int pos = 0; - int count = 0; - - while (pos < guidString.length()) { - guidValue[count] = getByteValue(guidString.substring(pos, pos + 2)); - pos += 2; - count++; - - if (pos == guidString.length()) { - continue; - } - - if (guidString.charAt(pos) == '-') { - pos++; - } - } - } - - - private byte getByteValue(String hex) { - - return (byte) Integer.parseInt(hex, 16); - } - - private String getHexString(byte val) { - - String hexString; - if (val < 0) { - hexString = Integer.toHexString(val + 256); - } else { - hexString = Integer.toHexString(val); - } - - if (hexString.length() < 2) { - return "0" + hexString.toUpperCase(); - } - return hexString.toUpperCase(); - } - - private void setByteValues(byte[] lg, int startPos, int count) { - - for (int i = 0; i < count; i++) { - guidValue[i + startPos] = lg[i]; - } - } - - private void setByteValues(long lg, int startPos, int count) { - - for (int i = 0; i < count; i++) { - guidValue[i + startPos] = (byte) (lg & 0xFF); - lg = lg / 0xFF; - } - } - - private void buildNewGUID() { - - try { - // The time in milli seconds for six bytes - // gives us until the year 10000ish. - long lg = System.currentTimeMillis(); - setByteValues(lg, 0, 6); - - // The hash code for this object for two bytes (As a why not option?) 
- lg = this.hashCode(); - setByteValues(lg, 6, 2); - - // The ip address for this computer (as we cannot get to the MAC address) - InetAddress inet = InetAddress.getLocalHost(); - byte[] bytes = inet.getAddress(); - setByteValues(bytes, 8, 4); - - // A random number for two bytes - lg = (long) ((Math.random() * 0xFFFF)); - setByteValues(lg, 12, 2); - - // Another random number for two bytes - lg = (long) ((Math.random() * 0xFFFF)); - setByteValues(lg, 14, 2); - - } catch (Exception e) { - log.error("GUID generation error : {}", e.getMessage(), e); - } - } - - public byte[] getBytes() { - - return guidValue; - } - - /** - * Overrides toString(). Returns the array of bytes in the standard form: - * xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - * - * @return the string format - */ - @Override - public String toString() { - - StringBuilder buf = new StringBuilder(); - - buf.append(getHexString(guidValue[0])); - buf.append(getHexString(guidValue[1])); - buf.append(getHexString(guidValue[2])); - buf.append(getHexString(guidValue[3])); - buf.append('-'); - buf.append(getHexString(guidValue[4])); - buf.append(getHexString(guidValue[5])); - buf.append('-'); - buf.append(getHexString(guidValue[6])); - buf.append(getHexString(guidValue[7])); - buf.append('-'); - buf.append(getHexString(guidValue[8])); - buf.append(getHexString(guidValue[9])); - buf.append('-'); - buf.append(getHexString(guidValue[10])); - buf.append(getHexString(guidValue[11])); - buf.append(getHexString(guidValue[12])); - buf.append(getHexString(guidValue[13])); - buf.append(getHexString(guidValue[14])); - buf.append(getHexString(guidValue[15])); - - return buf.toString(); - } -} diff --git a/src/main/java/it/grid/storm/persistence/model/IdentityPtGData.java b/src/main/java/it/grid/storm/persistence/model/IdentityPtGData.java new file mode 100644 index 000000000..e0442edb6 --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/model/IdentityPtGData.java @@ -0,0 +1,73 @@ +/* + * + * Copyright (c) Istituto 
Nazionale di Fisica Nucleare (INFN). 2006-2010. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package it.grid.storm.persistence.model; + +import it.grid.storm.common.types.TURLPrefix; +import it.grid.storm.griduser.GridUserInterface; +import it.grid.storm.persistence.exceptions.InvalidFileTransferDataAttributesException; +import it.grid.storm.persistence.exceptions.InvalidPtGDataAttributesException; +import it.grid.storm.persistence.exceptions.InvalidSurlRequestDataAttributesException; +import it.grid.storm.srm.types.TDirOption; +import it.grid.storm.srm.types.TLifeTimeInSeconds; +import it.grid.storm.srm.types.TReturnStatus; +import it.grid.storm.srm.types.TSURL; +import it.grid.storm.srm.types.TSizeInBytes; +import it.grid.storm.srm.types.TTURL; +import it.grid.storm.synchcall.data.IdentityInputData; + +public class IdentityPtGData extends AnonymousPtGData implements IdentityInputData { + + private final GridUserInterface auth; + + /** + * @param requestToken + * @param fromSURL + * @param lifeTime + * @param dirOption + * @param desiredProtocols + * @param fileSize + * @param status + * @param transferURL + * @throws InvalidPtGDataAttributesException + */ + public IdentityPtGData(GridUserInterface auth, TSURL SURL, TLifeTimeInSeconds lifeTime, + TDirOption dirOption, TURLPrefix desiredProtocols, TSizeInBytes fileSize, + TReturnStatus status, TTURL transferURL) + throws InvalidPtGDataAttributesException, 
InvalidFileTransferDataAttributesException, + InvalidSurlRequestDataAttributesException, IllegalArgumentException { + + super(SURL, lifeTime, dirOption, desiredProtocols, fileSize, status, transferURL); + if (auth == null) { + throw new IllegalArgumentException( + "Unable to create the object, invalid arguments: auth=" + auth); + } + this.auth = auth; + } + + @Override + public GridUserInterface getUser() { + + return auth; + } + + @Override + public String getPrincipal() { + + return this.auth.getDn(); + } +} diff --git a/src/main/java/it/grid/storm/persistence/model/IdentityPtPData.java b/src/main/java/it/grid/storm/persistence/model/IdentityPtPData.java new file mode 100644 index 000000000..81a4906e4 --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/model/IdentityPtPData.java @@ -0,0 +1,69 @@ +/** + * + */ +package it.grid.storm.persistence.model; + +import it.grid.storm.common.types.TURLPrefix; +import it.grid.storm.griduser.GridUserInterface; +import it.grid.storm.persistence.exceptions.InvalidFileTransferDataAttributesException; +import it.grid.storm.persistence.exceptions.InvalidPtGDataAttributesException; +import it.grid.storm.persistence.exceptions.InvalidPtPDataAttributesException; +import it.grid.storm.persistence.exceptions.InvalidSurlRequestDataAttributesException; +import it.grid.storm.srm.types.TFileStorageType; +import it.grid.storm.srm.types.TLifeTimeInSeconds; +import it.grid.storm.srm.types.TOverwriteMode; +import it.grid.storm.srm.types.TReturnStatus; +import it.grid.storm.srm.types.TSURL; +import it.grid.storm.srm.types.TSizeInBytes; +import it.grid.storm.srm.types.TSpaceToken; +import it.grid.storm.srm.types.TTURL; +import it.grid.storm.synchcall.data.IdentityInputData; + +/** + * @author Michele Dibenedetto + * + */ +public class IdentityPtPData extends AnonymousPtPData implements IdentityInputData { + + private final GridUserInterface auth; + + /** + * @param requestToken + * @param fromSURL + * @param lifeTime + * @param dirOption 
+ * @param desiredProtocols + * @param fileSize + * @param status + * @param transferURL + * @throws InvalidPtGDataAttributesException + */ + public IdentityPtPData(GridUserInterface auth, TSURL SURL, TLifeTimeInSeconds pinLifetime, + TLifeTimeInSeconds fileLifetime, TFileStorageType fileStorageType, TSpaceToken spaceToken, + TSizeInBytes expectedFileSize, TURLPrefix transferProtocols, TOverwriteMode overwriteOption, + TReturnStatus status, TTURL transferURL) + throws InvalidPtPDataAttributesException, InvalidFileTransferDataAttributesException, + InvalidSurlRequestDataAttributesException, IllegalArgumentException { + + super(SURL, pinLifetime, fileLifetime, fileStorageType, spaceToken, expectedFileSize, + transferProtocols, overwriteOption, status, transferURL); + if (auth == null) { + throw new IllegalArgumentException( + "Unable to create the object, invalid arguments: auth=" + auth); + } + this.auth = auth; + } + + @Override + public GridUserInterface getUser() { + + return auth; + } + + @Override + public String getPrincipal() { + + return this.auth.getDn(); + } + +} diff --git a/src/main/java/it/grid/storm/persistence/model/InvalidPtGChunkDataAttributesException.java b/src/main/java/it/grid/storm/persistence/model/InvalidPtGChunkDataAttributesException.java deleted file mode 100644 index db429957e..000000000 --- a/src/main/java/it/grid/storm/persistence/model/InvalidPtGChunkDataAttributesException.java +++ /dev/null @@ -1,125 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. 
You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.persistence.model; - -import it.grid.storm.srm.types.TRequestToken; -import it.grid.storm.srm.types.TSURL; -import it.grid.storm.srm.types.TStorageSystemInfo; -import it.grid.storm.srm.types.TLifeTimeInSeconds; -import it.grid.storm.srm.types.TFileStorageType; -import it.grid.storm.srm.types.TSpaceToken; -import it.grid.storm.srm.types.TDirOption; -import it.grid.storm.common.types.TURLPrefix; -import it.grid.storm.srm.types.TSizeInBytes; -import it.grid.storm.srm.types.TReturnStatus; -import it.grid.storm.srm.types.TTURL; - -/** - * This class represents an exceptin thrown when the attributes supplied to the - * constructor of PtGChunkData are invalid, that is if any of the following is - * _null_: requestToken, fromSURL, storageSystemInfo, lifeTime, fileStorageType, - * spaceToken, numOfLevels, TURLPrefix transferProtocols, fileSize, status, - * estimatedWaitTimeOnQueue, estimatedProcessingTime, transferURL, - * remainingPinTime. 
- * - * @author EGRID - ICTP Trieste - * @date March 23rd, 2005 - * @version 2.0 - */ -public class InvalidPtGChunkDataAttributesException extends Exception { - - // booleans that indicate whether the corresponding variable is null - private boolean nullRequestToken; - private boolean nullFromSURL; - private boolean nullStorageSystemInfo; - private boolean nullLifeTime; - private boolean nullFileStorageType; - private boolean nullSpaceToken; - private boolean nullDirOption; - private boolean nullTransferProtocols; - private boolean nullFileSize; - private boolean nullStatus; - private boolean nullEstimatedWaitTimeOnQueue; - private boolean nullEstimatedProcessingTime; - private boolean nullTransferURL; - private boolean nullRemainingPinTime; - - /** - * Constructor that requires the attributes that caused the exception to be - * thrown. - */ - public InvalidPtGChunkDataAttributesException(TRequestToken requestToken, - TSURL fromSURL, TStorageSystemInfo storageSystemInfo, - TLifeTimeInSeconds lifeTime, TFileStorageType fileStorageType, - TSpaceToken spaceToken, TDirOption dirOption, TURLPrefix transferProtocols, - TSizeInBytes fileSize, TReturnStatus status, - TLifeTimeInSeconds estimatedWaitTimeOnQueue, - TLifeTimeInSeconds estimatedProcessingTime, TTURL transferURL, - TLifeTimeInSeconds remainingPinTime) { - - nullRequestToken = requestToken == null; - nullFromSURL = fromSURL == null; - nullStorageSystemInfo = storageSystemInfo == null; - nullLifeTime = lifeTime == null; - nullFileStorageType = fileStorageType == null; - nullSpaceToken = spaceToken == null; - nullDirOption = dirOption == null; - nullTransferProtocols = transferProtocols == null; - nullFileSize = fileSize == null; - nullStatus = status == null; - nullEstimatedWaitTimeOnQueue = estimatedWaitTimeOnQueue == null; - nullEstimatedProcessingTime = estimatedProcessingTime == null; - nullTransferURL = transferURL == null; - nullRemainingPinTime = remainingPinTime == null; - } - - public String toString() { 
- - StringBuilder sb = new StringBuilder(); - sb.append("Invalid PtGChunkData attributes: null-requestToken="); - sb.append(nullRequestToken); - sb.append("; nul-fromSURL="); - sb.append(nullFromSURL); - sb.append("; null-storageSystemInfo="); - sb.append(nullStorageSystemInfo); - sb.append("; null-lifeTime="); - sb.append(nullLifeTime); - sb.append("; null-filestorageType="); - sb.append(nullFileStorageType); - sb.append("; null-spaceToken="); - sb.append(nullSpaceToken); - sb.append("; null-dirOption="); - sb.append(nullDirOption); - sb.append("; null-transferProtocols="); - sb.append(nullTransferProtocols); - sb.append("; null-fileSize="); - sb.append(nullFileSize); - sb.append("; null-status="); - sb.append(nullStatus); - sb.append("; null-estimatedWaitTimeOnQueue="); - sb.append(nullEstimatedWaitTimeOnQueue); - sb.append("; null-estimatedProcessingTime="); - sb.append(nullEstimatedProcessingTime); - sb.append("; null-transferURL="); - sb.append(nullTransferURL); - sb.append("; null-remainingPinTime="); - sb.append(nullRemainingPinTime); - sb.append("."); - return sb.toString(); - } -} diff --git a/src/main/java/it/grid/storm/persistence/model/InvalidRequestSummaryDataAttributesException.java b/src/main/java/it/grid/storm/persistence/model/InvalidRequestSummaryDataAttributesException.java deleted file mode 100644 index b2c54f9f2..000000000 --- a/src/main/java/it/grid/storm/persistence/model/InvalidRequestSummaryDataAttributesException.java +++ /dev/null @@ -1,79 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except - * in compliance with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.persistence.model; - -import it.grid.storm.srm.types.TRequestToken; -import it.grid.storm.srm.types.TRequestType; - -/** - * This class represents an Exception thrown when a RequestSummaryData object is created with any - * invalid attributes: null TRequestToken, null TRequestType, totalFilesInThisRequest<0, - * numOfQueuedRequests<0, numOfProgessingRequests<0, numFinished<0. - * - * @author EGRID - ICTP Trieste - * @date March 18th, 2005 - * @version 2.0 - */ -public class InvalidRequestSummaryDataAttributesException extends Exception { - - /** - * - */ - private static final long serialVersionUID = 1L; - - private final boolean nullRequestToken; - private final boolean nullRequestType; - private final boolean negTotalFilesInThisRequest; - private final boolean negNumOfQueuedRequests; - private final boolean negNumOfProgressingRequests; - private final boolean negNumFinished; - - /** - * Constructor that requires the attributes that caused the exception to be thrown. 
- */ - public InvalidRequestSummaryDataAttributesException(TRequestToken requestToken, - TRequestType requestType, int totalFilesInThisRequest, int numOfQueuedRequests, - int numOfProgressingRequests, int numFinished) { - - nullRequestToken = (requestToken == null); - nullRequestType = (requestType == null); - negTotalFilesInThisRequest = (totalFilesInThisRequest < 0); - negNumOfQueuedRequests = (numOfQueuedRequests < 0); - negNumOfProgressingRequests = (numOfProgressingRequests < 0); - negNumFinished = (numFinished < 0); - } - - @Override - public String toString() { - - StringBuilder sb = new StringBuilder(); - sb.append("Invalid RequestSummaryData attributes exception: "); - sb.append("null-requestToken="); - sb.append(nullRequestToken); - sb.append("; null-requestType="); - sb.append(nullRequestType); - sb.append("; negative-totalFilesInThisRequest="); - sb.append(negTotalFilesInThisRequest); - sb.append("; negative-numOfQueuedRequests="); - sb.append(negNumOfQueuedRequests); - sb.append("; negative-numOfProgressingRequests="); - sb.append(negNumOfProgressingRequests); - sb.append("; negative-numFinished="); - sb.append(negNumFinished); - sb.append("."); - return sb.toString(); - } -} diff --git a/src/main/java/it/grid/storm/persistence/model/JiTData.java b/src/main/java/it/grid/storm/persistence/model/JiTData.java new file mode 100644 index 000000000..83403028f --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/model/JiTData.java @@ -0,0 +1,71 @@ +/* + * + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. 
You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package it.grid.storm.persistence.model; + +/** + * Class that represents data associated to JiT entries. It contains a String representing the file, + * an int representing the ACL, an int representing the user UID, an int representing the user GID. + * + * @author EGRID - ICTP Trieste + * @version 1.0 + * @date November 2006 + */ +public class JiTData { + + private String file = ""; + private int uid = -1; + private int gid = -1; + private int acl = -1; + + /** + * Constructor requiring the complete name of the file as String, the acl as int, the uid and + * primary gid of the LocalUser bith as int. + */ + public JiTData(String file, int acl, int uid, int gid) { + + this.file = file; + this.acl = acl; + this.uid = uid; + this.gid = gid; + } + + public String pfn() { + + return file; + } + + public int acl() { + + return acl; + } + + public int uid() { + + return uid; + } + + public int gid() { + + return gid; + } + + public String toString() { + + return "file=" + file + " acl=" + acl + " uid=" + uid + " gid=" + gid; + } +} diff --git a/src/main/java/it/grid/storm/persistence/model/PersistentChunkData.java b/src/main/java/it/grid/storm/persistence/model/PersistentChunkData.java new file mode 100644 index 000000000..16f93a792 --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/model/PersistentChunkData.java @@ -0,0 +1,10 @@ +package it.grid.storm.persistence.model; + +public interface PersistentChunkData extends ChunkData { + + /** + * Method that returns the primary key in persistence, associated with This Chunk. 
+ */ + public long getPrimaryKey(); + +} diff --git a/src/main/java/it/grid/storm/persistence/model/PtGChunkDataTO.java b/src/main/java/it/grid/storm/persistence/model/PtGChunkDataTO.java new file mode 100644 index 000000000..402a76645 --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/model/PtGChunkDataTO.java @@ -0,0 +1,310 @@ +/* + * + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package it.grid.storm.persistence.model; + +import it.grid.storm.srm.types.TStatusCode; +import it.grid.storm.common.types.TURLPrefix; +import java.sql.Timestamp; +import java.util.List; +import it.grid.storm.namespace.model.Protocol; +import it.grid.storm.persistence.converter.StatusCodeConverter; +import it.grid.storm.persistence.converter.TransferProtocolListConverter; + +/** + * Class that represents a row in the Persistence Layer: this is all raw data referring to the + * PtGChunkData proper, that is, String and primitive types. + * + * Each field is initialized with default values as per SRM 2.2 specification: protocolList GSIFTP + * dirOption false status SRM_REQUEST_QUEUED + * + * All other fields are 0 if int, or a white space if String. 
+ * + * @author EGRID ICTP + * @version 3.0 + * @date June 2005 + */ +public class PtGChunkDataTO { + + private static final String FQAN_SEPARATOR = "#"; + /* Database table request_Get fields BEGIN */ + private long primaryKey = -1; // ID primary key of record in DB + private boolean dirOption; // initialised in constructor + private String fromSURL = " "; + private String normalizedStFN = null; + private Integer surlUniqueID = null; + /* Database table request_Get fields END */ + + private String requestToken = " "; + private int lifetime = 0; + private boolean allLevelRecursive; // initialised in constructor + private int numLevel; // initialised in constructor + private List protocolList = null; // initialised in constructor + private long filesize = 0; + private int status; // initialised in constructor + private String errString = " "; + private String turl = " "; + private Timestamp timeStamp; + private String clientDN = null; + private String vomsAttributes = null; + + public PtGChunkDataTO() { + + TURLPrefix protocolPreferences = new TURLPrefix(); + protocolPreferences.addProtocol(Protocol.GSIFTP); + this.protocolList = TransferProtocolListConverter.toDB(protocolPreferences); + this.status = StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_REQUEST_QUEUED); + this.dirOption = false; + // + this.allLevelRecursive = false; + this.numLevel = 0; + } + + public long primaryKey() { + + return primaryKey; + } + + public void setPrimaryKey(long n) { + + primaryKey = n; + } + + public String requestToken() { + + return requestToken; + } + + public void setRequestToken(String s) { + + requestToken = s; + } + + public Timestamp timeStamp() { + + return timeStamp; + } + + public void setTimeStamp(Timestamp timeStamp) { + + this.timeStamp = timeStamp; + } + + public String fromSURL() { + + return fromSURL; + } + + public void setFromSURL(String s) { + + fromSURL = s; + } + + /** + * @param normalizedStFN the normalizedStFN to set + */ + public void 
setNormalizedStFN(String normalizedStFN) { + + this.normalizedStFN = normalizedStFN; + } + + /** + * @return the normalizedStFN + */ + public String normalizedStFN() { + + return normalizedStFN; + } + + /** + * @param sURLUniqueID the sURLUniqueID to set + */ + public void setSurlUniqueID(Integer sURLUniqueID) { + + this.surlUniqueID = sURLUniqueID; + } + + /** + * @return the sURLUniqueID + */ + public Integer surlUniqueID() { + + return surlUniqueID; + } + + public int lifeTime() { + + return lifetime; + } + + public void setLifeTime(int n) { + + lifetime = n; + } + + public boolean dirOption() { + + return dirOption; + } + + public void setDirOption(boolean b) { + + dirOption = b; + } + + public boolean allLevelRecursive() { + + return allLevelRecursive; + } + + public void setAllLevelRecursive(boolean b) { + + allLevelRecursive = b; + } + + public int numLevel() { + + return numLevel; + } + + public void setNumLevel(int n) { + + numLevel = n; + } + + public List protocolList() { + + return protocolList; + } + + public void setProtocolList(List l) { + + if ((l != null) && (!l.isEmpty())) + protocolList = l; + } + + public long fileSize() { + + return filesize; + } + + public void setFileSize(long n) { + + filesize = n; + } + + public int status() { + + return status; + } + + public void setStatus(int n) { + + status = n; + } + + public String errString() { + + return errString; + } + + public void setErrString(String s) { + + errString = s; + } + + public String turl() { + + return turl; + } + + public void setTurl(String s) { + + turl = s; + } + + public String clientDN() { + + return clientDN; + } + + public void setClientDN(String s) { + + clientDN = s; + } + + public String vomsAttributes() { + + return vomsAttributes; + } + + public void setVomsAttributes(String s) { + + vomsAttributes = s; + } + + public void setVomsAttributes(String[] fqaNsAsString) { + + vomsAttributes = ""; + for (int i = 0; i < fqaNsAsString.length; i++) { + vomsAttributes += 
fqaNsAsString[i]; + if (i < fqaNsAsString.length - 1) { + vomsAttributes += FQAN_SEPARATOR; + } + } + + } + + public String[] vomsAttributesArray() { + + return vomsAttributes.split(FQAN_SEPARATOR); + } + + public String toString() { + + StringBuilder sb = new StringBuilder(); + sb.append(primaryKey); + sb.append(" "); + sb.append(requestToken); + sb.append(" "); + sb.append(fromSURL); + sb.append(" "); + sb.append(normalizedStFN); + sb.append(" "); + sb.append(surlUniqueID); + sb.append(" "); + sb.append(lifetime); + sb.append(" "); + sb.append(dirOption); + sb.append(" "); + sb.append(allLevelRecursive); + sb.append(" "); + sb.append(numLevel); + sb.append(" "); + sb.append(protocolList); + sb.append(" "); + sb.append(filesize); + sb.append(" "); + sb.append(status); + sb.append(" "); + sb.append(errString); + sb.append(" "); + sb.append(turl); + return sb.toString(); + } + +} diff --git a/src/main/java/it/grid/storm/persistence/model/PtGChunkTO.java b/src/main/java/it/grid/storm/persistence/model/PtGChunkTO.java deleted file mode 100644 index eed4500da..000000000 --- a/src/main/java/it/grid/storm/persistence/model/PtGChunkTO.java +++ /dev/null @@ -1,508 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ - -package it.grid.storm.persistence.model; - -import it.grid.storm.common.types.TURLPrefix; -import it.grid.storm.srm.types.TDirOption; -import it.grid.storm.srm.types.TFileStorageType; -import it.grid.storm.srm.types.TLifeTimeInSeconds; -import it.grid.storm.srm.types.TRequestToken; -import it.grid.storm.srm.types.TReturnStatus; -import it.grid.storm.srm.types.TSURL; -import it.grid.storm.srm.types.TSizeInBytes; -import it.grid.storm.srm.types.TSpaceToken; -import it.grid.storm.srm.types.TStatusCode; -import it.grid.storm.srm.types.TStorageSystemInfo; -import it.grid.storm.srm.types.TTURL; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * This class represents a PrepareToGetChunkData, that is part of a multifile - * PrepareToGet srm request. It contains data about: the requestToken, the - * fromSURL and the storageSystemInfo for that SURL, the requested lifeTime of - * pinning, the requested fileStorageType and any available spaceToken, the - * TDirOption which explains whether the requested SURL is a directory and if it - * must be recursed at all levels, as well as the desired number of levels to - * recurse, the desired transferProtocols in order of preference, the fileSize, - * the estimatedTimeOnQueue, the estimatedProcessingTime, the transferURL for - * the supplied SURL, and the remainingPinTime. - * - * @author EGRID - ICTP Trieste - * @date March 21st, 2005 - * @version 2.0 - */ -public class PtGChunkTO { - - private static final Logger log = LoggerFactory.getLogger(PtGChunkTO.class); - - private TRequestToken requestToken; - - private TSURL fromSURL; - private TStorageSystemInfo storageSystemInfo; - - private TLifeTimeInSeconds lifeTime; // requested lifetime for fromSURL - - // BEWARE!!! It is the pin time!!! 
- private TFileStorageType fileStorageType; // TFileStorageType requested for - // specific fromSURL to get - private TSpaceToken spaceToken; // SpaceToken to use for fromSURL - private TDirOption dirOption; // specifies if the request regards a directory - // and related info - - private TURLPrefix transferProtocols; // list of desired transport protocols - // for fromSURL - - private TSizeInBytes fileSize; // size of file - private TReturnStatus status; // return status for this chunk of request - private TLifeTimeInSeconds estimatedWaitTimeOnQueue; // estimated time this - // chunk will remain in - // queue - private TLifeTimeInSeconds estimatedProcessingTime; // estimated time this - // chunk will take to be - // processed - private TTURL transferURL; // TURL for picking up the requested file - private TLifeTimeInSeconds remainingPinTime; // estimated time remaining for - // Pin validity - - public PtGChunkTO(TRequestToken requestToken, TSURL fromSURL, - TStorageSystemInfo storageSystemInfo, TLifeTimeInSeconds lifeTime, - TFileStorageType fileStorageType, TSpaceToken spaceToken, - TDirOption dirOption, TURLPrefix transferProtocols, TSizeInBytes fileSize, - TReturnStatus status, TLifeTimeInSeconds estimatedWaitTimeOnQueue, - TLifeTimeInSeconds estimatedProcessingTime, TTURL transferURL, - TLifeTimeInSeconds remainingPinTime) - throws InvalidPtGChunkDataAttributesException { - - boolean ok = requestToken != null && fromSURL != null - && storageSystemInfo != null && lifeTime != null - && fileStorageType != null && spaceToken != null && dirOption != null - && transferProtocols != null && fileSize != null && status != null - && estimatedWaitTimeOnQueue != null && estimatedProcessingTime != null - && transferURL != null && remainingPinTime != null; - - if (!ok) { - throw new InvalidPtGChunkDataAttributesException(requestToken, fromSURL, - storageSystemInfo, lifeTime, fileStorageType, spaceToken, dirOption, - transferProtocols, fileSize, status, 
estimatedWaitTimeOnQueue, - estimatedProcessingTime, transferURL, remainingPinTime); - } - this.requestToken = requestToken; - this.fromSURL = fromSURL; - this.storageSystemInfo = storageSystemInfo; - this.lifeTime = lifeTime; - this.fileStorageType = fileStorageType; - this.spaceToken = spaceToken; - this.dirOption = dirOption; - this.transferProtocols = transferProtocols; - this.fileSize = fileSize; - this.status = status; - this.estimatedWaitTimeOnQueue = estimatedWaitTimeOnQueue; - this.estimatedProcessingTime = estimatedProcessingTime; - this.transferURL = transferURL; - } - - /** - * Method that returns the requestToken of the srm request to which this chunk - * belongs. - */ - public TRequestToken requestToken() { - - return requestToken; - } - - /** - * Method that returns the fromSURL of the srm request to which this chunk - * belongs. - */ - public TSURL fromSURL() { - - return fromSURL; - } - - /** - * Method that returns the storageSystemInfo of the srm request to which this - * chunk belongs - */ - public TStorageSystemInfo storageSystemInfo() { - - return storageSystemInfo; - } - - /** - * Method that returns the requested pin life time for this chunk of the srm - * request. - */ - public TLifeTimeInSeconds lifeTime() { - - return lifeTime; - } - - /** - * Method that returns the filerequested pin life time for this chunk of the - * srm request. - */ - public TFileStorageType fileStorageType() { - - return fileStorageType; - } - - /** - * Method that returns the space token supplied for this chunk of the srm - * request. - */ - public TSpaceToken spaceToken() { - - return spaceToken; - } - - /** - * Method that returns the dirOption specified in the srm request. - */ - public TDirOption dirOption() { - - return dirOption; - } - - /** - * Method that returns a TURLPrefix containing the transfer protocols desired - * for this chunk of the srm request. 
- */ - public TURLPrefix transferProtocols() { - - return transferProtocols; - } - - /** - * Method that returns the status for this chunk of the srm request. - */ - public TReturnStatus status() { - - return status; - } - - /** - * Method that returns the file size for this chunk of the srm request. - */ - public TSizeInBytes fileSize() { - - return fileSize; - } - - /** - * Method that returns the estimated time in queue for this chunk of the srm - * request. - */ - public TLifeTimeInSeconds estimatedWaitTimeOnQueue() { - - return estimatedWaitTimeOnQueue; - } - - /** - * Method that returns the estimated processing time for this chunk of the srm - * request. - */ - public TLifeTimeInSeconds estimatedProcessingTime() { - - return estimatedProcessingTime; - } - - /** - * Method that returns the TURL for this chunk of the srm request. - */ - public TTURL transferURL() { - - return transferURL; - } - - /** - * Method that returns the estimated remaining pin time for this chunk of the - * srm request. - */ - public TLifeTimeInSeconds remainingPinTime() { - - return remainingPinTime; - } - - /** - * Method that sets the status of this request to SRM_REQUEST_QUEUED; it needs - * the explanation String which describes the situation in greater detail; if - * a null is passed, then an empty String is used as explanation. - */ - public void changeStatusSRM_REQUEST_QUEUED(String explanation) { - - if (explanation == null) { - explanation = ""; - } - status = new TReturnStatus(TStatusCode.SRM_REQUEST_QUEUED, explanation); - } - - /** - * Method that sets the status of this request to SRM_DONE; it needs the - * explanation String which describes the situation in greater detail; if a - * null is passed, then an empty String is used as explanation. 
- */ - public void changeStatusSRM_DONE(String explanation) { - - if (explanation == null) { - explanation = ""; - } - status = new TReturnStatus(TStatusCode.SRM_DONE, explanation); - } - - /** - * Method that sets the status of this request to SRM_INVALID_REQUEST; it - * needs the explanation String which describes the situation in greater - * detail; if a null is passed, then an empty String is used as explanation. - */ - public void changeStatusSRM_INVALID_REQUEST(String explanation) { - - if (explanation == null) { - explanation = ""; - } - status = new TReturnStatus(TStatusCode.SRM_INVALID_REQUEST, explanation); - } - - /** - * Method that sets the status of this request to SRM_AUTHORIZATION_FAILURE; - * it needs the explanation String which describes the situation in greater - * detail; if a null is passed, then an empty String is used as explanation. - */ - public void changeStatusSRM_AUTHORIZATION_FAILURE(String explanation) { - - if (explanation == null) { - explanation = ""; - } - status = new TReturnStatus(TStatusCode.SRM_AUTHORIZATION_FAILURE, - explanation); - } - - /** - * Method that sets the status of this request to SRM_ABORTED; it needs the - * explanation String which describes the situation in greater detail; if a - * null is passed, then an empty String is used as explanation. - */ - public void changeStatusSRM_ABORTED(String explanation) { - - if (explanation == null) { - explanation = ""; - } - status = new TReturnStatus(TStatusCode.SRM_ABORTED, explanation); - } - - /** - * Method that sets the status of this request to SRM_REQUEST_INPROGRESS; it - * needs the explanation String which describes the situation in greater - * detail; if a null is passed, then an empty String is used as explanation. 
- */ - public void changeStatusSRM_REQUEST_INPROGRESS(String explanation) { - - if (explanation == null) { - explanation = ""; - } - status = new TReturnStatus(TStatusCode.SRM_REQUEST_INPROGRESS, explanation); - } - - /** - * Method that sets the status of this request to SRM_INTERNAL_ERROR; it needs - * the explanation String which describes the situation in greater detail; if - * a null is passed, then an empty String is used as explanation. - */ - public void changeStatusSRM_INTERNAL_ERROR(String explanation) { - - if (explanation == null) { - explanation = ""; - } - status = new TReturnStatus(TStatusCode.SRM_INTERNAL_ERROR, explanation); - } - - /** - * Method that sets the status of this request to SRM_FATAL_INTERNAL_ERROR; it - * needs the explanation String which describes the situation in greater - * detail; if a null is passed, then an empty String is used as explanation. - */ - public void changeStatusSRM_FATAL_INTERNAL_ERROR(String explanation) { - - if (explanation == null) { - explanation = ""; - } - status = new TReturnStatus(TStatusCode.SRM_FATAL_INTERNAL_ERROR, - explanation); - } - - /** - * Method that sets the status of this request to SRM_INVALID_PATH; it needs - * the explanation String which describes the situation in greater detail; if - * a null is passed, then an empty String is used as explanation. 
- */ - public void changeStatusSRM_INVALID_PATH(String explanation) { - - if (explanation == null) { - explanation = ""; - } - status = new TReturnStatus(TStatusCode.SRM_INVALID_PATH, explanation); - } - - @Override - public String toString() { - - StringBuilder sb = new StringBuilder(); - sb.append("PtGChunkData\n"); - sb.append("RequestToken="); - sb.append(requestToken); - sb.append("; "); - sb.append("fromSURL="); - sb.append(fromSURL); - sb.append("; "); - sb.append("storageSystemInfo="); - sb.append(storageSystemInfo); - sb.append("; "); - sb.append("lifeTime="); - sb.append(lifeTime); - sb.append("; "); - sb.append("fileStorageType="); - sb.append(fileStorageType); - sb.append("; "); - sb.append("spaceToken"); - sb.append(spaceToken); - sb.append("; "); - sb.append("dirOption="); - sb.append(dirOption); - sb.append("; "); - sb.append("transferProtocols="); - sb.append(transferProtocols); - sb.append("; "); - sb.append("fileSize="); - sb.append(fileSize); - sb.append("; "); - sb.append("status="); - sb.append(status); - sb.append("; "); - sb.append("estimatedWaitTimeOnQueue="); - sb.append(estimatedWaitTimeOnQueue); - sb.append("; "); - sb.append("estimatedProcessingTime="); - sb.append(estimatedProcessingTime); - sb.append("; "); - sb.append("transferURL="); - sb.append(transferURL); - sb.append("; "); - sb.append("remainingPinTime="); - sb.append(remainingPinTime); - sb.append("."); - return sb.toString(); - } - - @Override - public int hashCode() { - - int hash = 17; - hash = 37 * hash + requestToken.hashCode(); - hash = 37 * hash + fromSURL.hashCode(); - hash = 37 * hash + storageSystemInfo.hashCode(); - hash = 37 * hash + lifeTime.hashCode(); - hash = 37 * hash + fileStorageType.hashCode(); - hash = 37 * hash + spaceToken.hashCode(); - hash = 37 * hash + dirOption.hashCode(); - hash = 37 * hash + transferProtocols.hashCode(); - hash = 37 * hash + fileSize.hashCode(); - hash = 37 * hash + status.hashCode(); - hash = 37 * hash + 
estimatedWaitTimeOnQueue.hashCode(); - hash = 37 * hash + estimatedProcessingTime.hashCode(); - hash = 37 * hash + transferURL.hashCode(); - hash = 37 * hash + remainingPinTime.hashCode(); - return hash; - } - - @Override - public boolean equals(Object o) { - - if (o == this) { - return true; - } - if (!(o instanceof PtGChunkTO)) { - return false; - } - PtGChunkTO cd = (PtGChunkTO) o; - return requestToken.equals(cd.requestToken) && fromSURL.equals(cd.fromSURL) - && storageSystemInfo.equals(cd.storageSystemInfo) - && lifeTime.equals(cd.lifeTime) - && fileStorageType.equals(cd.fileStorageType) - && spaceToken.equals(cd.spaceToken) && dirOption.equals(cd.dirOption) - && transferProtocols.equals(cd.transferProtocols) - && fileSize.equals(cd.fileSize) && status.equals(cd.status) - && estimatedWaitTimeOnQueue.equals(cd.estimatedWaitTimeOnQueue) - && estimatedProcessingTime.equals(cd.estimatedProcessingTime) - && transferURL.equals(cd.transferURL) - && remainingPinTime.equals(cd.remainingPinTime); - } - - /** - * Method used to set the size of the file corresponding to the requested - * SURL. If the supplied TSizeInByte is null, the nothing gets set! - */ - public TSizeInBytes setFileSize(final TSizeInBytes size) { - - if (size != null) { - fileSize = size; - } - return null; - }; - - /** - * Method used to set the estimated time that the chunk will spend on the - * queue. If the supplied TLifeTimeInSeconds is null, then nothing gets set! - */ - public void setEstimatedWaitTimeOnQueue(final TLifeTimeInSeconds time) { - - if (time != null) { - estimatedWaitTimeOnQueue = time; - } - }; - - /** - * Method used to set the estimated time the processing will take. If the - * supplied TLifeTimeInSeconds is null, then nothing gets set! - */ - public void setEstimatedProcessingTime(final TLifeTimeInSeconds time) { - - if (time != null) { - estimatedProcessingTime = time; - } - }; - - /** - * Method used to set the transferURL associated to the SURL of this chunk. 
If - * TTURL is null, then nothing gets set! - */ - public void setTransferURL(final TTURL turl) { - - if (turl != null) { - transferURL = turl; - } - }; - - /** - * Method used in the mechanism for suspending and resuming a request. To be - * implemented! For now it always returns 0. - */ - public int getProgressCounter() { - - return 0; - }; -} diff --git a/src/main/java/it/grid/storm/persistence/model/PtGData.java b/src/main/java/it/grid/storm/persistence/model/PtGData.java new file mode 100644 index 000000000..499508bc1 --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/model/PtGData.java @@ -0,0 +1,37 @@ +package it.grid.storm.persistence.model; + +import it.grid.storm.srm.types.TDirOption; +import it.grid.storm.srm.types.TLifeTimeInSeconds; +import it.grid.storm.srm.types.TSizeInBytes; + +public interface PtGData extends FileTransferData { + + /** + * Method that returns the requested pin life time for this chunk of the srm request. + */ + public TLifeTimeInSeconds getPinLifeTime(); + + /** + * Method that returns the dirOption specified in the srm request. + */ + public TDirOption getDirOption(); + + /** + * Method that returns the file size for this chunk of the srm request. + */ + public TSizeInBytes getFileSize(); + + /** + * Method used to set the size of the file corresponding to the requested SURL. If the supplied + * TSizeInByte is null, then nothing gets set! + */ + public void setFileSize(TSizeInBytes size); + + /** + * Method that sets the status of this request to SRM_FILE_PINNED; it needs the explanation String + * which describes the situation in greater detail; if a null is passed, then an empty String is + * used as explanation. 
+ */ + public void changeStatusSRM_FILE_PINNED(String explanation); + +} diff --git a/src/main/java/it/grid/storm/persistence/model/PtGPersistentChunkData.java b/src/main/java/it/grid/storm/persistence/model/PtGPersistentChunkData.java new file mode 100644 index 000000000..b6f3cd763 --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/model/PtGPersistentChunkData.java @@ -0,0 +1,209 @@ +/* + * + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +package it.grid.storm.persistence.model; + +import it.grid.storm.common.types.TURLPrefix; +import it.grid.storm.griduser.GridUserInterface; +import it.grid.storm.persistence.exceptions.InvalidFileTransferDataAttributesException; +import it.grid.storm.persistence.exceptions.InvalidPtGDataAttributesException; +import it.grid.storm.persistence.exceptions.InvalidPtGPersistentChunkDataAttributesException; +import it.grid.storm.persistence.exceptions.InvalidSurlRequestDataAttributesException; +import it.grid.storm.srm.types.TDirOption; +import it.grid.storm.srm.types.TLifeTimeInSeconds; +import it.grid.storm.srm.types.TRequestToken; +import it.grid.storm.srm.types.TReturnStatus; +import it.grid.storm.srm.types.TSURL; +import it.grid.storm.srm.types.TSizeInBytes; +import it.grid.storm.srm.types.TStatusCode; +import it.grid.storm.srm.types.TTURL; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * This class represents a PrepareToGetChunkData, that is part of a multifile PrepareToGet srm + * request. It contains data about: the requestToken, the fromSURL, the requested lifeTime of + * pinning, the TDirOption which tells whether the requested SURL is a directory and if it must be + * recursed at all levels, as well as the desired number of levels to recurse, the desired + * transferProtocols in order of preference, the fileSize, and the transferURL for the supplied + * SURL. 
+ * + * @author EGRID - ICTP Trieste + * @date March 21st, 2005 + * @version 3.0 + */ +public class PtGPersistentChunkData extends IdentityPtGData implements PersistentChunkData { + + private static final Logger log = LoggerFactory.getLogger(PtGPersistentChunkData.class); + + /** + * long representing the primary key for the persistence layer, in the status_Get table + */ + private long primaryKey = -1; + + /** + * This is the requestToken of the multifile srm request to which this chunk belongs + */ + private TRequestToken requestToken; + + /** + * @param requestToken + * @param fromSURL + * @param lifeTime + * @param dirOption + * @param desiredProtocols + * @param fileSize + * @param status + * @param transferURL + * @throws InvalidPtGDataAttributesException + */ + public PtGPersistentChunkData(GridUserInterface auth, TRequestToken requestToken, TSURL fromSURL, + TLifeTimeInSeconds lifeTime, TDirOption dirOption, TURLPrefix desiredProtocols, + TSizeInBytes fileSize, TReturnStatus status, TTURL transferURL) + throws InvalidPtGDataAttributesException, InvalidPtGDataAttributesException, + InvalidFileTransferDataAttributesException, InvalidSurlRequestDataAttributesException { + + super(auth, fromSURL, lifeTime, dirOption, desiredProtocols, fileSize, status, transferURL); + if (requestToken == null) { + log.debug("PtGPersistentChunkData: requestToken is null!"); + throw new InvalidPtGPersistentChunkDataAttributesException(requestToken, fromSURL, lifeTime, + dirOption, desiredProtocols, fileSize, status, transferURL); + } + + this.requestToken = requestToken; + } + + /** + * Method used to get the primary key used in the persistence layer! + */ + @Override + public long getPrimaryKey() { + + return primaryKey; + } + + /** + * Method used to set the primary key to be used in the persistence layer! + */ + public void setPrimaryKey(long l) { + + primaryKey = l; + } + + /** + * Method that returns the requestToken of the srm request to which this chunk belongs. 
+ */ + @Override + public TRequestToken getRequestToken() { + + return requestToken; + } + + /** + * Method that sets the status of this request to SRM_FILE_PINNED; it needs the explanation String + * which describes the situation in greater detail; if a null is passed, then an empty String is + * used as explanation. + */ + public void changeStatusSRM_FILE_PINNED(String explanation) { + + setStatus(TStatusCode.SRM_FILE_PINNED, explanation); + } + + /* + * (non-Javadoc) + * + * @see java.lang.Object#hashCode() + */ + @Override + public int hashCode() { + + final int prime = 31; + int result = super.hashCode(); + result = prime * result + (int) (primaryKey ^ (primaryKey >>> 32)); + result = prime * result + ((requestToken == null) ? 0 : requestToken.hashCode()); + return result; + } + + /* + * (non-Javadoc) + * + * @see java.lang.Object#equals(java.lang.Object) + */ + @Override + public boolean equals(Object obj) { + + if (this == obj) { + return true; + } + if (!super.equals(obj)) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + PtGPersistentChunkData other = (PtGPersistentChunkData) obj; + if (primaryKey != other.primaryKey) { + return false; + } + if (requestToken == null) { + if (other.requestToken != null) { + return false; + } + } else if (!requestToken.equals(other.requestToken)) { + return false; + } + return true; + } + + /* + * (non-Javadoc) + * + * @see java.lang.Object#toString() + */ + @Override + public String toString() { + + StringBuilder builder = new StringBuilder(); + builder.append("PtGPersistentChunkData [primaryKey="); + builder.append(primaryKey); + builder.append(", requestToken="); + builder.append(requestToken); + builder.append(", pinLifeTime="); + builder.append(pinLifeTime); + builder.append(", dirOption="); + builder.append(dirOption); + builder.append(", fileSize="); + builder.append(fileSize); + builder.append(", transferProtocols="); + builder.append(transferProtocols); + builder.append(", SURL="); + 
builder.append(SURL); + builder.append(", status="); + builder.append(status); + builder.append(", transferURL="); + builder.append(transferURL); + builder.append("]"); + return builder.toString(); + } + + @Override + public long getIdentifier() { + + return getPrimaryKey(); + } +} diff --git a/src/main/java/it/grid/storm/persistence/model/PtPChunkDataTO.java b/src/main/java/it/grid/storm/persistence/model/PtPChunkDataTO.java new file mode 100644 index 000000000..c589f8bdb --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/model/PtPChunkDataTO.java @@ -0,0 +1,340 @@ +/* + * + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +package it.grid.storm.persistence.model; + +import it.grid.storm.common.types.TURLPrefix; +import it.grid.storm.config.Configuration; +import it.grid.storm.namespace.model.Protocol; +import it.grid.storm.persistence.converter.FileStorageTypeConverter; +import it.grid.storm.persistence.converter.OverwriteModeConverter; +import it.grid.storm.persistence.converter.StatusCodeConverter; +import it.grid.storm.persistence.converter.TransferProtocolListConverter; +import it.grid.storm.srm.types.TFileStorageType; +import it.grid.storm.srm.types.TOverwriteMode; +import it.grid.storm.srm.types.TStatusCode; + +import java.sql.Timestamp; +import java.util.List; + +/** + * Class that represents a row in the Persistence Layer: this is all raw data referring to the + * PtPChunkData proper, that is, String and primitive types. + * + * Each field is initialized with default values as per SRM 2.2 specification: protocolList GSIFTP + * fileStorageType VOLATILE overwriteMode NEVER status SRM_REQUEST_QUEUED + * + * All other fields are 0 if int, or a white space if String. 
+ * + * @author EGRID ICTP + * @version 2.0 + * @date June 2005 + */ +public class PtPChunkDataTO { + + private static final String FQAN_SEPARATOR = "#"; + /* Database table request_Get fields BEGIN */ + private long primaryKey = -1; // ID primary key of status_Put record in DB + private String toSURL = " "; + private long expectedFileSize = 0; + private String normalizedStFN = null; + private Integer surlUniqueID = null; + /* Database table request_Get fields END */ + + private String requestToken = " "; + private int pinLifetime = -1; + private int fileLifetime = -1; + private String fileStorageType = null; // initialised in constructor + private String spaceToken = " "; + private List protocolList = null; // initialised in constructor + private String overwriteOption = null; // initialised in constructor + private int status; // initialised in constructor + private String errString = " "; + private String turl = " "; + private Timestamp timeStamp = null; + + private String clientDN = null; + private String vomsAttributes = null; + + + public PtPChunkDataTO() { + + this.fileStorageType = FileStorageTypeConverter.getInstance() + .toDB(TFileStorageType + .getTFileStorageType(Configuration.getInstance().getDefaultFileStorageType())); + TURLPrefix protocolPreferences = new TURLPrefix(); + protocolPreferences.addProtocol(Protocol.GSIFTP); + this.protocolList = TransferProtocolListConverter.toDB(protocolPreferences); + this.overwriteOption = OverwriteModeConverter.getInstance().toDB(TOverwriteMode.NEVER); + this.status = StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_REQUEST_QUEUED); + } + + public long primaryKey() { + + return primaryKey; + } + + public void setPrimaryKey(long n) { + + primaryKey = n; + } + + public String requestToken() { + + return requestToken; + } + + public void setRequestToken(String s) { + + requestToken = s; + } + + public Timestamp timeStamp() { + + return timeStamp; + } + + public void setTimeStamp(Timestamp timeStamp) { + + 
this.timeStamp = timeStamp; + } + + public String toSURL() { + + return toSURL; + } + + public void setToSURL(String s) { + + toSURL = s; + } + + /** + * @return the normalizedStFN + */ + public String normalizedStFN() { + + return normalizedStFN; + } + + /** + * @param normalizedStFN the normalizedStFN to set + */ + public void setNormalizedStFN(String normalizedStFN) { + + this.normalizedStFN = normalizedStFN; + } + + /** + * @return the surlUniqueID + */ + public Integer surlUniqueID() { + + return surlUniqueID; + } + + /** + * @param surlUniqueID the surlUniqueID to set + */ + public void setSurlUniqueID(Integer surlUniqueID) { + + this.surlUniqueID = surlUniqueID; + } + + public int pinLifetime() { + + return pinLifetime; + } + + public void setPinLifetime(int n) { + + pinLifetime = n; + } + + public int fileLifetime() { + + return fileLifetime; + } + + public void setFileLifetime(int n) { + + fileLifetime = n; + } + + public String fileStorageType() { + + return fileStorageType; + } + + /** + * Method that sets the FileStorageType: if it is null nothing gets set. The deafult value is + * Permanent. + */ + public void setFileStorageType(String s) { + + if (s != null) + fileStorageType = s; + } + + public String spaceToken() { + + return spaceToken; + } + + public void setSpaceToken(String s) { + + spaceToken = s; + } + + public long expectedFileSize() { + + return expectedFileSize; + } + + public void setExpectedFileSize(long l) { + + expectedFileSize = l; + } + + public List protocolList() { + + return protocolList; + } + + public void setProtocolList(List l) { + + if ((l != null) && (!l.isEmpty())) + protocolList = l; + } + + public String overwriteOption() { + + return overwriteOption; + } + + /** + * Method that sets the OverwriteMode: if it is null nothing gets set. The deafult value is Never. 
+ */ + public void setOverwriteOption(String s) { + + if (s != null) + overwriteOption = s; + } + + public int status() { + + return status; + } + + public void setStatus(int n) { + + status = n; + } + + public String errString() { + + return errString; + } + + public void setErrString(String s) { + + errString = s; + } + + public String transferURL() { + + return turl; + } + + public void setTransferURL(String s) { + + turl = s; + } + + public String clientDN() { + + return clientDN; + } + + public void setClientDN(String s) { + + clientDN = s; + } + + public String vomsAttributes() { + + return vomsAttributes; + } + + public void setVomsAttributes(String s) { + + vomsAttributes = s; + } + + public void setVomsAttributes(String[] fqaNsAsString) { + + vomsAttributes = ""; + for (int i = 0; i < fqaNsAsString.length; i++) { + vomsAttributes += fqaNsAsString[i]; + if (i < fqaNsAsString.length - 1) { + vomsAttributes += FQAN_SEPARATOR; + } + } + + } + + public String[] vomsAttributesArray() { + + return vomsAttributes.split(FQAN_SEPARATOR); + } + + public String toString() { + + StringBuilder sb = new StringBuilder(); + sb.append(primaryKey); + sb.append(" "); + sb.append(requestToken); + sb.append(" "); + sb.append(toSURL); + sb.append(" "); + sb.append(normalizedStFN); + sb.append(" "); + sb.append(surlUniqueID); + sb.append(" "); + sb.append(pinLifetime); + sb.append(" "); + sb.append(fileLifetime); + sb.append(" "); + sb.append(fileStorageType); + sb.append(" "); + sb.append(spaceToken); + sb.append(" "); + sb.append(expectedFileSize); + sb.append(" "); + sb.append(protocolList); + sb.append(" "); + sb.append(overwriteOption); + sb.append(" "); + sb.append(status); + sb.append(" "); + sb.append(errString); + sb.append(" "); + sb.append(turl); + return sb.toString(); + } + +} diff --git a/src/main/java/it/grid/storm/persistence/model/PtPChunkTO.java b/src/main/java/it/grid/storm/persistence/model/PtPChunkTO.java deleted file mode 100644 index b990f172b..000000000 
package it.grid.storm.persistence.model;

import it.grid.storm.srm.types.TFileStorageType;
import it.grid.storm.srm.types.TLifeTimeInSeconds;
import it.grid.storm.srm.types.TOverwriteMode;
import it.grid.storm.srm.types.TSizeInBytes;
import it.grid.storm.srm.types.TSpaceToken;

/**
 * Contract for the data of one chunk of a PrepareToPut srm request, extending the generic
 * file-transfer data with PtP-specific attributes (space token, lifetimes, storage type,
 * expected size, overwrite mode) and the PtP-specific status transitions.
 */
public interface PtPData extends FileTransferData {

  /**
   * Method that returns the space token supplied for this chunk of the srm request.
   */
  public TSpaceToken getSpaceToken();

  /**
   * Method that returns the requested pin life time for this chunk of the srm request.
   */
  public TLifeTimeInSeconds pinLifetime();

  /**
   * Method that returns the requested file life time for this chunk of the srm request.
   */
  public TLifeTimeInSeconds fileLifetime();

  /**
   * Method that returns the fileStorageType for this chunk of the srm request.
   */
  public TFileStorageType fileStorageType();

  /**
   * Method that returns the knownSizeOfThisFile supplied with this chunk of the srm request.
   */
  public TSizeInBytes expectedFileSize();

  /**
   * Method that returns the overwriteOption specified in the srm request.
   */
  public TOverwriteMode overwriteOption();

  /**
   * Method that sets the status of this request to SRM_SPACE_AVAILABLE; it needs the explanation
   * String which describes the situation in greater detail; if a null is passed, then an empty
   * String is used as explanation.
   */
  public void changeStatusSRM_SPACE_AVAILABLE(String explanation);

  /**
   * Method that sets the status of this request to SRM_DUPLICATION_ERROR; it needs the explanation
   * String which describes the situation in greater detail; if a null is passed, then an empty
   * String is used as explanation.
   */
  public void changeStatusSRM_DUPLICATION_ERROR(String explanation);

}
 */

package it.grid.storm.persistence.model;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import it.grid.storm.common.types.TURLPrefix;
import it.grid.storm.griduser.GridUserInterface;
import it.grid.storm.persistence.exceptions.InvalidFileTransferDataAttributesException;
import it.grid.storm.persistence.exceptions.InvalidPtPDataAttributesException;
import it.grid.storm.persistence.exceptions.InvalidPtPPersistentChunkDataAttributesException;
import it.grid.storm.persistence.exceptions.InvalidSurlRequestDataAttributesException;
import it.grid.storm.srm.types.TFileStorageType;
import it.grid.storm.srm.types.TLifeTimeInSeconds;
import it.grid.storm.srm.types.TOverwriteMode;
import it.grid.storm.srm.types.TRequestToken;
import it.grid.storm.srm.types.TReturnStatus;
import it.grid.storm.srm.types.TSURL;
import it.grid.storm.srm.types.TSizeInBytes;
import it.grid.storm.srm.types.TSpaceToken;
import it.grid.storm.srm.types.TTURL;

/**
 * This class represents a PrepareToPutChunkData, that is part of a multifile PrepareToPut srm
 * request. It contains data about: the requestToken, the toSURL, the requested lifeTime of pinning,
 * the requested lifetime of volatile, the requested fileStorageType and any available spaceToken,
 * the expectedFileSize, the desired transferProtocols in order of preference, the overwriteOption
 * to be applied in case the file already exists, the transferURL for the supplied SURL.
 *
 * @author EGRID - ICTP Trieste
 * @date June, 2005
 * @version 2.0
 */
public class PtPPersistentChunkData extends IdentityPtPData implements PersistentChunkData {

  private static final Logger log = LoggerFactory.getLogger(PtPPersistentChunkData.class);

  /**
   * long representing the primary key for the persistence layer, in the status_Put table
   */
  private long primaryKey = -1;

  /**
   * This is the requestToken of the multifile srm request to which this chunk belongs
   */
  private final TRequestToken requestToken;

  /**
   * Builds the chunk data; all non-token attributes are validated by the superclass
   * constructor, which may throw any of the Invalid*AttributesException types declared here.
   *
   * @throws InvalidPtPPersistentChunkDataAttributesException if requestToken is null
   */
  public PtPPersistentChunkData(GridUserInterface auth, TRequestToken requestToken, TSURL toSURL,
      TLifeTimeInSeconds pinLifetime, TLifeTimeInSeconds fileLifetime,
      TFileStorageType fileStorageType, TSpaceToken spaceToken, TSizeInBytes expectedFileSize,
      TURLPrefix transferProtocols, TOverwriteMode overwriteOption, TReturnStatus status,
      TTURL transferURL)
      throws InvalidPtPPersistentChunkDataAttributesException, InvalidPtPDataAttributesException,
      InvalidFileTransferDataAttributesException, InvalidSurlRequestDataAttributesException {

    super(auth, toSURL, pinLifetime, fileLifetime, fileStorageType, spaceToken, expectedFileSize,
        transferProtocols, overwriteOption, status, transferURL);
    // the request token is the only attribute validated here; everything else is
    // checked by the superclass chain
    if (requestToken == null) {
      log.debug("PtPPersistentChunkData: requestToken is null!");
      throw new InvalidPtPPersistentChunkDataAttributesException(requestToken, toSURL, pinLifetime,
          fileLifetime, fileStorageType, spaceToken, expectedFileSize, transferProtocols,
          overwriteOption, status, transferURL);
    }
    this.requestToken = requestToken;
  }

  /**
   * Method used to get the primary key used in the persistence layer!
   */
  @Override
  public long getPrimaryKey() {

    return primaryKey;
  }

  /**
   * Method used to set the primary key to be used in the persistence layer!
   */
  public void setPrimaryKey(long l) {

    primaryKey = l;
  }

  /**
   * Method that returns the requestToken of the srm request to which this chunk belongs.
   */
  @Override
  public TRequestToken getRequestToken() {

    return requestToken;
  }

  /** The chunk identifier coincides with the persistence-layer primary key. */
  @Override
  public long getIdentifier() {

    return getPrimaryKey();
  }

  /*
   * (non-Javadoc)
   *
   * @see java.lang.Object#hashCode()
   */
  @Override
  public int hashCode() {

    // combines the superclass hash with primaryKey and requestToken, consistent with equals
    final int prime = 31;
    int result = super.hashCode();
    result = prime * result + (int) (primaryKey ^ (primaryKey >>> 32));
    result = prime * result + ((requestToken == null) ? 0 : requestToken.hashCode());
    return result;
  }

  /*
   * (non-Javadoc)
   *
   * @see java.lang.Object#equals(java.lang.Object)
   */
  @Override
  public boolean equals(Object obj) {

    if (this == obj) {
      return true;
    }
    if (!super.equals(obj)) {
      return false;
    }
    if (getClass() != obj.getClass()) {
      return false;
    }
    PtPPersistentChunkData other = (PtPPersistentChunkData) obj;
    if (primaryKey != other.primaryKey) {
      return false;
    }
    if (requestToken == null) {
      if (other.requestToken != null) {
        return false;
      }
    } else if (!requestToken.equals(other.requestToken)) {
      return false;
    }
    return true;
  }

  /*
   * (non-Javadoc)
   *
   * @see java.lang.Object#toString()
   */
  @Override
  public String toString() {

    StringBuilder builder = new StringBuilder();
    builder.append("PtPPersistentChunkData [primaryKey=");
    builder.append(primaryKey);
    builder.append(", requestToken=");
    builder.append(requestToken);
    builder.append(", spaceToken=");
    builder.append(spaceToken);
    builder.append(", pinLifetime=");
    builder.append(pinLifetime);
    builder.append(", fileLifetime=");
    builder.append(fileLifetime);
    builder.append(", fileStorageType=");
    builder.append(fileStorageType);
    builder.append(", overwriteOption=");
    builder.append(overwriteOption);
    builder.append(", expectedFileSize=");
    builder.append(expectedFileSize);
    builder.append(", transferProtocols=");
    builder.append(transferProtocols);
    builder.append(", SURL=");
    builder.append(SURL);
    builder.append(", status=");
    builder.append(status);
    builder.append(", transferURL=");
    builder.append(transferURL);
    builder.append("]");
    return builder.toString();
  }
}
+ */ + +package it.grid.storm.persistence.model; + +import it.grid.storm.persistence.exceptions.InvalidReducedBoLChunkDataAttributesException; +import it.grid.storm.srm.types.TReturnStatus; +import it.grid.storm.srm.types.TSURL; +import it.grid.storm.srm.types.TStatusCode; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * This class represents a ReducedBringOnLineChunkData, that is part of a multifile PrepareToGet srm + * request. It is closely related to BoLChunkData but it is called Reduced because it only contains + * the fromSURL, the current TReturnStatus, and the primary key of the request. + * + * This class is intended to be used by srmReleaseFiles, where only a limited amunt of information + * is needed instead of full blown BoLChunkData. + * + * @author CNAF + * @date Aug 2009 + * @version 1.0 + */ +public class ReducedBoLChunkData implements ReducedChunkData { + + @SuppressWarnings("unused") + private static final Logger log = LoggerFactory.getLogger(ReducedBoLChunkData.class); + + private long primaryKey = -1; // long representing the primary key for the + // persistence layer! 
+ private TSURL fromSURL; // SURL that the srm command wants to get + private TReturnStatus status; // return status for this chunk of request + + public ReducedBoLChunkData(TSURL fromSURL, TReturnStatus status) + throws InvalidReducedBoLChunkDataAttributesException { + + boolean ok = status != null && fromSURL != null; + if (!ok) { + throw new InvalidReducedBoLChunkDataAttributesException(fromSURL, status); + } + this.fromSURL = fromSURL; + this.status = status; + } + + @Override + public boolean equals(Object o) { + + if (o == this) { + return true; + } + if (!(o instanceof ReducedBoLChunkData)) { + return false; + } + ReducedBoLChunkData cd = (ReducedBoLChunkData) o; + return (primaryKey == cd.primaryKey) && fromSURL.equals(cd.fromSURL) + && status.equals(cd.status); + } + + /** + * Method that returns the fromSURL of the srm request to which this chunk belongs. + */ + public TSURL fromSURL() { + + return fromSURL; + } + + @Override + public int hashCode() { + + int hash = 17; + hash = 37 * hash + new Long(primaryKey).hashCode(); + hash = 37 * hash + fromSURL.hashCode(); + hash = 37 * hash + status.hashCode(); + return hash; + } + + public boolean isPinned() { + + if (status.getStatusCode() == TStatusCode.SRM_SUCCESS) { + return true; + } + return false; + } + + /** + * Method used to get the primary key used in the persistence layer! + */ + public long primaryKey() { + + return primaryKey; + } + + /** + * Method used to set the primary key to be used in the persistence layer! + */ + public void setPrimaryKey(long l) { + + primaryKey = l; + } + + /** + * Method that returns the status for this chunk of the srm request. 
+ */ + public TReturnStatus status() { + + return status; + } + + @Override + public String toString() { + + StringBuilder sb = new StringBuilder(); + sb.append("ReducedBoLChunkData\n"); + sb.append("primaryKey="); + sb.append(primaryKey); + sb.append("; "); + sb.append("fromSURL="); + sb.append(fromSURL); + sb.append("; "); + sb.append("status="); + sb.append(status); + sb.append("."); + return sb.toString(); + } +} diff --git a/src/main/java/it/grid/storm/persistence/model/ReducedBoLChunkDataTO.java b/src/main/java/it/grid/storm/persistence/model/ReducedBoLChunkDataTO.java new file mode 100644 index 000000000..96004be8e --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/model/ReducedBoLChunkDataTO.java @@ -0,0 +1,130 @@ +/* + * + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package it.grid.storm.persistence.model; + +import it.grid.storm.persistence.converter.StatusCodeConverter; +import it.grid.storm.srm.types.TStatusCode; + +/** + * Class that represents some of the fields in a row in the Persistence Layer: this is all raw data + * referring to the ReducedBoLChunkData proper, that is String and primitive types. 
+ * + * @author EGRID ICTP + * @version 1.0 + * @date November, 2006 + */ +public class ReducedBoLChunkDataTO { + + private long primaryKey = -1; // ID primary key of record in DB + private String fromSURL = " "; + private String normalizedStFN = null; + private Integer surlUniqueID = null; + + private int status = StatusCodeConverter.getInstance().toDB(TStatusCode.SRM_REQUEST_QUEUED); + private String errString = " "; + + public String errString() { + + return errString; + } + + public String fromSURL() { + + return fromSURL; + } + + public long primaryKey() { + + return primaryKey; + } + + public void setErrString(String s) { + + errString = s; + } + + public void setFromSURL(String s) { + + fromSURL = s; + } + + public void setPrimaryKey(long n) { + + primaryKey = n; + } + + public void setStatus(int n) { + + status = n; + } + + public int status() { + + return status; + } + + /** + * @param normalizedStFN the normalizedStFN to set + */ + public void setNormalizedStFN(String normalizedStFN) { + + this.normalizedStFN = normalizedStFN; + } + + /** + * @return the normalizedStFN + */ + public String normalizedStFN() { + + return normalizedStFN; + } + + /** + * @param surlUniqueID the sURLUniqueID to set + */ + public void setSurlUniqueID(Integer surlUniqueID) { + + this.surlUniqueID = surlUniqueID; + } + + /** + * @return the sURLUniqueID + */ + public Integer surlUniqueID() { + + return surlUniqueID; + } + + public String toString() { + + StringBuilder sb = new StringBuilder(); + sb.append(primaryKey); + sb.append(" "); + sb.append(fromSURL); + sb.append(" "); + sb.append(normalizedStFN); + sb.append(" "); + sb.append(surlUniqueID); + sb.append(" "); + sb.append(status); + sb.append(" "); + sb.append(errString); + sb.append(" "); + return sb.toString(); + } +} diff --git a/src/main/java/it/grid/storm/catalogs/ReducedChunkData.java b/src/main/java/it/grid/storm/persistence/model/ReducedChunkData.java similarity index 79% rename from 
src/main/java/it/grid/storm/catalogs/ReducedChunkData.java rename to src/main/java/it/grid/storm/persistence/model/ReducedChunkData.java index a0a97affa..b6b0eae1f 100644 --- a/src/main/java/it/grid/storm/catalogs/ReducedChunkData.java +++ b/src/main/java/it/grid/storm/persistence/model/ReducedChunkData.java @@ -15,21 +15,21 @@ * the License. */ -package it.grid.storm.catalogs; +package it.grid.storm.persistence.model; import it.grid.storm.srm.types.TReturnStatus; import it.grid.storm.srm.types.TSURL; public interface ReducedChunkData { - public TSURL fromSURL(); + public TSURL fromSURL(); - public boolean isPinned(); + public boolean isPinned(); - public long primaryKey(); + public long primaryKey(); - public void setPrimaryKey(long l); + public void setPrimaryKey(long l); - public TReturnStatus status(); + public TReturnStatus status(); } diff --git a/src/main/java/it/grid/storm/persistence/model/ReducedPtGChunkData.java b/src/main/java/it/grid/storm/persistence/model/ReducedPtGChunkData.java new file mode 100644 index 000000000..8f8f9d09f --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/model/ReducedPtGChunkData.java @@ -0,0 +1,140 @@ +/* + * + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +package it.grid.storm.persistence.model; + +import it.grid.storm.persistence.exceptions.InvalidReducedPtGChunkDataAttributesException; +import it.grid.storm.srm.types.TReturnStatus; +import it.grid.storm.srm.types.TSURL; +import it.grid.storm.srm.types.TStatusCode; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * This class represents a ReducedPrepareToGetChunkData, that is part of a multifile PrepareToGet + * srm request. It is closely related to PtGChunkData but it is called Reduced because it only + * contains the fromSURL, the current TReturnStatus, and the primary key of the request. + * + * This class is intended to be used by srmReleaseFiles, where only a limited amunt of information + * is needed instead of full blown PtGChunkData. + * + * @author EGRID - ICTP Trieste + * @date November, 2006 + * @version 1.0 + */ +public class ReducedPtGChunkData implements ReducedChunkData { + + @SuppressWarnings("unused") + private static final Logger log = LoggerFactory.getLogger(ReducedPtGChunkData.class); + + private long primaryKey = -1; // long representing the primary key for the + // persistence layer! + private TSURL fromSURL; // SURL that the srm command wants to get + private TReturnStatus status; // return status for this chunk of request + + public ReducedPtGChunkData(TSURL fromSURL, TReturnStatus status) + throws InvalidReducedPtGChunkDataAttributesException { + + if (status == null || fromSURL == null) { + throw new InvalidReducedPtGChunkDataAttributesException(fromSURL, status); + } + this.fromSURL = fromSURL; + this.status = status; + } + + /** + * Method that returns the fromSURL of the srm request to which this chunk belongs. 
+ */ + public TSURL fromSURL() { + + return fromSURL; + } + + @Override + public int hashCode() { + + int hash = 17; + hash = 37 * hash + new Long(primaryKey).hashCode(); + hash = 37 * hash + fromSURL.hashCode(); + hash = 37 * hash + status.hashCode(); + return hash; + } + + public boolean isPinned() { + + if (status.getStatusCode() == TStatusCode.SRM_FILE_PINNED) { + return true; + } + return false; + } + + /** + * Method used to get the primary key used in the persistence layer! + */ + public long primaryKey() { + + return primaryKey; + } + + /** + * Method used to set the primary key to be used in the persistence layer! + */ + public void setPrimaryKey(long l) { + + primaryKey = l; + } + + /** + * Method that returns the status for this chunk of the srm request. + */ + public TReturnStatus status() { + + return status; + } + + @Override + public String toString() { + + StringBuilder sb = new StringBuilder(); + sb.append("ReducedPtGChunkData\n"); + sb.append("primaryKey="); + sb.append(primaryKey); + sb.append("; "); + sb.append("fromSURL="); + sb.append(fromSURL); + sb.append("; "); + sb.append("status="); + sb.append(status); + sb.append("."); + return sb.toString(); + } + + @Override + public boolean equals(Object o) { + + if (o == this) { + return true; + } + if (!(o instanceof ReducedPtGChunkData)) { + return false; + } + ReducedPtGChunkData cd = (ReducedPtGChunkData) o; + return (primaryKey == cd.primaryKey) && fromSURL.equals(cd.fromSURL) + && status.equals(cd.status); + } +} diff --git a/src/main/java/it/grid/storm/persistence/model/ReducedPtGChunkDataTO.java b/src/main/java/it/grid/storm/persistence/model/ReducedPtGChunkDataTO.java new file mode 100644 index 000000000..fe95415ee --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/model/ReducedPtGChunkDataTO.java @@ -0,0 +1,131 @@ +/* + * + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package it.grid.storm.persistence.model; + +import static it.grid.storm.srm.types.TStatusCode.SRM_REQUEST_QUEUED; + +import it.grid.storm.persistence.converter.StatusCodeConverter; + +/** + * Class that represents some of the fileds in a row in the Persistence Layer: this is all raw data + * referring to the ReducedPtGChunkData proper, that is String and primitive types. + * + * @author EGRID ICTP + * @version 1.0 + * @date November, 2006 + */ +public class ReducedPtGChunkDataTO { + + private long primaryKey = -1; // ID primary key of record in DB + private String fromSURL = " "; + private String normalizedStFN = null; + private Integer surlUniqueID = null; + + private int status = StatusCodeConverter.getInstance().toDB(SRM_REQUEST_QUEUED); + private String errString = " "; + + public long primaryKey() { + + return primaryKey; + } + + public void setPrimaryKey(long n) { + + primaryKey = n; + } + + public String fromSURL() { + + return fromSURL; + } + + public void setFromSURL(String s) { + + fromSURL = s; + } + + /** + * @param normalizedStFN the normalizedStFN to set + */ + public void setNormalizedStFN(String normalizedStFN) { + + this.normalizedStFN = normalizedStFN; + } + + /** + * @return the normalizedStFN + */ + public String normalizedStFN() { + + return normalizedStFN; + } + + /** + * @param surlUniqueID the sURLUniqueID to set + */ + public void setSurlUniqueID(Integer surlUniqueID) { + + 
this.surlUniqueID = surlUniqueID; + } + + /** + * @return the sURLUniqueID + */ + public Integer surlUniqueID() { + + return surlUniqueID; + } + + public int status() { + + return status; + } + + public void setStatus(int n) { + + status = n; + } + + public String errString() { + + return errString; + } + + public void setErrString(String s) { + + errString = s; + } + + public String toString() { + + StringBuilder sb = new StringBuilder(); + sb.append(primaryKey); + sb.append(" "); + sb.append(fromSURL); + sb.append(" "); + sb.append(normalizedStFN); + sb.append(" "); + sb.append(surlUniqueID); + sb.append(" "); + sb.append(status); + sb.append(" "); + sb.append(errString); + sb.append(" "); + return sb.toString(); + } +} diff --git a/src/main/java/it/grid/storm/persistence/model/ReducedPtPChunkData.java b/src/main/java/it/grid/storm/persistence/model/ReducedPtPChunkData.java new file mode 100644 index 000000000..b2d1c0bf4 --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/model/ReducedPtPChunkData.java @@ -0,0 +1,167 @@ +/* + * + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +package it.grid.storm.persistence.model; + +import it.grid.storm.persistence.exceptions.InvalidReducedPtPChunkDataAttributesException; +import it.grid.storm.srm.types.TFileStorageType; +import it.grid.storm.srm.types.TLifeTimeInSeconds; +import it.grid.storm.srm.types.TReturnStatus; +import it.grid.storm.srm.types.TSURL; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * This class represents a ReducedPrepareToPutChunkData, that is part of a multifile PrepareToPut + * srm request. It is closely related to PtPChunkData but it is called Reduced because it only + * contains the toSURL, the current TReturnStatus, the TFileStorageType, the FileLifeTime in case of + * Volatile, the VomsGridUser limited to the DN, and the primary key of the request. + * + * This class is intended to be used by srmPutDone, where only a limited amount of information is + * needed instead of full blown PtPChunkData. It is also used by the automatic handlnig of non + * invoked srmPutDone, during transition to SRM_FILE_LIFETIME_EXPIRED. + * + * @author EGRID - ICTP Trieste + * @date January, 2007 + * @version 2.0 + */ +public class ReducedPtPChunkData { + + @SuppressWarnings("unused") + private static final Logger log = LoggerFactory.getLogger(ReducedPtPChunkData.class); + + private long primaryKey = -1; // long representing the primary key for the + // persistence layer! + private TSURL toSURL; // SURL that the srm command wants to get + private TReturnStatus status; // return status for this chunk of request + private TFileStorageType fileStorageType; // fileStorageType of this shunk of + // the request + private TLifeTimeInSeconds fileLifetime; // requested lifetime for SURL in + // case of Volatile entry. 
+ + public ReducedPtPChunkData(TSURL toSURL, TReturnStatus status, TFileStorageType fileStorageType, + TLifeTimeInSeconds fileLifetime) throws InvalidReducedPtPChunkDataAttributesException { + + if (status == null || toSURL == null || fileStorageType == null || fileLifetime == null) { + throw new InvalidReducedPtPChunkDataAttributesException(toSURL, status, fileStorageType, + fileLifetime); + } + this.toSURL = toSURL; + this.status = status; + this.fileStorageType = fileStorageType; + this.fileLifetime = fileLifetime; + } + + /** + * Method used to get the primary key used in the persistence layer! + */ + public long primaryKey() { + + return primaryKey; + } + + /** + * Method used to set the primary key to be used in the persistence layer! + */ + public void setPrimaryKey(long l) { + + primaryKey = l; + } + + /** + * Method that returns the toSURL of the srm request to which this chunk belongs. + */ + public TSURL toSURL() { + + return toSURL; + } + + /** + * Method that returns the status for this chunk of the srm request. + */ + public TReturnStatus status() { + + return status; + } + + /** + * Method that returns the TFileStorageType of the srm request to which this chunk belongs. + */ + public TFileStorageType fileStorageType() { + + return fileStorageType; + } + + /** + * Method that returns the fileLifetime of the srm request to which this chunk belongs. 
+ */ + public TLifeTimeInSeconds fileLifetime() { + + return fileLifetime; + } + + @Override + public String toString() { + + StringBuilder sb = new StringBuilder(); + sb.append("ReducedPtPChunkData\n"); + sb.append("primaryKey="); + sb.append(primaryKey); + sb.append("; "); + sb.append("toSURL="); + sb.append(toSURL); + sb.append("; "); + sb.append("status="); + sb.append(status); + sb.append(";"); + sb.append("fileStorageType="); + sb.append(fileStorageType); + sb.append(";"); + sb.append("fileLifetime="); + sb.append(fileLifetime); + sb.append("."); + return sb.toString(); + } + + @Override + public int hashCode() { + + int hash = 17; + hash = 37 * hash + new Long(primaryKey).hashCode(); + hash = 37 * hash + toSURL.hashCode(); + hash = 37 * hash + status.hashCode(); + hash = 37 * hash + fileStorageType.hashCode(); + hash = 37 * hash + fileLifetime.hashCode(); + return hash; + } + + @Override + public boolean equals(Object o) { + + if (o == this) { + return true; + } + if (!(o instanceof ReducedPtPChunkData)) { + return false; + } + ReducedPtPChunkData cd = (ReducedPtPChunkData) o; + return (primaryKey == cd.primaryKey) && toSURL.equals(cd.toSURL) && status.equals(cd.status) + && fileStorageType.equals(cd.fileStorageType) && fileLifetime.equals(cd.fileLifetime); + } + +} diff --git a/src/main/java/it/grid/storm/persistence/model/ReducedPtPChunkDataTO.java b/src/main/java/it/grid/storm/persistence/model/ReducedPtPChunkDataTO.java new file mode 100644 index 000000000..24c80221f --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/model/ReducedPtPChunkDataTO.java @@ -0,0 +1,162 @@ +/* + * + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. 
You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package it.grid.storm.persistence.model; + +import static it.grid.storm.srm.types.TFileStorageType.VOLATILE; +import static it.grid.storm.srm.types.TStatusCode.SRM_REQUEST_QUEUED; + +import it.grid.storm.persistence.converter.FileStorageTypeConverter; +import it.grid.storm.persistence.converter.StatusCodeConverter; + +/** + * Class that represents some of the fields in a row in the Persistence Layer: this is all raw data + * referring to the ReducedPtPChunkData proper, that is String and primitive types. + * + * @author EGRID ICTP + * @version 1.0 + * @date January, 2007 + */ +public class ReducedPtPChunkDataTO { + + private long primaryKey = -1; // ID primary key of record in DB + private String toSURL = " "; + private String normalizedStFN = null; + private Integer surlUniqueID = null; + + private int status = StatusCodeConverter.getInstance().toDB(SRM_REQUEST_QUEUED); + private String errString = " "; + private String fileStorageType = FileStorageTypeConverter.getInstance().toDB(VOLATILE); + private int fileLifetime = -1; + + public long primaryKey() { + + return primaryKey; + } + + public void setPrimaryKey(long n) { + + primaryKey = n; + } + + public String toSURL() { + + return toSURL; + } + + public void setToSURL(String s) { + + toSURL = s; + } + + /** + * @param normalizedStFN the normalizedStFN to set + */ + public void setNormalizedStFN(String normalizedStFN) { + + this.normalizedStFN = normalizedStFN; + } + + /** + * @return the normalizedStFN + */ + public String normalizedStFN() { + + return normalizedStFN; + } + + /** + * 
@param surlUniqueID the sURLUniqueID to set + */ + public void setSurlUniqueID(Integer surlUniqueID) { + + this.surlUniqueID = surlUniqueID; + } + + /** + * @return the sURLUniqueID + */ + public Integer surlUniqueID() { + + return surlUniqueID; + } + + public int status() { + + return status; + } + + public void setStatus(int n) { + + status = n; + } + + public String errString() { + + return errString; + } + + public void setErrString(String s) { + + errString = s; + } + + public String fileStorageType() { + + return fileStorageType; + } + + /** + * Method that sets the FileStorageType: if it is null nothing gets set. The default value is + * Volatile. + */ + public void setFileStorageType(String s) { + + if (s != null) + fileStorageType = s; + } + + public int fileLifetime() { + + return fileLifetime; + } + + public void setFileLifetime(int n) { + + fileLifetime = n; + } + + public String toString() { + + StringBuilder sb = new StringBuilder(); + sb.append(primaryKey); + sb.append(" "); + sb.append(toSURL); + sb.append(" "); + sb.append(normalizedStFN); + sb.append(" "); + sb.append(surlUniqueID); + sb.append(" "); + sb.append(status); + sb.append(" "); + sb.append(errString); + sb.append(" "); + sb.append(fileStorageType); + sb.append(" "); + return sb.toString(); + } +} diff --git a/src/main/java/it/grid/storm/catalogs/RequestData.java b/src/main/java/it/grid/storm/persistence/model/RequestData.java similarity index 94% rename from src/main/java/it/grid/storm/catalogs/RequestData.java rename to src/main/java/it/grid/storm/persistence/model/RequestData.java index 5b937f891..28083638c 100644 --- a/src/main/java/it/grid/storm/catalogs/RequestData.java +++ b/src/main/java/it/grid/storm/persistence/model/RequestData.java @@ -15,7 +15,7 @@ * the License. 
*/ -package it.grid.storm.catalogs; +package it.grid.storm.persistence.model; import it.grid.storm.srm.types.TRequestToken; import it.grid.storm.srm.types.TReturnStatus; @@ -23,8 +23,8 @@ import it.grid.storm.synchcall.data.InputData; /** - * Class that represents a generic chunk. It provides only one method which is - * the primary key associated ot the chunk in persistence. + * Class that represents a generic chunk. It provides only one method which is the primary key + * associated to the chunk in persistence. * * @author EGRID - ICTP Trieste * @version 1.0 diff --git a/src/main/java/it/grid/storm/persistence/model/RequestSummaryData.java b/src/main/java/it/grid/storm/persistence/model/RequestSummaryData.java new file mode 100644 index 000000000..ceb1ce710 --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/model/RequestSummaryData.java @@ -0,0 +1,524 @@ +/* + * + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +package it.grid.storm.persistence.model; + +import it.grid.storm.srm.types.TLifeTimeInSeconds; +import it.grid.storm.srm.types.TRequestToken; +import it.grid.storm.srm.types.TRequestType; +import it.grid.storm.srm.types.TReturnStatus; +// import it.grid.storm.griduser.VomsGridUser; +import it.grid.storm.griduser.GridUserInterface; +import it.grid.storm.persistence.exceptions.InvalidRequestSummaryDataAttributesException; + +/** + * This class represents the SummaryData associated with the SRM request. It contains info about: + * Primary Key of request, TRequestType, TRequestToken, VomsGridUser. + * + * @author EGRID - ICTP Trieste + * @date March 18th, 2005 + * @version 4.0 + */ +public class RequestSummaryData { + + private TRequestType requestType = null; // request type of SRM request + private TRequestToken requestToken = null; // TRequestToken of SRM request + private GridUserInterface gu = null; // VomsGridUser that issued This request + private long id = -1; // long representing This object in persistence + + private String userToken = null; + private Integer retrytime = null; + private TLifeTimeInSeconds pinLifetime = null; + private String spaceToken = null; + private TReturnStatus status = null; + private String errstring = null; + private Integer remainingTotalTime = null; + private Integer nbreqfiles = null; + private Integer numOfCompleted = null; + private TLifeTimeInSeconds fileLifetime = null; + private Integer deferredStartTime = null; + private Integer numOfWaiting = null; + private Integer numOfFailed = null; + private Integer remainingDeferredStartTime = null; + + public RequestSummaryData(TRequestType rtype, TRequestToken rtoken, GridUserInterface gu) + throws InvalidRequestSummaryDataAttributesException { + + boolean ok = rtype != null && rtoken != null && gu != null; + if (!ok) + throw new InvalidRequestSummaryDataAttributesException(rtype, rtoken, gu); + this.requestType = rtype; + this.requestToken = rtoken; + this.gu = gu; + } + + 
/** + * Method that returns the type of SRM request + */ + public TRequestType requestType() { + + return requestType; + } + + /** + * Method that returns the SRM request TRequestToken + */ + public TRequestToken requestToken() { + + return requestToken; + } + + /** + * Method that returns the VomsGridUser that issued this request + */ + public GridUserInterface gridUser() { + + return gu; + } + + /** + * Method that returns a long corresponding to the identifier of This object in persistence. + */ + public long primaryKey() { + + return id; + } + + /** + * Method used to set the long corresponding to the identifier of This object in persistence. + */ + public void setPrimaryKey(long l) { + + this.id = l; + } + + /** + * @return the userToken + */ + public String getUserToken() { + + return userToken; + } + + /** + * @return the retrytime + */ + public Integer getRetrytime() { + + return retrytime; + } + + /** + * @return the pinLifetime + */ + public TLifeTimeInSeconds getPinLifetime() { + + return pinLifetime; + } + + /** + * @return the spaceToken + */ + public String getSpaceToken() { + + return spaceToken; + } + + /** + * @return the status + */ + public TReturnStatus getStatus() { + + return status; + } + + /** + * @return the errstring + */ + public String getErrstring() { + + return errstring; + } + + /** + * @return the remainingTotalTime + */ + public Integer getRemainingTotalTime() { + + return remainingTotalTime; + } + + /** + * @return the nbreqfiles + */ + public Integer getNbreqfiles() { + + return nbreqfiles; + } + + /** + * @return the numOfCompleted + */ + public Integer getNumOfCompleted() { + + return numOfCompleted; + } + + /** + * @return the fileLifetime + */ + public TLifeTimeInSeconds getFileLifetime() { + + return fileLifetime; + } + + /** + * @return the deferredStartTime + */ + public Integer getDeferredStartTime() { + + return deferredStartTime; + } + + /** + * @return the numOfWaiting + */ + public Integer getNumOfWaiting() { + + return 
numOfWaiting; + } + + /** + * @return the numOfFailed + */ + public Integer getNumOfFailed() { + + return numOfFailed; + } + + /** + * @return the remainingDeferredStartTime + */ + public Integer getRemainingDeferredStartTime() { + + return remainingDeferredStartTime; + } + + public void setUserToken(String userToken) { + + this.userToken = userToken; + } + + public void setRetrytime(Integer retrytime) { + + this.retrytime = retrytime; + + } + + public void setPinLifetime(TLifeTimeInSeconds pinLifetime) { + + this.pinLifetime = pinLifetime; + + } + + public void setSpaceToken(String spaceToken) { + + this.spaceToken = spaceToken; + + } + + public void setStatus(TReturnStatus status) { + + this.status = status; + + } + + public void setErrstring(String errstring) { + + this.errstring = errstring; + + } + + public void setRemainingTotalTime(Integer remainingTotalTime) { + + this.remainingTotalTime = remainingTotalTime; + + } + + public void setNbreqfiles(Integer nbreqfiles) { + + this.nbreqfiles = nbreqfiles; + + } + + public void setNumOfCompleted(Integer numOfCompleted) { + + this.numOfCompleted = numOfCompleted; + + } + + public void setFileLifetime(TLifeTimeInSeconds fileLifetime) { + + this.fileLifetime = fileLifetime; + + } + + public void setDeferredStartTime(Integer deferredStartTime) { + + this.deferredStartTime = deferredStartTime; + + } + + public void setNumOfWaiting(Integer numOfWaiting) { + + this.numOfWaiting = numOfWaiting; + + } + + public void setNumOfFailed(Integer numOfFailed) { + + this.numOfFailed = numOfFailed; + + } + + public void setRemainingDeferredStartTime(Integer remainingDeferredStartTime) { + + this.remainingDeferredStartTime = remainingDeferredStartTime; + + } + + /* + * (non-Javadoc) + * + * @see java.lang.Object#toString() + */ + @Override + public String toString() { + + StringBuilder builder = new StringBuilder(); + builder.append("RequestSummaryData [requestType="); + builder.append(requestType); + builder.append(", 
requestToken="); + builder.append(requestToken); + builder.append(", gu="); + builder.append(gu); + builder.append(", id="); + builder.append(id); + builder.append(", userToken="); + builder.append(userToken); + builder.append(", retrytime="); + builder.append(retrytime); + builder.append(", pinLifetime="); + builder.append(pinLifetime); + builder.append(", spaceToken="); + builder.append(spaceToken); + builder.append(", status="); + builder.append(status); + builder.append(", errstring="); + builder.append(errstring); + builder.append(", remainingTotalTime="); + builder.append(remainingTotalTime); + builder.append(", nbreqfiles="); + builder.append(nbreqfiles); + builder.append(", numOfCompleted="); + builder.append(numOfCompleted); + builder.append(", fileLifetime="); + builder.append(fileLifetime); + builder.append(", deferredStartTime="); + builder.append(deferredStartTime); + builder.append(", numOfWaiting="); + builder.append(numOfWaiting); + builder.append(", numOfFailed="); + builder.append(numOfFailed); + builder.append(", remainingDeferredStartTime="); + builder.append(remainingDeferredStartTime); + builder.append("]"); + return builder.toString(); + } + + /* + * (non-Javadoc) + * + * @see java.lang.Object#hashCode() + */ + @Override + public int hashCode() { + + final int prime = 31; + int result = 1; + result = prime * result + ((deferredStartTime == null) ? 0 : deferredStartTime.hashCode()); + result = prime * result + ((errstring == null) ? 0 : errstring.hashCode()); + result = prime * result + ((fileLifetime == null) ? 0 : fileLifetime.hashCode()); + result = prime * result + ((gu == null) ? 0 : gu.hashCode()); + result = prime * result + (int) (id ^ (id >>> 32)); + result = prime * result + ((nbreqfiles == null) ? 0 : nbreqfiles.hashCode()); + result = prime * result + ((numOfCompleted == null) ? 0 : numOfCompleted.hashCode()); + result = prime * result + ((numOfFailed == null) ? 
0 : numOfFailed.hashCode()); + result = prime * result + ((numOfWaiting == null) ? 0 : numOfWaiting.hashCode()); + result = prime * result + ((pinLifetime == null) ? 0 : pinLifetime.hashCode()); + result = prime * result + + ((remainingDeferredStartTime == null) ? 0 : remainingDeferredStartTime.hashCode()); + result = prime * result + ((remainingTotalTime == null) ? 0 : remainingTotalTime.hashCode()); + result = prime * result + ((requestToken == null) ? 0 : requestToken.hashCode()); + result = prime * result + ((requestType == null) ? 0 : requestType.hashCode()); + result = prime * result + ((retrytime == null) ? 0 : retrytime.hashCode()); + result = prime * result + ((spaceToken == null) ? 0 : spaceToken.hashCode()); + result = prime * result + ((status == null) ? 0 : status.hashCode()); + result = prime * result + ((userToken == null) ? 0 : userToken.hashCode()); + return result; + } + + /* + * (non-Javadoc) + * + * @see java.lang.Object#equals(java.lang.Object) + */ + @Override + public boolean equals(Object obj) { + + if (this == obj) { + return true; + } + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + RequestSummaryData other = (RequestSummaryData) obj; + if (deferredStartTime == null) { + if (other.deferredStartTime != null) { + return false; + } + } else if (!deferredStartTime.equals(other.deferredStartTime)) { + return false; + } + if (errstring == null) { + if (other.errstring != null) { + return false; + } + } else if (!errstring.equals(other.errstring)) { + return false; + } + if (fileLifetime == null) { + if (other.fileLifetime != null) { + return false; + } + } else if (!fileLifetime.equals(other.fileLifetime)) { + return false; + } + if (gu == null) { + if (other.gu != null) { + return false; + } + } else if (!gu.equals(other.gu)) { + return false; + } + if (id != other.id) { + return false; + } + if (nbreqfiles == null) { + if (other.nbreqfiles != null) { + return false; + } + } else if 
(!nbreqfiles.equals(other.nbreqfiles)) { + return false; + } + if (numOfCompleted == null) { + if (other.numOfCompleted != null) { + return false; + } + } else if (!numOfCompleted.equals(other.numOfCompleted)) { + return false; + } + if (numOfFailed == null) { + if (other.numOfFailed != null) { + return false; + } + } else if (!numOfFailed.equals(other.numOfFailed)) { + return false; + } + if (numOfWaiting == null) { + if (other.numOfWaiting != null) { + return false; + } + } else if (!numOfWaiting.equals(other.numOfWaiting)) { + return false; + } + if (pinLifetime == null) { + if (other.pinLifetime != null) { + return false; + } + } else if (!pinLifetime.equals(other.pinLifetime)) { + return false; + } + if (remainingDeferredStartTime == null) { + if (other.remainingDeferredStartTime != null) { + return false; + } + } else if (!remainingDeferredStartTime.equals(other.remainingDeferredStartTime)) { + return false; + } + if (remainingTotalTime == null) { + if (other.remainingTotalTime != null) { + return false; + } + } else if (!remainingTotalTime.equals(other.remainingTotalTime)) { + return false; + } + if (requestToken == null) { + if (other.requestToken != null) { + return false; + } + } else if (!requestToken.equals(other.requestToken)) { + return false; + } + if (requestType != other.requestType) { + return false; + } + if (retrytime == null) { + if (other.retrytime != null) { + return false; + } + } else if (!retrytime.equals(other.retrytime)) { + return false; + } + if (spaceToken == null) { + if (other.spaceToken != null) { + return false; + } + } else if (!spaceToken.equals(other.spaceToken)) { + return false; + } + if (status == null) { + if (other.status != null) { + return false; + } + } else if (!status.equals(other.status)) { + return false; + } + if (userToken == null) { + if (other.userToken != null) { + return false; + } + } else if (!userToken.equals(other.userToken)) { + return false; + } + return true; + } + +} diff --git 
a/src/main/java/it/grid/storm/persistence/model/RequestSummaryDataTO.java b/src/main/java/it/grid/storm/persistence/model/RequestSummaryDataTO.java new file mode 100644 index 000000000..911148f38 --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/model/RequestSummaryDataTO.java @@ -0,0 +1,533 @@ +/* + * + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package it.grid.storm.persistence.model; + +import java.sql.Timestamp; + +/** + * Class that represents data of an asynchronous Request, regardless of whether it is a Put, Get or + * Copy, in the Persistence Layer: this is all raw data referring to the request proper, that is, + * String and primitive types. 
+ * + * @author EGRID ICTP + * @version 2.0 + * @date June 2005 + */ +public class RequestSummaryDataTO { + + public static final String PTG_REQUEST_TYPE = "PTG"; + public static final String PTP_REQUEST_TYPE = "PTP"; + public static final String BOL_REQUEST_TYPE = "BOL"; + public static final String COPY_REQUEST_TYPE = "COP"; + + private long id = -1; // id of request in persistence + private String requestType = ""; // request type + private String requestToken = ""; // request token + private String clientDN = ""; // DN that issued request + private String vomsAttributes = ""; // String containing all VOMS attributes + private Timestamp timestamp = null; + + private boolean empty = true; + private String userToken = null; + private Integer retrytime = null; + private Integer pinLifetime = null; + private String spaceToken = null; + private Integer status = null; + private String errstring = null; + private Integer remainingTotalTime = null; + private Integer nbreqfiles = null; + private Integer numOfCompleted = null; + private Integer fileLifetime = null; + private Integer deferredStartTime = null; + private Integer numOfWaiting = null; + private Integer numOfFailed = null; + private Integer remainingDeferredStartTime = null; + + public boolean isEmpty() { + + return empty; + } + + public long primaryKey() { + + return id; + } + + public void setPrimaryKey(long l) { + + empty = false; + id = l; + } + + public String requestType() { + + return requestType; + } + + public void setRequestType(String s) { + + empty = false; + requestType = s; + } + + public String requestToken() { + + return requestToken; + } + + public void setRequestToken(String s) { + + empty = false; + requestToken = s; + } + + public String clientDN() { + + return clientDN; + } + + public void setClientDN(String s) { + + empty = false; + clientDN = s; + } + + public String vomsAttributes() { + + return vomsAttributes; + } + + public void setVomsAttributes(String s) { + + empty = false; + 
vomsAttributes = s; + } + + public Timestamp timestamp() { + + return timestamp; + } + + public void setTimestamp(Timestamp timestamp) { + + empty = false; + this.timestamp = timestamp; + } + + /** + * @return the userToken + */ + public String getUserToken() { + + return userToken; + } + + /** + * @return the retrytime + */ + public Integer getRetrytime() { + + return retrytime; + } + + /** + * @return the pinLifetime + */ + public Integer getPinLifetime() { + + return pinLifetime; + } + + /** + * @return the spaceToken + */ + public String getSpaceToken() { + + return spaceToken; + } + + /** + * @return the status + */ + public Integer getStatus() { + + return status; + } + + /** + * @return the errstring + */ + public String getErrstring() { + + return errstring; + } + + /** + * @return the remainingTotalTime + */ + public Integer getRemainingTotalTime() { + + return remainingTotalTime; + } + + /** + * @return the nbreqfiles + */ + public Integer getNbreqfiles() { + + return nbreqfiles; + } + + /** + * @return the numOfCompleted + */ + public Integer getNumOfCompleted() { + + return numOfCompleted; + } + + /** + * @return the fileLifetime + */ + public Integer getFileLifetime() { + + return fileLifetime; + } + + /** + * @return the deferredStartTime + */ + public Integer getDeferredStartTime() { + + return deferredStartTime; + } + + /** + * @return the numOfWaiting + */ + public Integer getNumOfWaiting() { + + return numOfWaiting; + } + + /** + * @return the numOfFailed + */ + public Integer getNumOfFailed() { + + return numOfFailed; + } + + /** + * @return the remainingDeferredStartTime + */ + public Integer getRemainingDeferredStartTime() { + + return remainingDeferredStartTime; + } + + public void setUserToken(String userToken) { + + this.userToken = userToken; + } + + public void setRetrytime(Integer retrytime) { + + this.retrytime = retrytime; + + } + + public void setPinLifetime(Integer pinLifetime) { + + this.pinLifetime = pinLifetime; + + } + + public 
void setSpaceToken(String spaceToken) { + + this.spaceToken = spaceToken; + + } + + public void setStatus(Integer status) { + + this.status = status; + + } + + public void setErrstring(String errstring) { + + this.errstring = errstring; + + } + + public void setRemainingTotalTime(Integer remainingTotalTime) { + + this.remainingTotalTime = remainingTotalTime; + + } + + public void setNbreqfiles(Integer nbreqfiles) { + + this.nbreqfiles = nbreqfiles; + + } + + public void setNumOfCompleted(Integer numOfCompleted) { + + this.numOfCompleted = numOfCompleted; + + } + + public void setFileLifetime(Integer fileLifetime) { + + this.fileLifetime = fileLifetime; + + } + + public void setDeferredStartTime(Integer deferredStartTime) { + + this.deferredStartTime = deferredStartTime; + + } + + public void setNumOfWaiting(Integer numOfWaiting) { + + this.numOfWaiting = numOfWaiting; + + } + + public void setNumOfFailed(Integer numOfFailed) { + + this.numOfFailed = numOfFailed; + + } + + public void setRemainingDeferredStartTime(Integer remainingDeferredStartTime) { + + this.remainingDeferredStartTime = remainingDeferredStartTime; + + } + + /* + * (non-Javadoc) + * + * @see java.lang.Object#toString() + */ + @Override + public String toString() { + + StringBuilder builder = new StringBuilder(); + builder.append("RequestSummaryDataTO [id="); + builder.append(id); + builder.append(", requestType="); + builder.append(requestType); + builder.append(", requestToken="); + builder.append(requestToken); + builder.append(", clientDN="); + builder.append(clientDN); + builder.append(", vomsAttributes="); + builder.append(vomsAttributes); + builder.append(", timestamp="); + builder.append(timestamp); + builder.append(", empty="); + builder.append(empty); + builder.append(", userToken="); + builder.append(userToken); + builder.append(", retrytime="); + builder.append(retrytime); + builder.append(", pinLifetime="); + builder.append(pinLifetime); + builder.append(", spaceToken="); + 
builder.append(spaceToken); + builder.append(", status="); + builder.append(status); + builder.append(", errstring="); + builder.append(errstring); + builder.append(", remainingTotalTime="); + builder.append(remainingTotalTime); + builder.append(", nbreqfiles="); + builder.append(nbreqfiles); + builder.append(", numOfCompleted="); + builder.append(numOfCompleted); + builder.append(", fileLifetime="); + builder.append(fileLifetime); + builder.append(", deferredStartTime="); + builder.append(deferredStartTime); + builder.append(", numOfWaiting="); + builder.append(numOfWaiting); + builder.append(", numOfFailed="); + builder.append(numOfFailed); + builder.append(", remainingDeferredStartTime="); + builder.append(remainingDeferredStartTime); + builder.append("]"); + return builder.toString(); + } + + /* + * (non-Javadoc) + * + * @see java.lang.Object#hashCode() + */ + @Override + public int hashCode() { + + final int prime = 31; + int result = 1; + result = prime * result + ((clientDN == null) ? 0 : clientDN.hashCode()); + result = prime * result + (int) (deferredStartTime ^ (deferredStartTime >>> 32)); + result = prime * result + (empty ? 1231 : 1237); + result = prime * result + ((errstring == null) ? 0 : errstring.hashCode()); + result = prime * result + (int) (fileLifetime ^ (fileLifetime >>> 32)); + result = prime * result + (int) (id ^ (id >>> 32)); + result = prime * result + (int) (nbreqfiles ^ (nbreqfiles >>> 32)); + result = prime * result + (int) (numOfCompleted ^ (numOfCompleted >>> 32)); + result = prime * result + (int) (numOfFailed ^ (numOfFailed >>> 32)); + result = prime * result + (int) (numOfWaiting ^ (numOfWaiting >>> 32)); + result = prime * result + (int) (pinLifetime ^ (pinLifetime >>> 32)); + result = + prime * result + (int) (remainingDeferredStartTime ^ (remainingDeferredStartTime >>> 32)); + result = prime * result + (int) (remainingTotalTime ^ (remainingTotalTime >>> 32)); + result = prime * result + ((requestToken == null) ? 
0 : requestToken.hashCode()); + result = prime * result + ((requestType == null) ? 0 : requestType.hashCode()); + result = prime * result + (int) (retrytime ^ (retrytime >>> 32)); + result = prime * result + ((spaceToken == null) ? 0 : spaceToken.hashCode()); + result = prime * result + (int) (status ^ (status >>> 32)); + result = prime * result + ((timestamp == null) ? 0 : timestamp.hashCode()); + result = prime * result + ((userToken == null) ? 0 : userToken.hashCode()); + result = prime * result + ((vomsAttributes == null) ? 0 : vomsAttributes.hashCode()); + return result; + } + + /* + * (non-Javadoc) + * + * @see java.lang.Object#equals(java.lang.Object) + */ + @Override + public boolean equals(Object obj) { + + if (this == obj) { + return true; + } + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + RequestSummaryDataTO other = (RequestSummaryDataTO) obj; + if (clientDN == null) { + if (other.clientDN != null) { + return false; + } + } else if (!clientDN.equals(other.clientDN)) { + return false; + } + if (deferredStartTime != other.deferredStartTime) { + return false; + } + if (empty != other.empty) { + return false; + } + if (errstring == null) { + if (other.errstring != null) { + return false; + } + } else if (!errstring.equals(other.errstring)) { + return false; + } + if (fileLifetime != other.fileLifetime) { + return false; + } + if (id != other.id) { + return false; + } + if (nbreqfiles != other.nbreqfiles) { + return false; + } + if (numOfCompleted != other.numOfCompleted) { + return false; + } + if (numOfFailed != other.numOfFailed) { + return false; + } + if (numOfWaiting != other.numOfWaiting) { + return false; + } + if (pinLifetime != other.pinLifetime) { + return false; + } + if (remainingDeferredStartTime != other.remainingDeferredStartTime) { + return false; + } + if (remainingTotalTime != other.remainingTotalTime) { + return false; + } + if (requestToken == null) { + if (other.requestToken != null) 
{ + return false; + } + } else if (!requestToken.equals(other.requestToken)) { + return false; + } + if (requestType == null) { + if (other.requestType != null) { + return false; + } + } else if (!requestType.equals(other.requestType)) { + return false; + } + if (retrytime != other.retrytime) { + return false; + } + if (spaceToken == null) { + if (other.spaceToken != null) { + return false; + } + } else if (!spaceToken.equals(other.spaceToken)) { + return false; + } + if (status != other.status) { + return false; + } + if (timestamp == null) { + if (other.timestamp != null) { + return false; + } + } else if (!timestamp.equals(other.timestamp)) { + return false; + } + if (userToken == null) { + if (other.userToken != null) { + return false; + } + } else if (!userToken.equals(other.userToken)) { + return false; + } + if (vomsAttributes == null) { + if (other.vomsAttributes != null) { + return false; + } + } else if (!vomsAttributes.equals(other.vomsAttributes)) { + return false; + } + return true; + } + +} diff --git a/src/main/java/it/grid/storm/persistence/model/RequestSummaryTO.java b/src/main/java/it/grid/storm/persistence/model/RequestSummaryTO.java deleted file mode 100644 index 5855875c4..000000000 --- a/src/main/java/it/grid/storm/persistence/model/RequestSummaryTO.java +++ /dev/null @@ -1,265 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ - -package it.grid.storm.persistence.model; - -import it.grid.storm.srm.types.TRequestToken; -import it.grid.storm.srm.types.TRequestType; - -/** - * This class represents the SummaryData associated with the SRM request, that - * is it contains info about: TRequestToken, TRequsetType, total files in this - * request, number of files in queue, number of files progressing, number of - * files finished, and whether the request is currently suspended. - * - * @author EGRID - ICTP Trieste - * @date March 18th, 2005 - * @version 3.0 - */ -public class RequestSummaryTO { - - private TRequestToken requestToken = null; // TRequestToken of SRM request - private TRequestType requestType = null; // request type of SRM request - private int totalFilesInThisRequest = 0; // total number of files in SRM - // request - private int numOfQueuedRequests = 0; // number of files in SRM request that - // are in queue - private int numOfProgressingRequests = 0; // number of files in SRM request - // that are still in progress - private int numFinished = 0; // number of files in SRM request whose - // processing has completed - private boolean isSuspended = false; // flag that indicates whether the SRM - // request is suspended - - public RequestSummaryTO(TRequestToken requestToken, TRequestType requestType, - int totalFilesInThisRequest, int numOfQueuedRequests, - int numOfProgressingRequests, int numFinished, boolean isSuspended) - throws InvalidRequestSummaryDataAttributesException { - - boolean ok = requestToken != null && requestType != null - && totalFilesInThisRequest >= 0 && numOfQueuedRequests >= 0 - && numOfProgressingRequests >= 0 && numFinished >= 0; - if (!ok) - throw new InvalidRequestSummaryDataAttributesException(requestToken, - requestType, totalFilesInThisRequest, numOfQueuedRequests, - numOfProgressingRequests, numFinished); - this.requestToken = requestToken; - this.requestType = requestType; - this.totalFilesInThisRequest = totalFilesInThisRequest; - 
this.numOfQueuedRequests = numOfQueuedRequests; - this.numOfProgressingRequests = numOfProgressingRequests; - this.numFinished = numFinished; - this.isSuspended = isSuspended; - } - - /** - * Method that returns the SRM request TRequestToken - */ - public TRequestToken requestToken() { - - return requestToken; - } - - /** - * Method that returns the type of SRM request - */ - public TRequestType requestType() { - - return requestType; - } - - /** - * Method that returns the total number of files in the SRM request - */ - public int totalFilesInThisRequest() { - - return totalFilesInThisRequest; - } - - /** - * Method that returns the number of files in the SRM request that are - * currently in queue. - */ - public int numOfQueuedRequests() { - - return numOfQueuedRequests; - } - - /** - * Method that returns the number of files in the SRM request that are - * currently in progress. - */ - public int numOfProgressingRequests() { - - return numOfProgressingRequests; - } - - /** - * Method that returns the number of files in the SRM request that are - * currently finished. - */ - public int numFinished() { - - return numFinished; - } - - /** - * Method that tells whether the SRM requst is suspended. - */ - public boolean isSuspended() { - - return isSuspended; - } - - /** - * Method that increments the counter for the number of files in queue. - */ - public void incNumOfQueuedRequests() { - - numOfQueuedRequests++; - } - - /** - * Methos used to decrement the counter fo the number of files in queue. - */ - public void decNumOfQueuedRequests() { - - numOfQueuedRequests--; - } - - /** - * Method used to increment the counter for the number of progressing - * requests. - */ - public void incNumOfProgressingRequests() { - - numOfProgressingRequests++; - } - - /** - * Method used to decrement the counter for the number of progressing - * requests. 
- */ - public void decNumOfProgressingRequests() { - - numOfProgressingRequests--; - } - - /** - * Method used to increment the counter for the number of total files in the - * request. - */ - public void incTotalFilesInThisRequest() { - - totalFilesInThisRequest++; - } - - /** - * Method used to decrement the counter fot the number of total files in this - * request. - */ - public void decTotalFilesInThisRequest() { - - totalFilesInThisRequest--; - } - - /** - * Method used to increment the counter for the processing of files that are - * currently finished. - */ - public void incNumFinished() { - - numFinished++; - } - - /** - * Method used to decrement the counter that keeps track of the number of - * files that are currently finished. - */ - public void decNumFinished() { - - numFinished--; - } - - /** - * Method used to set the SRM flag that signals the processing of the request - * this RequestSummaryData applies to, is suspended. - */ - public void srmSuspend() { - - isSuspended = true; - } - - /** - * Method used to set the SRM flag that signals the procesing of the request - * this RequestSummaryData applies to, is _not_ suspended - */ - public void srmUnSuspend() { - - isSuspended = false; - } - - public String toString() { - - StringBuilder sb = new StringBuilder(); - sb.append("SummaryRequestData"); - sb.append("; requestToken="); - sb.append(requestToken); - sb.append("; requestType="); - sb.append(requestType); - sb.append("; totalFilesInThisRequest="); - sb.append(totalFilesInThisRequest); - sb.append("; numOfQueuedRequests="); - sb.append(numOfQueuedRequests); - sb.append("; numOfProgressingRequests="); - sb.append(numOfProgressingRequests); - sb.append("; numFinished="); - sb.append(numFinished); - sb.append("; isSuspended="); - sb.append(isSuspended); - sb.append("."); - return sb.toString(); - } - - public int hashCode() { - - int hash = 17; - hash = 37 * hash + requestToken.hashCode(); - hash = 37 * hash + requestType.hashCode(); - hash = 37 * 
hash + totalFilesInThisRequest; - hash = 37 * hash + numOfQueuedRequests; - hash = 37 * hash + numOfProgressingRequests; - hash = 37 * hash + numFinished; - hash = (isSuspended) ? (37 * hash + 1) : (37 * hash + 0); - return hash; - } - - public boolean equals(Object o) { - - if (o == this) - return true; - if (!(o instanceof RequestSummaryTO)) - return false; - RequestSummaryTO rsd = (RequestSummaryTO) o; - return requestToken.equals(rsd.requestToken) - && requestType.equals(rsd.requestType) - && (totalFilesInThisRequest == rsd.totalFilesInThisRequest) - && (numOfQueuedRequests == rsd.numOfQueuedRequests) - && (numOfProgressingRequests == rsd.numOfProgressingRequests) - && (numFinished == rsd.numFinished) && (isSuspended == rsd.isSuspended); - } -} diff --git a/src/main/java/it/grid/storm/persistence/model/ResourceRuleData.java b/src/main/java/it/grid/storm/persistence/model/ResourceRuleData.java deleted file mode 100644 index f517b4545..000000000 --- a/src/main/java/it/grid/storm/persistence/model/ResourceRuleData.java +++ /dev/null @@ -1,21 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ - -package it.grid.storm.persistence.model; - -public class ResourceRuleData { -} diff --git a/src/main/java/it/grid/storm/persistence/model/SQLHelper.java b/src/main/java/it/grid/storm/persistence/model/SQLHelper.java new file mode 100644 index 000000000..a5e3b8f55 --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/model/SQLHelper.java @@ -0,0 +1,72 @@ +/* + * + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +package it.grid.storm.persistence.model; + +import it.grid.storm.persistence.pool.MySqlFormat; +import it.grid.storm.persistence.pool.SQLFormat; + +public abstract class SQLHelper { + + private final SQLFormat formatter = new MySqlFormat(); + + public String format(Object value) { + + return formatter.format(value); + } + + /** + * + * @param value boolean + * @return String + */ + public String format(boolean value) { + + return formatter.format(new Boolean(value)); + } + + /** + * + * @param value int + * @return String + */ + public String format(int value) throws NumberFormatException { + + return formatter.format(new Integer(value)); + } + + /** + * + * @param value long + * @return String + */ + public String format(long value) throws NumberFormatException { + + return formatter.format(new Long(value)); + } + + /** + * + * @param date Date + * @return String + */ + public String format(java.util.Date date) { + + return formatter.format(date); + } + +} diff --git a/src/main/java/it/grid/storm/persistence/model/StorageSpaceTO.java b/src/main/java/it/grid/storm/persistence/model/StorageSpaceTO.java index dd2e68809..6c640d559 100644 --- a/src/main/java/it/grid/storm/persistence/model/StorageSpaceTO.java +++ b/src/main/java/it/grid/storm/persistence/model/StorageSpaceTO.java @@ -43,475 +43,466 @@ */ public class StorageSpaceTO implements Serializable, Comparable { - private static final long serialVersionUID = -87317982494792808L; - - private static final Logger log = LoggerFactory - .getLogger(StorageSpaceTO.class); - - // ----- PRIMARY KEY ----// - private Long storageSpaceId = null; // Persistence Object IDentifier - - // ----- FIELDS ----// - private String ownerName = null; - private String voName = null; - private String spaceType = null; // `SPACE_TYPE` VARCHAR(10) NOT NULL default - // '' - private String alias = null; - private String spaceToken = null; - private String spaceFile = null; // `SPACE_FILE` VARCHAR(145) NOT NULL default - // '' - 
private long lifetime = -1L; // `LIFETIME` bigint(20) default NULL - private String storageInfo = null;// `STORAGE_INFO` VARCHAR(255) default NULL - private Date created = new Date(); - - private long totalSize = 0L; // `TOTAL_SIZE` bigint(20) NOT NULL default '0' - private long guaranteedSize = 0L; // `GUAR_SIZE` bigint(20) NOT NULL default - // '0' - private long freeSize = 0L; // `FREE_SIZE` bigint(20) default NULL - - private long usedSize = -1L; // `USED_SIZE` bigint(20) NOT NULL default '-1' - private long busySize = -1L; // `BUSY_SIZE` bigint(20) NOT NULL default '-1' - private long unavailableSize = -1L; // `UNAVAILABLE_SIZE` bigint(20) NOT NULL - // default '-1' - private long availableSize = -1L; // `AVAILABLE_SIZE` bigint(20) NOT NULL - // default '-1' - private long reservedSize = -1L; // `RESERVED_SIZE` bigint(20) NOT NULL - // default '-1' - private Date updateTime = null; - - // ********************** Constructor methods ********************** // - - /** - * No-arg constructor for JavaBean tools. 
- */ - public StorageSpaceTO() { - - super(); - } - - /** - * Constructor from Domain Object StorageSpaceData - * - * @param spaceData - * SpaceData - */ - public StorageSpaceTO(StorageSpaceData spaceData) { - - if (spaceData != null) { - log.debug("Building StorageSpaceTO with {}" , spaceData); - if (spaceData.getOwner() != null) { - ownerName = spaceData.getOwner().getDn(); - voName = getVOName(spaceData.getOwner()); - } - if (spaceData.getSpaceType() != null) { - spaceType = (spaceData.getSpaceType()).getValue(); - } - alias = spaceData.getSpaceTokenAlias(); - if (spaceData.getSpaceToken() != null) { - spaceToken = spaceData.getSpaceToken().getValue(); - } - spaceFile = spaceData.getSpaceFileNameString(); - if (spaceData.getTotalSpaceSize() != null) { - totalSize = spaceData.getTotalSpaceSize().value(); - } - if (spaceData.getTotalGuaranteedSize() != null) { - guaranteedSize = spaceData.getTotalGuaranteedSize().value(); - } - if (spaceData.getAvailableSpaceSize() != null) { - availableSize = spaceData.getAvailableSpaceSize().value(); - } - if (spaceData.getUsedSpaceSize() != null) { - usedSize = spaceData.getUsedSpaceSize().value(); - } - if (spaceData.getFreeSpaceSize() != null) { - freeSize = spaceData.getFreeSpaceSize().value(); - } - if (spaceData.getUnavailableSpaceSize() != null) { - unavailableSize = spaceData.getUnavailableSpaceSize().value(); - } - if (spaceData.getBusySpaceSize() != null) { - busySize = spaceData.getBusySpaceSize().value(); - } - if (spaceData.getReservedSpaceSize() != null) { - reservedSize = spaceData.getReservedSpaceSize().value(); - } - if (spaceData.getLifeTime() != null) { - lifetime = spaceData.getLifeTime().value(); - } - if (spaceData.getStorageInfo() != null) { - storageInfo = spaceData.getStorageInfo().getValue(); - } - if (spaceData.getCreationDate() != null) { - created = spaceData.getCreationDate(); - } - } - } - - // ************ HELPER Method *************** // - private String getVOName(GridUserInterface maker) { + 
private static final long serialVersionUID = -87317982494792808L; + + private static final Logger log = LoggerFactory.getLogger(StorageSpaceTO.class); + + // ----- PRIMARY KEY ----// + private Long storageSpaceId = null; // Persistence Object IDentifier + + // ----- FIELDS ----// + private String ownerName = null; + private String voName = null; + private String spaceType = null; // `SPACE_TYPE` VARCHAR(10) NOT NULL default + // '' + private String alias = null; + private String spaceToken = null; + private String spaceFile = null; // `SPACE_FILE` VARCHAR(145) NOT NULL default + // '' + private long lifetime = -1L; // `LIFETIME` bigint(20) default NULL + private String storageInfo = null;// `STORAGE_INFO` VARCHAR(255) default NULL + private Date created = new Date(); + + private long totalSize = 0L; // `TOTAL_SIZE` bigint(20) NOT NULL default '0' + private long guaranteedSize = 0L; // `GUAR_SIZE` bigint(20) NOT NULL default + // '0' + private long freeSize = 0L; // `FREE_SIZE` bigint(20) default NULL + + private long usedSize = -1L; // `USED_SIZE` bigint(20) NOT NULL default '-1' + private long busySize = -1L; // `BUSY_SIZE` bigint(20) NOT NULL default '-1' + private long unavailableSize = -1L; // `UNAVAILABLE_SIZE` bigint(20) NOT NULL + // default '-1' + private long availableSize = -1L; // `AVAILABLE_SIZE` bigint(20) NOT NULL + // default '-1' + private long reservedSize = -1L; // `RESERVED_SIZE` bigint(20) NOT NULL + // default '-1' + private Date updateTime = null; + + // ********************** Constructor methods ********************** // + + /** + * No-arg constructor for JavaBean tools. 
+ */ + public StorageSpaceTO() { + + super(); + } + + /** + * Constructor from Domain Object StorageSpaceData + * + * @param spaceData SpaceData + */ + public StorageSpaceTO(StorageSpaceData spaceData) { + + if (spaceData != null) { + log.debug("Building StorageSpaceTO with {}", spaceData); + if (spaceData.getOwner() != null) { + ownerName = spaceData.getOwner().getDn(); + voName = getVOName(spaceData.getOwner()); + } + if (spaceData.getSpaceType() != null) { + spaceType = (spaceData.getSpaceType()).getValue(); + } + alias = spaceData.getSpaceTokenAlias(); + if (spaceData.getSpaceToken() != null) { + spaceToken = spaceData.getSpaceToken().getValue(); + } + spaceFile = spaceData.getSpaceFileNameString(); + if (spaceData.getTotalSpaceSize() != null) { + totalSize = spaceData.getTotalSpaceSize().value(); + } + if (spaceData.getTotalGuaranteedSize() != null) { + guaranteedSize = spaceData.getTotalGuaranteedSize().value(); + } + if (spaceData.getAvailableSpaceSize() != null) { + availableSize = spaceData.getAvailableSpaceSize().value(); + } + if (spaceData.getUsedSpaceSize() != null) { + usedSize = spaceData.getUsedSpaceSize().value(); + } + if (spaceData.getFreeSpaceSize() != null) { + freeSize = spaceData.getFreeSpaceSize().value(); + } + if (spaceData.getUnavailableSpaceSize() != null) { + unavailableSize = spaceData.getUnavailableSpaceSize().value(); + } + if (spaceData.getBusySpaceSize() != null) { + busySize = spaceData.getBusySpaceSize().value(); + } + if (spaceData.getReservedSpaceSize() != null) { + reservedSize = spaceData.getReservedSpaceSize().value(); + } + if (spaceData.getLifeTime() != null) { + lifetime = spaceData.getLifeTime().value(); + } + if (spaceData.getStorageInfo() != null) { + storageInfo = spaceData.getStorageInfo().getValue(); + } + if (spaceData.getCreationDate() != null) { + created = spaceData.getCreationDate(); + } + } + } - String voStr = VO.makeNoVo().getValue(); - if (maker instanceof AbstractGridUser) { - voStr = ((AbstractGridUser) 
maker).getVO().getValue(); - } - return voStr; - } + // ************ HELPER Method *************** // + private String getVOName(GridUserInterface maker) { - // ********************** Accessor Methods ********************** // + String voStr = VO.makeNoVo().getValue(); + if (maker instanceof AbstractGridUser) { + voStr = ((AbstractGridUser) maker).getVO().getValue(); + } + return voStr; + } - public Long getStorageSpaceId() { + // ********************** Accessor Methods ********************** // - return storageSpaceId; - } + public Long getStorageSpaceId() { - public void setStorageSpaceId(Long id) { + return storageSpaceId; + } - storageSpaceId = id; - } + public void setStorageSpaceId(Long id) { - // ------------------------------------- + storageSpaceId = id; + } - public String getOwnerName() { + // ------------------------------------- - return ownerName; - } + public String getOwnerName() { - public void setOwnerName(String ownerName) { + return ownerName; + } - this.ownerName = ownerName; - } + public void setOwnerName(String ownerName) { - // ------------------------------------- + this.ownerName = ownerName; + } - public String getVoName() { + // ------------------------------------- - return voName; - } + public String getVoName() { - public void setVoName(String voName) { + return voName; + } - this.voName = voName; - } + public void setVoName(String voName) { - // ------------------------------------- + this.voName = voName; + } - public String getSpaceType() { + // ------------------------------------- - return spaceType; - } + public String getSpaceType() { - public void setSpaceType(String spaceType) { + return spaceType; + } - this.spaceType = spaceType; - } + public void setSpaceType(String spaceType) { - // ------------------------------------- + this.spaceType = spaceType; + } - public long getGuaranteedSize() { + // ------------------------------------- - return guaranteedSize; - } + public long getGuaranteedSize() { - public void 
setGuaranteedSize(long guaranteedSize) { + return guaranteedSize; + } - this.guaranteedSize = guaranteedSize; - } + public void setGuaranteedSize(long guaranteedSize) { - // ------------------------------------- + this.guaranteedSize = guaranteedSize; + } - public long getTotalSize() { + // ------------------------------------- - return totalSize; - } + public long getTotalSize() { - public void setTotalSize(long totalSize) { + return totalSize; + } - this.totalSize = totalSize; - } + public void setTotalSize(long totalSize) { - // ------------------------------------- + this.totalSize = totalSize; + } - public void setSpaceToken(String spaceToken) { + // ------------------------------------- - this.spaceToken = spaceToken; - } + public void setSpaceToken(String spaceToken) { - public String getSpaceToken() { + this.spaceToken = spaceToken; + } - return spaceToken; - } + public String getSpaceToken() { - // ------------------------------------- + return spaceToken; + } - public void setAlias(String alias) { + // ------------------------------------- - this.alias = alias; - } + public void setAlias(String alias) { - public String getAlias() { + this.alias = alias; + } - return alias; - } + public String getAlias() { - // ------------------------------------- + return alias; + } - public void setSpaceFile(String spaceFile) { + // ------------------------------------- - this.spaceFile = spaceFile; - } + public void setSpaceFile(String spaceFile) { - public String getSpaceFile() { + this.spaceFile = spaceFile; + } - return spaceFile; - } + public String getSpaceFile() { - // ------------------------------------- + return spaceFile; + } - public long getLifetime() { + // ------------------------------------- - return lifetime; - } + public long getLifetime() { - public void setLifetime(long lifetime) { + return lifetime; + } - this.lifetime = lifetime; - } + public void setLifetime(long lifetime) { - // ------------------------------------- + this.lifetime = lifetime; + 
} - public String getStorageInfo() { + // ------------------------------------- - return storageInfo; - } + public String getStorageInfo() { - public void setStorageInfo(String storageInfo) { + return storageInfo; + } - this.storageInfo = storageInfo; - } + public void setStorageInfo(String storageInfo) { - // ------------------------------------- + this.storageInfo = storageInfo; + } - public Date getCreated() { + // ------------------------------------- - return created; - } + public Date getCreated() { - public void setCreated(Date date) { + return created; + } - created = date; - } + public void setCreated(Date date) { - // ------------------------------------- + created = date; + } - /** - * @return the freeSize - */ - public final long getFreeSize() { + // ------------------------------------- - return freeSize; - } + /** + * @return the freeSize + */ + public final long getFreeSize() { - /** - * @param freeSize - * the freeSize to set - */ - public final void setFreeSize(long freeSize) { + return freeSize; + } - this.freeSize = freeSize; - } + /** + * @param freeSize the freeSize to set + */ + public final void setFreeSize(long freeSize) { - /** - * @return the usedSize - */ - public final long getUsedSize() { + this.freeSize = freeSize; + } - return usedSize; - } + /** + * @return the usedSize + */ + public final long getUsedSize() { - /** - * @param usedSize - * the usedSize to set - */ - public final void setUsedSize(long usedSize) { + return usedSize; + } - this.usedSize = usedSize; - } + /** + * @param usedSize the usedSize to set + */ + public final void setUsedSize(long usedSize) { - /** - * @return the busySize - */ - public final long getBusySize() { + this.usedSize = usedSize; + } - return busySize; - } + /** + * @return the busySize + */ + public final long getBusySize() { - /** - * @param busySize - * the busySize to set - */ - public final void setBusySize(long busySize) { + return busySize; + } - this.busySize = busySize; - } + /** + * @param 
busySize the busySize to set + */ + public final void setBusySize(long busySize) { - /** - * @return the unavailableSize - */ - public final long getUnavailableSize() { + this.busySize = busySize; + } - return unavailableSize; - } + /** + * @return the unavailableSize + */ + public final long getUnavailableSize() { - /** - * @param unavailableSize - * the unavailableSize to set - */ - public final void setUnavailableSize(long unavailableSize) { + return unavailableSize; + } - this.unavailableSize = unavailableSize; - } - - /** - * @return the reservedSize - */ - public final long getReservedSize() { - - return reservedSize; - } - - /** - * @param reservedSize - * the reservedSize to set - */ - public final void setReservedSize(long reservedSize) { - - this.reservedSize = reservedSize; - } - - /** - * @param availableSize - * the availableSize to set - */ - public void setAvailableSize(long availableSize) { - - this.availableSize = availableSize; - } - - /** - * @return the availableSize - */ - public long getAvailableSize() { - - return availableSize; - } - - // ********************** Common Methods ********************** // - - /** - * @param updateTime - * the updateTime to set - */ - public void setUpdateTime(Date updateTime) { - - this.updateTime = updateTime; - } - - /** - * @return the updateTime - */ - public Date getUpdateTime() { - - return updateTime; - } - - @Override - public boolean equals(Object o) { - - if (o == null) { - return false; - } - if (o instanceof StorageSpaceTO) { - if (this == o) { - return true; - } - final StorageSpaceTO storageSpace = (StorageSpaceTO) o; - if (!spaceToken.equals(storageSpace.getSpaceToken())) { - return false; - } - if (!spaceFile.equals(storageSpace.getSpaceFile())) { - return false; - } - return true; - } else { - return false; - } - } - - @Override - public int hashCode() { - - int hash = 17; - hash = 37 * hash + spaceToken.hashCode(); - return hash; - - } - - @Override - public String toString() { - - 
StringBuilder sb = new StringBuilder(); - sb.append(" ==== STORAGE SPACE (token=" + spaceToken + ") ==== \n"); - sb.append(" STORAGE SPACE ID = " + storageSpaceId); - sb.append("\n"); - sb.append(" OWNER USER NAME = " + ownerName); - sb.append("\n"); - sb.append(" OWNER VO NAME = " + voName); - sb.append("\n"); - sb.append(" SPACE ALIAS NAME = " + alias); - sb.append("\n"); - sb.append(" SPACE TYPE = " + spaceType); - sb.append("\n"); - sb.append(" SPACE TOKEN = " + spaceToken); - sb.append("\n"); - sb.append(" SPACE FILE = " + spaceFile); - sb.append("\n"); - sb.append(" CREATED = " + created); - sb.append("\n"); - sb.append(" TOTAL SIZE = " + totalSize); - sb.append("\n"); - sb.append(" GUARANTEED SIZE = " + guaranteedSize); - sb.append("\n"); - sb.append(" FREE SIZE = " + freeSize); - sb.append("\n"); - sb.append(" USED SIZE = " + usedSize); - sb.append("\n"); - sb.append(" BUSY SIZE = " + busySize); - sb.append("\n"); - sb.append(" AVAILABLE = " + availableSize); - sb.append("\n"); - sb.append(" RESERVED = " + reservedSize); - sb.append("\n"); - sb.append(" UNAVAILABLE = " + unavailableSize); - sb.append("\n"); - sb.append(" LIFETIME (sec) = " + lifetime); - sb.append("\n"); - sb.append(" STORAGE INFO = " + storageInfo); - sb.append("\n"); - sb.append(" UPDATE TIME = " + updateTime); - sb.append("\n"); - sb.append(" NR STOR_FILES = "); - sb.append("\n"); - return sb.toString(); - } - - @Override - public int compareTo(StorageSpaceTO o) { - - if (o instanceof StorageSpaceTO) { - return getCreated().compareTo(((StorageSpaceTO) o).getCreated()); - } - return 0; - } - - // ********************** Business Methods ********************** // + /** + * @param unavailableSize the unavailableSize to set + */ + public final void setUnavailableSize(long unavailableSize) { + + this.unavailableSize = unavailableSize; + } + + /** + * @return the reservedSize + */ + public final long getReservedSize() { + + return reservedSize; + } + + /** + * @param reservedSize the 
reservedSize to set + */ + public final void setReservedSize(long reservedSize) { + + this.reservedSize = reservedSize; + } + + /** + * @param availableSize the availableSize to set + */ + public void setAvailableSize(long availableSize) { + + this.availableSize = availableSize; + } + + /** + * @return the availableSize + */ + public long getAvailableSize() { + + return availableSize; + } + + // ********************** Common Methods ********************** // + + /** + * @param updateTime the updateTime to set + */ + public void setUpdateTime(Date updateTime) { + + this.updateTime = updateTime; + } + + /** + * @return the updateTime + */ + public Date getUpdateTime() { + + return updateTime; + } + + @Override + public boolean equals(Object o) { + + if (o == null) { + return false; + } + if (o instanceof StorageSpaceTO) { + if (this == o) { + return true; + } + final StorageSpaceTO storageSpace = (StorageSpaceTO) o; + if (!spaceToken.equals(storageSpace.getSpaceToken())) { + return false; + } + if (!spaceFile.equals(storageSpace.getSpaceFile())) { + return false; + } + return true; + } else { + return false; + } + } + + @Override + public int hashCode() { + + int hash = 17; + hash = 37 * hash + spaceToken.hashCode(); + return hash; + + } + + @Override + public String toString() { + + StringBuilder sb = new StringBuilder(); + sb.append(" ==== STORAGE SPACE (token=" + spaceToken + ") ==== \n"); + sb.append(" STORAGE SPACE ID = " + storageSpaceId); + sb.append("\n"); + sb.append(" OWNER USER NAME = " + ownerName); + sb.append("\n"); + sb.append(" OWNER VO NAME = " + voName); + sb.append("\n"); + sb.append(" SPACE ALIAS NAME = " + alias); + sb.append("\n"); + sb.append(" SPACE TYPE = " + spaceType); + sb.append("\n"); + sb.append(" SPACE TOKEN = " + spaceToken); + sb.append("\n"); + sb.append(" SPACE FILE = " + spaceFile); + sb.append("\n"); + sb.append(" CREATED = " + created); + sb.append("\n"); + sb.append(" TOTAL SIZE = " + totalSize); + sb.append("\n"); + 
sb.append(" GUARANTEED SIZE = " + guaranteedSize); + sb.append("\n"); + sb.append(" FREE SIZE = " + freeSize); + sb.append("\n"); + sb.append(" USED SIZE = " + usedSize); + sb.append("\n"); + sb.append(" BUSY SIZE = " + busySize); + sb.append("\n"); + sb.append(" AVAILABLE = " + availableSize); + sb.append("\n"); + sb.append(" RESERVED = " + reservedSize); + sb.append("\n"); + sb.append(" UNAVAILABLE = " + unavailableSize); + sb.append("\n"); + sb.append(" LIFETIME (sec) = " + lifetime); + sb.append("\n"); + sb.append(" STORAGE INFO = " + storageInfo); + sb.append("\n"); + sb.append(" UPDATE TIME = " + updateTime); + sb.append("\n"); + sb.append(" NR STOR_FILES = "); + sb.append("\n"); + return sb.toString(); + } + + @Override + public int compareTo(StorageSpaceTO o) { + + if (o instanceof StorageSpaceTO) { + return getCreated().compareTo(((StorageSpaceTO) o).getCreated()); + } + return 0; + } + + // ********************** Business Methods ********************** // } diff --git a/src/main/java/it/grid/storm/catalogs/SurlMultyOperationRequestData.java b/src/main/java/it/grid/storm/persistence/model/SurlMultyOperationRequestData.java similarity index 96% rename from src/main/java/it/grid/storm/catalogs/SurlMultyOperationRequestData.java rename to src/main/java/it/grid/storm/persistence/model/SurlMultyOperationRequestData.java index cc28d7818..8bcbd499f 100644 --- a/src/main/java/it/grid/storm/catalogs/SurlMultyOperationRequestData.java +++ b/src/main/java/it/grid/storm/persistence/model/SurlMultyOperationRequestData.java @@ -1,6 +1,7 @@ -package it.grid.storm.catalogs; +package it.grid.storm.persistence.model; import it.grid.storm.griduser.GridUserInterface; +import it.grid.storm.persistence.exceptions.InvalidSurlRequestDataAttributesException; import it.grid.storm.srm.types.TRequestToken; import it.grid.storm.srm.types.TReturnStatus; import it.grid.storm.srm.types.TSURL; @@ -46,8 +47,7 @@ public synchronized void store() { stored = true; } - private static Map 
buildSurlStatusMap(TSURL surl, - TReturnStatus status) { + private static Map buildSurlStatusMap(TSURL surl, TReturnStatus status) { if (surl == null || status == null) { throw new IllegalArgumentException( diff --git a/src/main/java/it/grid/storm/persistence/model/SurlRequestData.java b/src/main/java/it/grid/storm/persistence/model/SurlRequestData.java new file mode 100644 index 000000000..735ff0450 --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/model/SurlRequestData.java @@ -0,0 +1,264 @@ +/* + * + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +package it.grid.storm.persistence.model; + +import java.util.Map; + +import it.grid.storm.persistence.exceptions.InvalidSurlRequestDataAttributesException; +import it.grid.storm.srm.types.TReturnStatus; +import it.grid.storm.srm.types.TSURL; +import it.grid.storm.srm.types.TStatusCode; + +/** + * @author Michele Dibenedetto + * + */ +public abstract class SurlRequestData implements RequestData { + + protected TSURL SURL; + protected TReturnStatus status; + + public SurlRequestData(TSURL toSURL, TReturnStatus status) + throws InvalidSurlRequestDataAttributesException { + + if (toSURL == null || status == null || status.getStatusCode() == null) { + throw new InvalidSurlRequestDataAttributesException(toSURL, status); + } + this.SURL = toSURL; + this.status = status; + } + + /** + * Method that returns the TURL for this chunk of the srm request. + */ + @Override + public final TSURL getSURL() { + + return SURL; + } + + /** + * Method that returns the status for this chunk of the srm request. + */ + @Override + public final TReturnStatus getStatus() { + + return status; + } + + /** + * Method used to set the Status associated to this chunk. If status is null, then nothing gets + * set! + */ + public void setStatus(TReturnStatus status) { + + if (status != null) { + this.status = status; + } + } + + protected void setStatus(TStatusCode statusCode, String explanation) { + + if (explanation == null) { + status = new TReturnStatus(statusCode); + } else { + status = new TReturnStatus(statusCode, explanation); + } + } + + /** + * Method that sets the status of this request to SRM_REQUEST_QUEUED; it needs the explanation + * String which describes the situation in greater detail; if a null is passed, then an empty + * String is used as explanation. 
+ */ + @Override + public final void changeStatusSRM_REQUEST_QUEUED(String explanation) { + + setStatus(TStatusCode.SRM_REQUEST_QUEUED, explanation); + } + + /** + * Method that sets the status of this request to SRM_REQUEST_INPROGRESS; it needs the explanation + * String which describes the situation in greater detail; if a null is passed, then an empty + * String is used as explanation. + */ + @Override + public final void changeStatusSRM_REQUEST_INPROGRESS(String explanation) { + + setStatus(TStatusCode.SRM_REQUEST_INPROGRESS, explanation); + } + + /** + * Method that sets the status of this request to SRM_SUCCESS; it needs the explanation String + * which describes the situation in greater detail; if a null is passed, then an empty String is + * used as explanation. + */ + @Override + public final void changeStatusSRM_SUCCESS(String explanation) { + + setStatus(TStatusCode.SRM_SUCCESS, explanation); + } + + /** + * Method that sets the status of this request to SRM_INTERNAL_ERROR; it needs the explanation + * String which describes the situation in greater detail; if a null is passed, then an empty + * String is used as explanation. + */ + @Override + public final void changeStatusSRM_INTERNAL_ERROR(String explanation) { + + setStatus(TStatusCode.SRM_INTERNAL_ERROR, explanation); + } + + /** + * Method that sets the status of this request to SRM_INVALID_REQUEST; it needs the explanation + * String which describes the situation in greater detail; if a null is passed, then an empty + * String is used as explanation. + */ + @Override + public final void changeStatusSRM_INVALID_REQUEST(String explanation) { + + setStatus(TStatusCode.SRM_INVALID_REQUEST, explanation); + } + + /** + * Method that sets the status of this request to SRM_AUTHORIZATION_FAILURE; it needs the + * explanation String which describes the situation in greater detail; if a null is passed, then + * an empty String is used as explanation. 
+ */ + @Override + public final void changeStatusSRM_AUTHORIZATION_FAILURE(String explanation) { + + setStatus(TStatusCode.SRM_AUTHORIZATION_FAILURE, explanation); + } + + /** + * Method that sets the status of this request to SRM_ABORTED; it needs the explanation String + * which describes the situation in greater detail; if a null is passed, then an empty String is + * used as explanation. + */ + @Override + public final void changeStatusSRM_ABORTED(String explanation) { + + setStatus(TStatusCode.SRM_ABORTED, explanation); + } + + @Override + public final void changeStatusSRM_FILE_BUSY(String explanation) { + + setStatus(TStatusCode.SRM_FILE_BUSY, explanation); + } + + @Override + public final void changeStatusSRM_INVALID_PATH(String explanation) { + + setStatus(TStatusCode.SRM_INVALID_PATH, explanation); + } + + @Override + public final void changeStatusSRM_NOT_SUPPORTED(String explanation) { + + setStatus(TStatusCode.SRM_NOT_SUPPORTED, explanation); + } + + @Override + public final void changeStatusSRM_FAILURE(String explanation) { + + setStatus(TStatusCode.SRM_FAILURE, explanation); + } + + @Override + public final void changeStatusSRM_SPACE_LIFETIME_EXPIRED(String explanation) { + + setStatus(TStatusCode.SRM_SPACE_LIFETIME_EXPIRED, explanation); + } + + @Override + public String display(Map map) { + + // nonsense method + return ""; + } + + /* + * (non-Javadoc) + * + * @see java.lang.Object#hashCode() + */ + @Override + public int hashCode() { + + final int prime = 31; + int result = 1; + result = prime * result + ((SURL == null) ? 0 : SURL.hashCode()); + result = prime * result + ((status == null) ? 
0 : status.hashCode()); + return result; + } + + /* + * (non-Javadoc) + * + * @see java.lang.Object#equals(java.lang.Object) + */ + @Override + public boolean equals(Object obj) { + + if (this == obj) { + return true; + } + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + SurlRequestData other = (SurlRequestData) obj; + if (SURL == null) { + if (other.SURL != null) { + return false; + } + } else if (!SURL.equals(other.SURL)) { + return false; + } + if (status == null) { + if (other.status != null) { + return false; + } + } else if (!status.equals(other.status)) { + return false; + } + return true; + } + + /* + * (non-Javadoc) + * + * @see java.lang.Object#toString() + */ + @Override + public String toString() { + + StringBuilder builder = new StringBuilder(); + builder.append("SurlRequestData [SURL="); + builder.append(SURL); + builder.append(", status="); + builder.append(status); + builder.append("]"); + return builder.toString(); + } +} diff --git a/src/main/java/it/grid/storm/catalogs/SynchMultyOperationRequestData.java b/src/main/java/it/grid/storm/persistence/model/SynchMultyOperationRequestData.java similarity index 51% rename from src/main/java/it/grid/storm/catalogs/SynchMultyOperationRequestData.java rename to src/main/java/it/grid/storm/persistence/model/SynchMultyOperationRequestData.java index c12c524f0..01581e03c 100644 --- a/src/main/java/it/grid/storm/catalogs/SynchMultyOperationRequestData.java +++ b/src/main/java/it/grid/storm/persistence/model/SynchMultyOperationRequestData.java @@ -1,10 +1,10 @@ -package it.grid.storm.catalogs; +package it.grid.storm.persistence.model; import it.grid.storm.srm.types.TRequestToken; public interface SynchMultyOperationRequestData extends RequestData { - public TRequestToken getGeneratedRequestToken(); + public TRequestToken getGeneratedRequestToken(); - public void store(); + public void store(); } diff --git 
a/src/main/java/it/grid/storm/persistence/model/TapeRecallTO.java b/src/main/java/it/grid/storm/persistence/model/TapeRecallTO.java index edb1a5fe8..af6943a21 100644 --- a/src/main/java/it/grid/storm/persistence/model/TapeRecallTO.java +++ b/src/main/java/it/grid/storm/persistence/model/TapeRecallTO.java @@ -21,15 +21,11 @@ import java.util.Calendar; import java.util.Date; import java.util.GregorianCalendar; -import java.util.Random; import java.util.UUID; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import static it.grid.storm.persistence.model.TapeRecallTO.RecallTaskType.BOL; -import static it.grid.storm.persistence.model.TapeRecallTO.RecallTaskType.PTG; - import com.fasterxml.jackson.annotation.JsonIgnore; import it.grid.storm.srm.types.InvalidTRequestTokenAttributesException; @@ -38,423 +34,398 @@ public class TapeRecallTO implements Serializable, Comparable { - public enum RecallTaskType { - - PTG, BOL, BACK, RCLL; - } - - private static final Logger log = LoggerFactory.getLogger(TapeRecallTO.class); + public enum RecallTaskType { - private static final long serialVersionUID = -2907739786996767167L; + PTG, BOL, BACK, RCLL; + } - public static final String START_CHAR = ""; - public static final char SEPARATOR_CHAR = '\u0009'; - public static final String DATE_FORMAT = "dd-MM-yyyy HH.mm.ss"; + private static final Logger log = LoggerFactory.getLogger(TapeRecallTO.class); - private UUID taskId = null; - private TRequestToken requestToken = null; - private RecallTaskType requestType = null; - private String fileName = null; - private String userID = null; - private String voName = null; - private int pinLifetime = 0; - private TapeRecallStatus status = TapeRecallStatus.QUEUED; - private int retryAttempt = 0; - private Date insertionInstant = null; - private Date inProgressInstant = null; - private Date finalStateInstant = null; - private Date deferredRecallInstant = null; - private UUID groupTaskId = null; + private static final long serialVersionUID 
= -2907739786996767167L; - private final Calendar endOfTheWorld = new GregorianCalendar(2012, Calendar.DECEMBER, 21); + public static final String START_CHAR = ""; + public static final char SEPARATOR_CHAR = '\u0009'; + public static final String DATE_FORMAT = "dd-MM-yyyy HH.mm.ss"; - public static TapeRecallTO createRandom(Date date, String voName) { + private UUID taskId = null; + private TRequestToken requestToken = null; + private RecallTaskType requestType = null; + private String fileName = null; + private String userID = null; + private String voName = null; + private int pinLifetime = 0; + private TapeRecallStatus status = TapeRecallStatus.QUEUED; + private int retryAttempt = 0; + private Date insertionInstant = null; + private Date inProgressInstant = null; + private Date finalStateInstant = null; + private Date deferredRecallInstant = null; + private UUID groupTaskId = null; - TapeRecallTO result = new TapeRecallTO(); - Random r = new Random(); - result.setFileName("/root/" + voName + "/test/" + r.nextInt(1001)); - result.setRequestToken(TRequestToken.getRandom()); - if (r.nextInt(2) == 0) { - result.setRequestType(BOL); - } else { - result.setRequestType(PTG); - } - result.setUserID("FakeId"); - result.setRetryAttempt(0); - result.setPinLifetime(r.nextInt(1001)); - result.setVoName(voName); - result.setInsertionInstant(date); - int deferred = r.nextInt(2); - Date deferredRecallTime = new Date(date.getTime() + (deferred * (long) Math.random())); - result.setDeferredRecallInstant(deferredRecallTime); - result.setGroupTaskId(UUID.randomUUID()); - return result; - } + private final Calendar endOfTheWorld = new GregorianCalendar(2012, Calendar.DECEMBER, 21); - /* - * Implementing the natural order (by age) - */ - public int compareTo(TapeRecallTO arg0) { + /* + * Implementing the natural order (by age) + */ + public int compareTo(TapeRecallTO arg0) { - if (arg0 == null) { - return 0; - } - return insertionInstant.compareTo(arg0.getInsertionInstant()); - } + 
if (arg0 == null) { + return 0; + } + return insertionInstant.compareTo(arg0.getInsertionInstant()); + } - public Date getDeferredRecallInstant() { + public Date getDeferredRecallInstant() { - return deferredRecallInstant; - } + return deferredRecallInstant; + } - public String getFileName() { + public String getFileName() { - return fileName; - } + return fileName; + } - public Date getInsertionInstant() { + public Date getInsertionInstant() { - return insertionInstant; - } + return insertionInstant; + } - public Date getInProgressInstant() { + public Date getInProgressInstant() { - return inProgressInstant; - } + return inProgressInstant; + } - public Date getFinalStateInstant() { + public Date getFinalStateInstant() { - return finalStateInstant; - } + return finalStateInstant; + } - public int getPinLifetime() { + public int getPinLifetime() { - return pinLifetime; - } + return pinLifetime; + } - public TapeRecallStatus getStatus() { + public TapeRecallStatus getStatus() { - return status; - } + return status; + } - /** - * RequestToken is the primary key of the table - * - * @return - */ - public TRequestToken getRequestToken() { + /** + * RequestToken is the primary key of the table + * + * @return + */ + public TRequestToken getRequestToken() { - return requestToken; - } + return requestToken; + } - public RecallTaskType getRequestType() { + public RecallTaskType getRequestType() { - return requestType; - } + return requestType; + } - public int getRetryAttempt() { + public int getRetryAttempt() { - return retryAttempt; - } + return retryAttempt; + } - @JsonIgnore - public int getStatusId() { + @JsonIgnore + public int getStatusId() { - return status.getStatusId(); - } + return status.getStatusId(); + } - public UUID getTaskId() { + public UUID getTaskId() { - buildTaskId(); - return taskId; - } + buildTaskId(); + return taskId; + } - public UUID getGroupTaskId() { + public UUID getGroupTaskId() { - return groupTaskId; - } + return groupTaskId; + } - public 
String getUserID() { + public String getUserID() { - return userID; - } + return userID; + } - public String getVoName() { + public String getVoName() { - return voName; - } + return voName; + } - public void setDeferredRecallInstant(Date date) { + public void setDeferredRecallInstant(Date date) { - deferredRecallInstant = date; - } + deferredRecallInstant = date; + } - public void setFileName(String fileName) { + public void setFileName(String fileName) { - this.fileName = fileName; - buildTaskId(); - } + this.fileName = fileName; + buildTaskId(); + } - public void setInsertionInstant(Date date) { + public void setInsertionInstant(Date date) { - insertionInstant = date; - } + insertionInstant = date; + } - private void setInProgressInstant(Date date) { + private void setInProgressInstant(Date date) { - inProgressInstant = date; - } + inProgressInstant = date; + } - private void setFinalStateInstant(Date date) { + private void setFinalStateInstant(Date date) { - finalStateInstant = date; - } + finalStateInstant = date; + } - public void setPinLifetime(int pinLifetime) { + public void setPinLifetime(int pinLifetime) { - this.pinLifetime = pinLifetime; - } + this.pinLifetime = pinLifetime; + } - /** - * - * @param requestToken - */ - public void setRequestToken(TRequestToken requestToken) { + /** + * + * @param requestToken + */ + public void setRequestToken(TRequestToken requestToken) { - this.requestToken = requestToken; - } + this.requestToken = requestToken; + } - public void setRequestType(RecallTaskType requestType) { + public void setRequestType(RecallTaskType requestType) { - this.requestType = requestType; - } + this.requestType = requestType; + } - public void setRetryAttempt(int retryAttempt) { + public void setRetryAttempt(int retryAttempt) { - this.retryAttempt = retryAttempt; - } + this.retryAttempt = retryAttempt; + } - /** - * Sets the status of the recall task and if a transition is performed records the appropriate - * time-stamp - * - * @param 
status - */ - public void setStatus(TapeRecallStatus status) { + /** + * Sets the status of the recall task and if a transition is performed records the appropriate + * time-stamp + * + * @param status + */ + public void setStatus(TapeRecallStatus status) { - this.status = status; - if (this.status.equals(TapeRecallStatus.IN_PROGRESS) && this.inProgressInstant == null) { - this.setInProgressInstant(new Date()); - } else { - if (TapeRecallStatus.isFinalStatus(this.status.getStatusId()) - && this.inProgressInstant == null) { - this.setFinalStateInstant(new Date()); - } - } - } + this.status = status; + if (this.status.equals(TapeRecallStatus.IN_PROGRESS) && this.inProgressInstant == null) { + this.setInProgressInstant(new Date()); + } else { + if (TapeRecallStatus.isFinalStatus(this.status.getStatusId()) + && this.inProgressInstant == null) { + this.setFinalStateInstant(new Date()); + } + } + } - /** - * @param statusId - */ - public void setStatusId(int statusId) { + /** + * @param statusId + */ + public void setStatusId(int statusId) { - this.setStatus(TapeRecallStatus.getRecallTaskStatus(statusId)); - } + this.setStatus(TapeRecallStatus.getRecallTaskStatus(statusId)); + } - public void setTaskId(UUID taskId) { + public void setTaskId(UUID taskId) { - this.taskId = taskId; - } + this.taskId = taskId; + } - public void setGroupTaskId(UUID groupTaskId) { + public void setGroupTaskId(UUID groupTaskId) { - this.groupTaskId = groupTaskId; - } + this.groupTaskId = groupTaskId; + } - public void setUserID(String userID) { - - this.userID = userID; - } - - public void setVoName(String voName) { - - this.voName = voName; - } - - /** - * Does not print the taskId but the group task Id Does not print the state transition time - * stamps - * - * @return - */ - public String toGEMSS() { - - StringBuilder sb = new StringBuilder(); - - sb.append(START_CHAR); - sb.append(groupTaskId); - sb.append(SEPARATOR_CHAR); - - Format formatter = new SimpleDateFormat(DATE_FORMAT); - if 
(insertionInstant != null) { - sb.append(formatter.format(insertionInstant)); - } else { - insertionInstant = endOfTheWorld.getTime(); - sb.append(formatter.format(insertionInstant)); - } - - sb.append(SEPARATOR_CHAR); - sb.append(requestType); - sb.append(SEPARATOR_CHAR); - sb.append(fileName); - sb.append(SEPARATOR_CHAR); - sb.append(voName); - sb.append(SEPARATOR_CHAR); - sb.append(userID); - sb.append(SEPARATOR_CHAR); - sb.append(retryAttempt); - sb.append(SEPARATOR_CHAR); - sb.append(status); - sb.append(SEPARATOR_CHAR); - - if (deferredRecallInstant != null) { - sb.append(formatter.format(deferredRecallInstant)); - } else { - sb.append(formatter.format(insertionInstant)); - } - - sb.append(SEPARATOR_CHAR); - sb.append(pinLifetime); - sb.append(SEPARATOR_CHAR); - sb.append(requestToken); - sb.append(SEPARATOR_CHAR); - - if (inProgressInstant != null) - sb.append(formatter.format(inProgressInstant)); - else - sb.append("null"); - - return sb.toString(); - } - - @Override - public String toString() { - - StringBuilder sb = new StringBuilder(); - - sb.append(START_CHAR); - sb.append(taskId); - sb.append(SEPARATOR_CHAR); - - Format formatter = new SimpleDateFormat(DATE_FORMAT); - if (insertionInstant != null) { - sb.append(formatter.format(insertionInstant)); - } else { - insertionInstant = endOfTheWorld.getTime(); - sb.append(formatter.format(insertionInstant)); - } - - sb.append(SEPARATOR_CHAR); - sb.append(requestType); - sb.append(SEPARATOR_CHAR); - sb.append(fileName); - sb.append(SEPARATOR_CHAR); - sb.append(voName); - sb.append(SEPARATOR_CHAR); - sb.append(userID); - sb.append(SEPARATOR_CHAR); - sb.append(retryAttempt); - sb.append(SEPARATOR_CHAR); - sb.append(status); - sb.append(SEPARATOR_CHAR); - - if (inProgressInstant != null) { - sb.append(formatter.format(inProgressInstant)); - } else { - sb.append("null"); - } - sb.append(SEPARATOR_CHAR); - - if (finalStateInstant != null) { - sb.append(formatter.format(finalStateInstant)); - } else { - 
sb.append("null"); - } - sb.append(SEPARATOR_CHAR); - - if (deferredRecallInstant != null) { - sb.append(formatter.format(deferredRecallInstant)); - } else { - sb.append(formatter.format(insertionInstant)); - } - - sb.append(SEPARATOR_CHAR); - sb.append(pinLifetime); - sb.append(SEPARATOR_CHAR); - sb.append(requestToken); - sb.append(SEPARATOR_CHAR); - sb.append(groupTaskId); - return sb.toString(); - } - - /** - * This method generate a TaskId from fileName - * - * @return - */ - private void buildTaskId() { - - if (this.fileName != null) { - this.taskId = buildTaskIdFromFileName(this.fileName); - } else { - log.error("Unable to create taskId because filename is NULL"); - } - } - - public static UUID buildTaskIdFromFileName(String fileName) { - - return UUID.nameUUIDFromBytes(fileName.getBytes()); - } - - /** - * Intended to be used when building this object from a database row NOTE: before to call this - * method, call the set status method - * - * @param inProgressInstant - * @param finalStateInstant - */ - public void forceStatusUpdateInstants(Date inProgressInstant, Date finalStateInstant) { - - if (inProgressInstant != null) { - if (this.status.equals(TapeRecallStatus.IN_PROGRESS) - || TapeRecallStatus.isFinalStatus(this.status.getStatusId())) { - this.inProgressInstant = inProgressInstant; - } else { - log.error("Unable to force the in progress transition time-stamp. " - + "Invalid status: {}", status); - } - } - if (finalStateInstant != null) { - if (TapeRecallStatus.isFinalStatus(this.status.getStatusId())) { - this.finalStateInstant = finalStateInstant; - } else { - log.error("Unable to force the in final status transition time-stamp. 
" - + "current status {} is not finale", status); - } - } - } - - public void setFakeRequestToken() { - - final String FAKE_PREFIX = "FAKE-"; - try { - this.setRequestToken(new TRequestToken( - FAKE_PREFIX - .concat(UUID.randomUUID().toString().substring(FAKE_PREFIX.length())), - Calendar.getInstance().getTime())); - } catch (InvalidTRequestTokenAttributesException e) { - log.error(e.getMessage(), e); - } - } + public void setUserID(String userID) { + + this.userID = userID; + } + + public void setVoName(String voName) { + + this.voName = voName; + } + + /** + * Does not print the taskId but the group task Id Does not print the state transition time stamps + * + * @return + */ + public String toGEMSS() { + + StringBuilder sb = new StringBuilder(); + + sb.append(START_CHAR); + sb.append(groupTaskId); + sb.append(SEPARATOR_CHAR); + + Format formatter = new SimpleDateFormat(DATE_FORMAT); + if (insertionInstant != null) { + sb.append(formatter.format(insertionInstant)); + } else { + insertionInstant = endOfTheWorld.getTime(); + sb.append(formatter.format(insertionInstant)); + } + + sb.append(SEPARATOR_CHAR); + sb.append(requestType); + sb.append(SEPARATOR_CHAR); + sb.append(fileName); + sb.append(SEPARATOR_CHAR); + sb.append(voName); + sb.append(SEPARATOR_CHAR); + sb.append(userID); + sb.append(SEPARATOR_CHAR); + sb.append(retryAttempt); + sb.append(SEPARATOR_CHAR); + sb.append(status); + sb.append(SEPARATOR_CHAR); + + if (deferredRecallInstant != null) { + sb.append(formatter.format(deferredRecallInstant)); + } else { + sb.append(formatter.format(insertionInstant)); + } + + sb.append(SEPARATOR_CHAR); + sb.append(pinLifetime); + sb.append(SEPARATOR_CHAR); + sb.append(requestToken); + sb.append(SEPARATOR_CHAR); + + if (inProgressInstant != null) + sb.append(formatter.format(inProgressInstant)); + else + sb.append("null"); + + return sb.toString(); + } + + @Override + public String toString() { + + StringBuilder sb = new StringBuilder(); + + sb.append(START_CHAR); + 
sb.append(taskId); + sb.append(SEPARATOR_CHAR); + + Format formatter = new SimpleDateFormat(DATE_FORMAT); + if (insertionInstant != null) { + sb.append(formatter.format(insertionInstant)); + } else { + insertionInstant = endOfTheWorld.getTime(); + sb.append(formatter.format(insertionInstant)); + } + + sb.append(SEPARATOR_CHAR); + sb.append(requestType); + sb.append(SEPARATOR_CHAR); + sb.append(fileName); + sb.append(SEPARATOR_CHAR); + sb.append(voName); + sb.append(SEPARATOR_CHAR); + sb.append(userID); + sb.append(SEPARATOR_CHAR); + sb.append(retryAttempt); + sb.append(SEPARATOR_CHAR); + sb.append(status); + sb.append(SEPARATOR_CHAR); + + if (inProgressInstant != null) { + sb.append(formatter.format(inProgressInstant)); + } else { + sb.append("null"); + } + sb.append(SEPARATOR_CHAR); + + if (finalStateInstant != null) { + sb.append(formatter.format(finalStateInstant)); + } else { + sb.append("null"); + } + sb.append(SEPARATOR_CHAR); + + if (deferredRecallInstant != null) { + sb.append(formatter.format(deferredRecallInstant)); + } else { + sb.append(formatter.format(insertionInstant)); + } + + sb.append(SEPARATOR_CHAR); + sb.append(pinLifetime); + sb.append(SEPARATOR_CHAR); + sb.append(requestToken); + sb.append(SEPARATOR_CHAR); + sb.append(groupTaskId); + return sb.toString(); + } + + /** + * This method generate a TaskId from fileName + * + * @return + */ + private void buildTaskId() { + + if (this.fileName != null) { + this.taskId = buildTaskIdFromFileName(this.fileName); + } else { + log.error("Unable to create taskId because filename is NULL"); + } + } + + public static UUID buildTaskIdFromFileName(String fileName) { + + return UUID.nameUUIDFromBytes(fileName.getBytes()); + } + + /** + * Intended to be used when building this object from a database row NOTE: before to call this + * method, call the set status method + * + * @param inProgressInstant + * @param finalStateInstant + */ + public void forceStatusUpdateInstants(Date inProgressInstant, Date 
finalStateInstant) { + + if (inProgressInstant != null) { + if (this.status.equals(TapeRecallStatus.IN_PROGRESS) + || TapeRecallStatus.isFinalStatus(this.status.getStatusId())) { + this.inProgressInstant = inProgressInstant; + } else { + log.error("Unable to force the in progress transition time-stamp. " + "Invalid status: {}", + status); + } + } + if (finalStateInstant != null) { + if (TapeRecallStatus.isFinalStatus(this.status.getStatusId())) { + this.finalStateInstant = finalStateInstant; + } else { + log.error("Unable to force the in final status transition time-stamp. " + + "current status {} is not finale", status); + } + } + } + + public void setFakeRequestToken() { + + final String FAKE_PREFIX = "FAKE-"; + try { + this.setRequestToken(new TRequestToken( + FAKE_PREFIX.concat(UUID.randomUUID().toString().substring(FAKE_PREFIX.length())), + Calendar.getInstance().getTime())); + } catch (InvalidTRequestTokenAttributesException e) { + log.error(e.getMessage(), e); + } + } } diff --git a/src/main/java/it/grid/storm/persistence/pool/DBConnectionPool.java b/src/main/java/it/grid/storm/persistence/pool/DBConnectionPool.java new file mode 100644 index 000000000..d06b6c73a --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/pool/DBConnectionPool.java @@ -0,0 +1,96 @@ +/* + * + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +package it.grid.storm.persistence.pool; + +import static java.lang.String.valueOf; + +import java.sql.Connection; +import java.sql.SQLException; +import java.util.Map; + +import org.apache.commons.dbcp2.BasicDataSource; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.collect.Maps; + +import it.grid.storm.config.Configuration; + +public class DBConnectionPool { + + private static final Logger log = LoggerFactory.getLogger(DBConnectionPool.class); + + private static final DataBaseStrategy MYSQL = + new DataBaseStrategy("mysql", "com.mysql.jdbc.Driver", "jdbc:mysql://", new MySqlFormat()); + + private final BasicDataSource bds = new BasicDataSource(); + + public DBConnectionPool(Configuration c) { + this(c.getDbPoolSize(), c.getDbPoolMinIdle(), c.getDbPoolMaxWaitMillis(), + c.isDbPoolTestOnBorrow(), c.isDbPoolTestWhileIdle()); + } + + public DBConnectionPool(int maxTotal, int minIdle, int maxConnLifetimeMillis, + boolean isTestOnBorrow, boolean isTestWhileIdle) { + + // Set database driver name + bds.setDriverClassName(MYSQL.getDriverName()); + // Set database URL + bds.setUrl(MYSQL.getDbUrl()); + // Set database user + bds.setUsername(MYSQL.getDbUsr()); + // Set database password + bds.setPassword(MYSQL.getDbPwd()); + // Set the connection pool size + bds.setMaxTotal(maxTotal); + bds.setInitialSize(minIdle); + bds.setMinIdle(minIdle); + bds.setMaxConnLifetimeMillis(maxConnLifetimeMillis); + bds.setTestOnBorrow(isTestOnBorrow); + bds.setTestWhileIdle(isTestWhileIdle); + log.info( + "Connection pool for '{}' init with [max-total: {}, min-idle: {}, max-conn-lifetime-millis: {}, test-on-borrow: {}, test-while-idle: {}]", + MYSQL.getDbUrl(), maxTotal, minIdle, maxConnLifetimeMillis, isTestOnBorrow, + isTestWhileIdle); + } + + public Connection getConnection() throws SQLException { + + return bds.getConnection(); + } + + public DataBaseStrategy getDatabaseStrategy() { + + return MYSQL; + } + + public Map getMetrics() { + + Map metrics = Maps.newHashMap();
+ metrics.put("max-total", valueOf(bds.getMaxTotal())); + metrics.put("min-idle", valueOf(bds.getMinIdle())); + metrics.put("test-on-borrow", valueOf(bds.getTestOnBorrow())); + metrics.put("test-while-idle", valueOf(bds.getTestWhileIdle())); + metrics.put("num-active", valueOf(bds.getNumActive())); + metrics.put("num-idle", valueOf(bds.getNumIdle())); + metrics.put("max-conn-lifetime-millis", valueOf(bds.getMaxConnLifetimeMillis())); + metrics.put("max-idle", valueOf(bds.getMaxIdle())); + return metrics; + } + +} diff --git a/src/main/java/it/grid/storm/persistence/pool/DataBaseStrategy.java b/src/main/java/it/grid/storm/persistence/pool/DataBaseStrategy.java new file mode 100644 index 000000000..03e4ae115 --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/pool/DataBaseStrategy.java @@ -0,0 +1,127 @@ +/* + * + * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ + +package it.grid.storm.persistence.pool; + +public class DataBaseStrategy { + + private final String dbmsVendor; + private final String driverName; + private final String jdbcPrefix; + private String dbName; + private String dbPrefix; + private String dbUrl; + private String dbUsr; + private String dbPwd; + private SQLFormat formatter; + + public DataBaseStrategy(String dbmsVendor, String driverName, String prefix, + SQLFormat formatter) { + + this.dbmsVendor = dbmsVendor; + this.driverName = driverName; + jdbcPrefix = prefix; + this.formatter = formatter; + } + + + public String getDbmsVendor() { + return dbmsVendor; + } + + public String getDriverName() { + return driverName; + } + + public String getJdbcPrefix() { + + return jdbcPrefix; + } + + public void setDbUsr(String usrDb) { + + dbUsr = usrDb; + } + + public String getDbUsr() { + + return dbUsr; + } + + public void setDbPwd(String pwd) { + + dbPwd = pwd; + } + + public String getDbPwd() { + + return dbPwd; + } + + public void setDbName(String dbName) { + + this.dbName = dbName; + } + + public String getDbName() { + + return dbName; + } + + public void setDbPrefix(String dbName) { + + dbPrefix = dbName; + } + + public String getDbPrefix() { + + return dbPrefix; + } + + public void setDbUrl(String url) { + + dbUrl = url; + } + + public String getDbUrl() { + + return dbUrl; + } + + public String getConnectionString() { + + String connStr; + connStr = jdbcPrefix + dbUrl + "/" + dbName; + return connStr; + } + + public void setFormatter(SQLFormat formatter) { + + this.formatter = formatter; + } + + public SQLFormat getFormatter() { + + return formatter; + } + + @Override + public String toString() { + + return dbmsVendor; + } +} diff --git a/src/main/java/it/grid/storm/authz/sa/AuthzDBReaderInterface.java b/src/main/java/it/grid/storm/persistence/pool/MySqlFormat.java similarity index 55% rename from src/main/java/it/grid/storm/authz/sa/AuthzDBReaderInterface.java rename to 
src/main/java/it/grid/storm/persistence/pool/MySqlFormat.java index 222cedb7a..ee457b314 100644 --- a/src/main/java/it/grid/storm/authz/sa/AuthzDBReaderInterface.java +++ b/src/main/java/it/grid/storm/persistence/pool/MySqlFormat.java @@ -15,21 +15,29 @@ * the License. */ -package it.grid.storm.authz.sa; - -import java.util.List; - -public interface AuthzDBReaderInterface { - - public void addAuthzDB(String dbFileName) throws AuthzDBReaderException; - - public List getAuthzDBNames(); - - public void onChangeAuthzDB(String authzDBName) throws AuthzDBReaderException; - - public AuthzDBInterface getAuthzDB(String authzDBName) - throws AuthzDBReaderException; - - public long getLastParsed(String dbFileName) throws AuthzDBReaderException; +package it.grid.storm.persistence.pool; + +import java.text.SimpleDateFormat; + +public class MySqlFormat implements SQLFormat { + + private static final SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss"); + + /** + * Create a string value of fields + * + * @param value Object + * @return String + */ + public synchronized String format(Object value) { + + if (value == null) { + return null; + } + if (value instanceof java.util.Date) { + return dateFormat.format(value); + } + return value.toString(); + } } diff --git a/src/main/java/it/grid/storm/persistence/util/db/SQLFormat.java b/src/main/java/it/grid/storm/persistence/pool/SQLFormat.java similarity index 94% rename from src/main/java/it/grid/storm/persistence/util/db/SQLFormat.java rename to src/main/java/it/grid/storm/persistence/pool/SQLFormat.java index c26dc972e..88c711089 100644 --- a/src/main/java/it/grid/storm/persistence/util/db/SQLFormat.java +++ b/src/main/java/it/grid/storm/persistence/pool/SQLFormat.java @@ -15,7 +15,7 @@ * the License.
*/ -package it.grid.storm.persistence.util.db; +package it.grid.storm.persistence.pool; public interface SQLFormat { diff --git a/src/main/java/it/grid/storm/persistence/pool/StormBeIsamConnectionPool.java b/src/main/java/it/grid/storm/persistence/pool/StormBeIsamConnectionPool.java new file mode 100644 index 000000000..ac068a41b --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/pool/StormBeIsamConnectionPool.java @@ -0,0 +1,16 @@ +package it.grid.storm.persistence.pool; + +import it.grid.storm.config.Configuration; + +public class StormBeIsamConnectionPool { + + private static DBConnectionPool instance; + + public static synchronized DBConnectionPool getInstance() { + if (instance == null) { + instance = new DBConnectionPool(Configuration.getInstance()); + } + return instance; + } + +} diff --git a/src/main/java/it/grid/storm/persistence/pool/StormDbConnectionPool.java b/src/main/java/it/grid/storm/persistence/pool/StormDbConnectionPool.java new file mode 100644 index 000000000..dd4cae5f0 --- /dev/null +++ b/src/main/java/it/grid/storm/persistence/pool/StormDbConnectionPool.java @@ -0,0 +1,16 @@ +package it.grid.storm.persistence.pool; + +import it.grid.storm.config.Configuration; + +public class StormDbConnectionPool { + + private static DBConnectionPool instance; + + public static synchronized DBConnectionPool getInstance() { + if (instance == null) { + instance = new DBConnectionPool(Configuration.getInstance()); + } + return instance; + } + +} diff --git a/src/main/java/it/grid/storm/persistence/util/db/DBConnection.java b/src/main/java/it/grid/storm/persistence/util/db/DBConnection.java deleted file mode 100644 index 7441759ee..000000000 --- a/src/main/java/it/grid/storm/persistence/util/db/DBConnection.java +++ /dev/null @@ -1,102 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.persistence.util.db; - -import it.grid.storm.persistence.DataSourceConnectionFactory; -import it.grid.storm.persistence.exceptions.PersistenceException; - -import java.sql.Connection; -import java.sql.DriverManager; -import java.sql.SQLException; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class DBConnection implements DataSourceConnectionFactory - -{ - - private static final Logger log = LoggerFactory.getLogger(DBConnection.class); - private Connection connection = null; - private DataBaseStrategy db; - - public DBConnection(DataBaseStrategy db) throws PersistenceException { - - this.db = db; - - try { - Class.forName(db.getDriverName()).newInstance(); - } catch (Exception ex) { - log.error("Exception while getting JDBC driver: {}", ex.getMessage(), ex); - throw new PersistenceException("Driver loading problem", ex); - } - } - - private void handleSQLException(SQLException e) throws PersistenceException{ - - log.error("SQL Error: {}, SQLState: {}, VendorError: {}.", - e.getMessage(), - e.getSQLState(), - e.getErrorCode(), - e); - - throw new PersistenceException(e); - - } - - public Connection borrowConnection() throws PersistenceException { - - Connection result = null; - try { - result = getConnection(); - } catch (SQLException e) { - handleSQLException(e); - } - return result; - } - - public void giveBackConnection(Connection con) throws 
PersistenceException { - - if (connection != null) { - try { - shutdown(); - } catch (SQLException e) { - handleSQLException(e); - } - } else { - throw new PersistenceException("Closing NON-Existing connection"); - } - } - - private Connection getConnection() throws SQLException { - - if (connection == null) { - String url = db.getConnectionString(); - connection = DriverManager.getConnection(url, db.getDbUsr(), - db.getDbPwd()); - } - return connection; - } - - private void shutdown() throws SQLException { - - connection.close(); // if there are no other open connection - connection = null; - } - -} diff --git a/src/main/java/it/grid/storm/persistence/util/db/DBConnectionPool.java b/src/main/java/it/grid/storm/persistence/util/db/DBConnectionPool.java deleted file mode 100644 index 2eceaf047..000000000 --- a/src/main/java/it/grid/storm/persistence/util/db/DBConnectionPool.java +++ /dev/null @@ -1,171 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ - -package it.grid.storm.persistence.util.db; - -import it.grid.storm.persistence.DataSourceConnectionFactory; -import it.grid.storm.persistence.exceptions.PersistenceException; - -import java.sql.Connection; -import java.sql.SQLException; - -import org.apache.commons.dbcp.cpdsadapter.DriverAdapterCPDS; -import org.apache.commons.dbcp.datasources.SharedPoolDataSource; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class DBConnectionPool implements DataSourceConnectionFactory { - - private static final Logger log = LoggerFactory - .getLogger(DBConnectionPool.class); - private DataBaseStrategy db; - private static SharedPoolDataSource sharedDatasource; - private static DBConnectionPool instance = new DBConnectionPool(); - private static long handle = -1; - - private DBConnectionPool() { - super(); - } - - public static DBConnectionPool getPoolInstance() { - if (handle == -1) { - return null; - } else { - return instance; - } - } - - public static void initPool(DataBaseStrategy db, int maxActive, int maxWait) - throws PersistenceException { - instance.init(db, maxActive, maxWait); - } - - - private void handleSQLException(SQLException e) throws PersistenceException{ - - log.error("SQL Error: {}, SQLState: {}, VendorError: {}.", - e.getMessage(), - e.getSQLState(), - e.getErrorCode(), - e); - - throw new PersistenceException(e); - - } - public Connection borrowConnection() throws PersistenceException { - - Connection result = null; - if (handle == -1) { - throw new PersistenceException("Connection Pool is not initialized!"); - } - try { - result = sharedDatasource.getConnection(); - } catch (SQLException e) { - handleSQLException(e); - } - return result; - } - - public void giveBackConnection(Connection con) throws PersistenceException { - - if (con != null) { - try { - shutdown(con); - } catch (SQLException e) { - handleSQLException(e); - } - } else { - throw new PersistenceException("Closing NON-Existing connection"); - } - } - - public 
String getPoolInfo() throws PersistenceException { - - String result = ""; - if (handle == -1) { - throw new PersistenceException("Connection Pool is not initialized!"); - } - if (sharedDatasource.getValidationQuery() != null) { - result += "Validation query = " + sharedDatasource.getValidationQuery() - + "\n"; - } - if (sharedDatasource.getDescription() != null) { - result += "Description = " + sharedDatasource.getDescription() + "\n"; - } - result += "Nr Connection Active = " + sharedDatasource.getNumActive() - + "\n"; - result += "Nr Connection Idle = " + sharedDatasource.getNumIdle() + "\n"; - result += "Nr Max Active Connection = " + sharedDatasource.getMaxActive() - + "\n"; - - return result; - } - - private void init(DataBaseStrategy db, int maxActive, int maxWait) { - - instance.setDatabaseStrategy(db); - DriverAdapterCPDS connectionPoolDatasource = new DriverAdapterCPDS(); - try { - connectionPoolDatasource.setDriver(db.getDriverName()); - } catch (Exception ex) { - log.error("Exception while getting driver: {}", ex.getMessage(), ex); - } - - connectionPoolDatasource.setUrl(db.getConnectionString()); - connectionPoolDatasource.setUser(db.getDbUsr()); - connectionPoolDatasource.setPassword(db.getDbPwd()); - - sharedDatasource = new SharedPoolDataSource(); - sharedDatasource.setConnectionPoolDataSource(connectionPoolDatasource); - - sharedDatasource.setMaxActive(maxActive); - sharedDatasource.setMaxWait(maxWait); - - handle = System.currentTimeMillis(); - } - - /** - * - * @throws SQLException - */ - private void shutdown(Connection conn) throws SQLException { - - conn.close(); - conn = null; - } - - public static void printInfo(DBConnectionPool pool) { - - try { - log.info("DATABASE POOL INFO: {}" , pool.getPoolInfo()); - } catch (PersistenceException ex2) { - log.error(ex2.getMessage(),ex2); - } - - } - - public DataBaseStrategy getDatabaseStrategy() { - - return db; - } - - private void setDatabaseStrategy(DataBaseStrategy db) { - - this.db = db; - } - -} 
diff --git a/src/main/java/it/grid/storm/persistence/util/db/DataBaseStrategy.java b/src/main/java/it/grid/storm/persistence/util/db/DataBaseStrategy.java deleted file mode 100644 index 0c436693e..000000000 --- a/src/main/java/it/grid/storm/persistence/util/db/DataBaseStrategy.java +++ /dev/null @@ -1,156 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.persistence.util.db; - -import java.util.HashMap; -import java.util.Map; - -public class DataBaseStrategy { - - private final String dbmsVendor; - private final String driverName; - private final String jdbcPrefix; - private String dbName; - private String dbPrefix; - private String dbUrl; - private String dbUsr; - private String dbPwd; - private SQLFormat formatter; - - public static final DataBaseStrategy MYSQL = new DataBaseStrategy("mysql", - "com.mysql.jdbc.Driver", "jdbc:mysql://", new MySqlFormat()); - - private static final Map DATABASES = new HashMap(); - - static { - DataBaseStrategy.DATABASES.put(DataBaseStrategy.MYSQL.toString(), - DataBaseStrategy.MYSQL); - } - - private DataBaseStrategy(String dbmsVendor, String driverName, String prefix, - SQLFormat formatter) { - - this.dbmsVendor = dbmsVendor; - this.driverName = driverName; - jdbcPrefix = prefix; - this.formatter = formatter; - } - - - public String getDbmsVendor() { - return dbmsVendor; - } - - public String 
getDriverName() { - return driverName; - } - - public String getJdbcPrefix() { - - return jdbcPrefix; - } - - public void setDbUsr(String usrDb) { - - dbUsr = usrDb; - } - - public String getDbUsr() { - - return dbUsr; - } - - public void setDbPwd(String pwd) { - - dbPwd = pwd; - } - - public String getDbPwd() { - - return dbPwd; - } - - public void setDbName(String dbName) { - - this.dbName = dbName; - } - - public String getDbName() { - - return dbName; - } - - public void setDbPrefix(String dbName) { - - dbPrefix = dbName; - } - - public String getDbPrefix() { - - return dbPrefix; - } - - public void setDbUrl(String url) { - - dbUrl = url; - } - - public String getDbUrl() { - - return dbUrl; - } - - public String getConnectionString() { - - String connStr; - connStr = jdbcPrefix + dbUrl + "/" + dbName; - return connStr; - } - - public void setFormatter(SQLFormat formatter) { - - this.formatter = formatter; - } - - public SQLFormat getFormatter() { - - return formatter; - } - - @Override - public String toString() { - - return dbmsVendor; - } - - - public static DataBaseStrategy getInstance(String vendor) { - - return DataBaseStrategy.DATABASES.get(vendor); - } - - public static String getDriverName(String vendor) { - - return (DataBaseStrategy.getInstance(vendor)).driverName; - } - - public static String getJdbcPrefix(String vendor) { - - return (DataBaseStrategy.getInstance(vendor)).jdbcPrefix; - } -} diff --git a/src/main/java/it/grid/storm/persistence/util/db/InsertBuilder.java b/src/main/java/it/grid/storm/persistence/util/db/InsertBuilder.java deleted file mode 100644 index 3af7148dc..000000000 --- a/src/main/java/it/grid/storm/persistence/util/db/InsertBuilder.java +++ /dev/null @@ -1,82 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. 
You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.persistence.util.db; - -import java.util.Map; -import java.util.HashMap; -import java.util.Iterator; - -public class InsertBuilder extends SQLBuilder { - - private String table; - private Map columnsAndData = new HashMap(); - - public void setTable(String table) { - - this.table = table; - } - - public String getTable() { - - return table; - } - - public String getCommand() { - - return "INSERT INTO "; - } - - public String getCriteria() { - - return ""; - } - - public String getWhat() { - - StringBuilder columns = new StringBuilder(); - StringBuilder values = new StringBuilder(); - StringBuilder what = new StringBuilder(); - - String columnName = null; - Iterator iter = columnsAndData.keySet().iterator(); - while (iter.hasNext()) { - columnName = iter.next(); - columns.append(columnName); - values.append(columnsAndData.get(columnName)); - if (iter.hasNext()) { - columns.append(','); - values.append(','); - } - } - - what.append(" ("); - what.append(columns); - what.append(") VALUES ("); - what.append(values); - what.append(") "); - return what.toString(); - - } - - public void addColumnAndData(String columnName, Object value) { - - if (value != null) { - columnsAndData.put(columnName, value); - } - } -} diff --git a/src/main/java/it/grid/storm/persistence/util/db/MySqlFormat.java b/src/main/java/it/grid/storm/persistence/util/db/MySqlFormat.java deleted file mode 100644 index 10afc79ea..000000000 --- a/src/main/java/it/grid/storm/persistence/util/db/MySqlFormat.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * - * Copyright 
(c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.persistence.util.db; - -import java.text.SimpleDateFormat; - -public class MySqlFormat implements SQLFormat { - - private static final SimpleDateFormat dateFormat = new SimpleDateFormat( - "yyyy-MM-dd HH:mm:ss"); - - /** - * Create a string value of fields insertable into the query - * - * @param value - * Object - * @return String - */ - public String format(Object value) { - - if (value == null) { - return null; - } - Class clazz = value.getClass(); - if (Character.class.equals(clazz) || char.class.equals(clazz)) { - value = value.toString(); - } - if (value instanceof String) { - return value.toString(); - } - if (value instanceof java.util.Date) { - return dateFormat.format(value); - } - return value.toString(); - } - -} diff --git a/src/main/java/it/grid/storm/persistence/util/db/SQLBuilder.java b/src/main/java/it/grid/storm/persistence/util/db/SQLBuilder.java deleted file mode 100644 index cede0ea1d..000000000 --- a/src/main/java/it/grid/storm/persistence/util/db/SQLBuilder.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. 
You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package it.grid.storm.persistence.util.db; - -public abstract class SQLBuilder { - - public SQLBuilder() { - - super(); - } - - public abstract String getCommand(); - - public abstract String getTable(); - - public abstract String getWhat(); - - public abstract String getCriteria(); - -} diff --git a/src/main/java/it/grid/storm/persistence/util/db/SQLHelper.java b/src/main/java/it/grid/storm/persistence/util/db/SQLHelper.java deleted file mode 100644 index 5a17d5f43..000000000 --- a/src/main/java/it/grid/storm/persistence/util/db/SQLHelper.java +++ /dev/null @@ -1,99 +0,0 @@ -/* - * - * Copyright (c) Istituto Nazionale di Fisica Nucleare (INFN). 2006-2010. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ - -package it.grid.storm.persistence.util.db; - -public abstract class SQLHelper { - - public String dbmsVendor; - private SQLFormat formatter; - - protected SQLHelper(String dbmsVendor) { - - this.dbmsVendor = dbmsVendor; - this.formatter = DataBaseStrategy.getInstance(dbmsVendor).getFormatter(); - } - - public String format(Object value) { - - return formatter.format(value); - } - - /** - * - * @param value - * boolean - * @return String - */ - public String format(boolean value) { - - String result = null; - Boolean boolValue = new Boolean(value); - result = formatter.format(boolValue); - return result; - } - - /** - * - * @param value - * int - * @return String - */ - public String format(int value) { - - String result = null; - Integer intValue = null; - try { - intValue = new Integer(value); - } catch (NumberFormatException nfe) { - nfe.printStackTrace(); - } - result = formatter.format(intValue); - return result; - } - - /** - * - * @param value - * long - * @return String - */ - public String format(long value) { - - String result = null; - Long longValue = null; - try { - longValue = new Long(value); - } catch (NumberFormatException nfe) { - nfe.printStackTrace(); - } - result = formatter.format(longValue); - return result; - } - - /** - * - * @param date - * Date - * @return String - */ - public String format(java.util.Date date) { - - return formatter.format(date); - } - -} diff --git a/src/main/java/it/grid/storm/persistence/util/helper/StorageSpaceSQLHelper.java b/src/main/java/it/grid/storm/persistence/util/helper/StorageSpaceSQLHelper.java index d4b37aa5b..754628187 100644 --- a/src/main/java/it/grid/storm/persistence/util/helper/StorageSpaceSQLHelper.java +++ b/src/main/java/it/grid/storm/persistence/util/helper/StorageSpaceSQLHelper.java @@ -17,11 +17,6 @@ package it.grid.storm.persistence.util.helper; -import it.grid.storm.common.types.VO; -import it.grid.storm.griduser.GridUserInterface; -import 
it.grid.storm.persistence.model.StorageSpaceTO; -import it.grid.storm.persistence.util.db.SQLHelper; - import java.sql.Connection; import java.sql.PreparedStatement; import java.sql.ResultSet; @@ -31,787 +26,758 @@ import java.util.LinkedList; import java.util.List; +import com.google.common.collect.Lists; + +import it.grid.storm.common.types.VO; +import it.grid.storm.griduser.GridUserInterface; +import it.grid.storm.persistence.model.SQLHelper; +import it.grid.storm.persistence.model.StorageSpaceTO; + public class StorageSpaceSQLHelper extends SQLHelper { - private final static String TABLE_NAME = "storage_space"; - private final static HashMap COLS = new HashMap(); - - private static final String[] COLUMN_NAMES = { "SS_ID", "USERDN", "VOGROUP", - "ALIAS", "SPACE_TOKEN", "CREATED", "TOTAL_SIZE", "GUAR_SIZE", "FREE_SIZE", - "SPACE_FILE", "STORAGE_INFO", "LIFETIME", "SPACE_TYPE", "USED_SIZE", - "BUSY_SIZE", "UNAVAILABLE_SIZE", "AVAILABLE_SIZE", "RESERVED_SIZE", - "UPDATE_TIME" }; - - static { - COLS.put("storageSpaceId", "SS_ID"); - COLS.put("ownerName", "USERDN"); - COLS.put("ownerVO", "VOGROUP"); - COLS.put("alias", "ALIAS"); - COLS.put("token", "SPACE_TOKEN"); - COLS.put("created", "CREATED"); - COLS.put("spaceFile", "SPACE_FILE"); - COLS.put("storaqeInfo", "STORAGE_INFO"); - COLS.put("lifeTime", "LIFETIME"); - COLS.put("spaceType", "SPACE_TYPE"); - COLS.put("total_size", "TOTAL_SIZE"); - COLS.put("guar_size", "GUAR_SIZE"); - COLS.put("free_size", "FREE_SIZE"); - COLS.put("used_size", "USED_SIZE"); - COLS.put("busy_size", "BUSY_SIZE"); - COLS.put("unavailable_size", "UNAVAILABLE_SIZE"); - COLS.put("available_size", "AVAILABLE_SIZE"); - COLS.put("reserved_size", "RESERVED_SIZE"); - COLS.put("update_time", "UPDATE_TIME"); - } - - /** - * CONSTRUCTOR - */ - public StorageSpaceSQLHelper(String dbmsVendor) { - - super(dbmsVendor); - } - - /** - * - * @return String[] - */ - public String[] getColumnNames() { - - return COLUMN_NAMES; - } - - /** - * INSERT NEW ROW into 
TABLE - * - * @param ssTO - * StorageSpaceTO - * @return String - * @throws SQLException - */ - - public PreparedStatement insertQuery(Connection conn, StorageSpaceTO ssTO) - throws SQLException { - - List values = new LinkedList(); - - StringBuilder fields = new StringBuilder("("); - StringBuilder placeholders = new StringBuilder("("); - - if (ssTO != null) { - if (ssTO.getOwnerName() != null) { - fields.append(COLS.get("ownerName") + (",")); - placeholders.append("?,"); - values.add(format(ssTO.getOwnerName())); - } - - fields.append(COLS.get("ownerVO") + (",")); - placeholders.append("?,"); - values.add(format(ssTO.getVoName())); - - if (ssTO.getAlias() != null) { - fields.append(COLS.get("alias") + (",")); - placeholders.append("?,"); - values.add(format(ssTO.getAlias())); - } - if (ssTO.getSpaceToken() != null) { - fields.append(COLS.get("token") + (",")); - placeholders.append("?,"); - values.add(format(ssTO.getSpaceToken())); - } - if (ssTO.getCreated() != null) { - fields.append(COLS.get("created") + (",")); - placeholders.append("?,"); - values.add(format(ssTO.getCreated())); - } - if (ssTO.getSpaceFile() != null) { - fields.append(COLS.get("spaceFile") + (",")); - placeholders.append("?,"); - values.add(format(ssTO.getSpaceFile())); - } - if (ssTO.getStorageInfo() != null) { - fields.append(COLS.get("storaqeInfo") + (",")); - placeholders.append("?,"); - values.add(format(ssTO.getStorageInfo())); - } - if (ssTO.getLifetime() != -1) { - fields.append(COLS.get("lifeTime") + (",")); - placeholders.append("?,"); - values.add(format(ssTO.getLifetime())); - } - if (ssTO.getSpaceType() != null) { - fields.append(COLS.get("spaceType") + (",")); - placeholders.append("?,"); - values.add(format(ssTO.getSpaceType())); - } - if ((ssTO.getTotalSize() != 0) || (ssTO.getTotalSize() != -1)) { - fields.append(COLS.get("total_size") + (",")); - placeholders.append("?,"); - values.add(format(ssTO.getTotalSize())); - } - if ((ssTO.getGuaranteedSize() != 0) || 
(ssTO.getGuaranteedSize() != -1)) { - fields.append(COLS.get("guar_size") + (",")); - placeholders.append("?,"); - values.add(format(ssTO.getGuaranteedSize())); - } - if ((ssTO.getFreeSize() != 0) || (ssTO.getFreeSize() != -1)) { - fields.append(COLS.get("free_size") + (",")); - placeholders.append("?,"); - values.add(format(ssTO.getFreeSize())); - } - if ((ssTO.getUsedSize() != 0) || (ssTO.getUsedSize() != -1)) { - fields.append(COLS.get("used_size") + (",")); - placeholders.append("?,"); - values.add(format(ssTO.getUsedSize())); - } - if ((ssTO.getBusySize() != 0) || (ssTO.getBusySize() != -1)) { - fields.append(COLS.get("busy_size") + (",")); - placeholders.append("?,"); - values.add(format(ssTO.getBusySize())); - } - if ((ssTO.getUnavailableSize() != 0) || (ssTO.getUnavailableSize() != -1)) { - fields.append(COLS.get("unavailable_size") + (",")); - placeholders.append("?,"); - values.add(format(ssTO.getUnavailableSize())); - } - - if ((ssTO.getAvailableSize() != 0) || (ssTO.getAvailableSize() != -1)) { - fields.append(COLS.get("available_size") + (",")); - placeholders.append("?,"); - values.add(format(ssTO.getAvailableSize())); - } - if ((ssTO.getReservedSize() != 0) || (ssTO.getReservedSize() != -1)) { - fields.append(COLS.get("reserved_size") + (",")); - placeholders.append("?,"); - values.add(format(ssTO.getReservedSize())); - } - if (ssTO.getUpdateTime() != null) { - fields.append(COLS.get("update_time").concat(",")); - placeholders.append("?,"); - values.add(format(ssTO.getUpdateTime())); - } - } - - fields.deleteCharAt(fields.length() - 1); - fields.append(")"); - placeholders.deleteCharAt(placeholders.length() - 1); - placeholders.append(")"); - - String str = "INSERT INTO " + TABLE_NAME + " " + fields.toString() - + " VALUES " + placeholders.toString(); - PreparedStatement preparedStatement = conn.prepareStatement(str); - - int index = 1; - for (String val : values) { - preparedStatement.setString(index, val); - index++; - } - - return 
preparedStatement; - } - - /** - * Create a StorageSpace Transfer Object coming from Result Set - * - * @param res - * ResultSet - * @return StorageSpaceTO - */ - public StorageSpaceTO makeStorageSpaceTO(ResultSet res) { - - StorageSpaceTO ssTO = new StorageSpaceTO(); - - try { - ssTO.setStorageSpaceId(new Long(res.getLong("SS_ID"))); - - ssTO.setOwnerName(res.getString("USERDN")); - ssTO.setVoName(res.getString("VOGROUP")); - ssTO.setAlias(res.getString("ALIAS")); - ssTO.setSpaceToken(res.getString("SPACE_TOKEN")); - - java.sql.Timestamp createdTimeStamp = res.getTimestamp("CREATED"); - Date creationDate = new Date(createdTimeStamp.getTime()); - ssTO.setCreated(creationDate); - - ssTO.setSpaceFile(res.getString("SPACE_FILE")); - ssTO.setStorageInfo(res.getString("STORAGE_INFO")); - long tempLong = res.getLong("LIFETIME"); - if (!res.wasNull()) { - ssTO.setLifetime(tempLong); - } - - ssTO.setSpaceType(res.getString("SPACE_TYPE")); - - // Sizes - tempLong = res.getLong("TOTAL_SIZE"); - if (!res.wasNull()) { - ssTO.setTotalSize(tempLong); - } - tempLong = res.getLong("GUAR_SIZE"); - if (!res.wasNull()) { - ssTO.setGuaranteedSize(tempLong); - } - tempLong = res.getLong("RESERVED_SIZE"); - if (!res.wasNull()) { - ssTO.setReservedSize(tempLong); - } - tempLong = res.getLong("FREE_SIZE"); - if (!res.wasNull()) { - ssTO.setFreeSize(tempLong); - } - tempLong = res.getLong("AVAILABLE_SIZE"); - if (!res.wasNull()) { - ssTO.setAvailableSize(tempLong); - } - tempLong = res.getLong("USED_SIZE"); - if (!res.wasNull()) { - ssTO.setUsedSize(tempLong); - } - tempLong = res.getLong("BUSY_SIZE"); - if (!res.wasNull()) { - ssTO.setBusySize(tempLong); - } - tempLong = res.getLong("UNAVAILABLE_SIZE"); - if (!res.wasNull()) { - ssTO.setUnavailableSize(tempLong); - } - - // Last Update - java.sql.Timestamp updatedTimeStamp = res.getTimestamp("UPDATE_TIME"); - Date updateDate = new Date(updatedTimeStamp.getTime()); - ssTO.setUpdateTime(updateDate); - - } catch (SQLException ex) { - 
ex.printStackTrace(); - } - - return ssTO; - } - - // ************ HELPER Method *************** // - - /** - * @param vo - * @return - */ - private String getVOName(String vo) { - - String voStr = VO.makeNoVo().getValue(); - if (vo != null && !vo.trim().equals("")) { - voStr = vo.trim(); - } - return voStr; - } - - /** - * - * - * @param token - * String - * @param conn - * @return String - * @throws SQLException - */ - public PreparedStatement selectByTokenQuery(Connection conn, String token) - throws SQLException { - - String str = null; - PreparedStatement preparedStatement = null; - - str = "SELECT * FROM storage_space where space_token=?"; - preparedStatement = conn.prepareStatement(str); - preparedStatement.setString(1, token); - - return preparedStatement; - } - - /** - * Returns the SQL string for selecting all columns from the table - * 'storage_space' in the 'storm_be_ISAM' database matching 'user' and - * 'spaceAlias'. 'spaceAlias' can be NULL or empty. - * - * @param user - * VomsGridUser. - * @param spaceAlias - * String. - * @return String. - * @throws SQLException - */ - public PreparedStatement selectBySpaceAliasQuery(Connection conn, - GridUserInterface user, String spaceAlias) throws SQLException { - - String str = null; - PreparedStatement preparedStatement = null; - - String dn = user.getDn(); - - if ((spaceAlias == null) || (spaceAlias.length() == 0)) { - str = "SELECT * FROM storage_space where userdn=?"; - preparedStatement = conn.prepareStatement(str); - preparedStatement.setString(1, dn); - } else { - str = "SELECT * FROM storage_space where userdn=? AND alias=?"; - preparedStatement = conn.prepareStatement(str); - preparedStatement.setString(1, dn); - preparedStatement.setString(2, spaceAlias); - } - return preparedStatement; - } - - /** - * Returns the SQL string for selecting all columns from the table - * 'storage_space' in the 'storm_be_ISAM' database matching 'user' and - * 'spaceAlias'. 'spaceAlias' can be NULL or empty. 
- * - * @param user - * VomsGridUser. - * @param spaceAlias - * String. - * @return String. - * @throws SQLException - */ - public PreparedStatement selectBySpaceAliasOnlyQuery(Connection conn, - String spaceAlias) throws SQLException { - - /* - * This is to distinguish a client reseve space with a VOSpaceArea both with - * the same token. Only the one made by the namespace process contains a - * fake dn - */ - - String str = null; - PreparedStatement preparedStatement = null; - - str = "SELECT * FROM storage_space where alias=?"; - preparedStatement = conn.prepareStatement(str); - preparedStatement.setString(1, spaceAlias); - - return preparedStatement; - } - - /** - * Returns the SQL string for selecting all columns from the table - * 'storage_space' in the 'storm_be_ISAM' database matching 'voname'. - * - * @param voname - * string - * @return String. - * @throws SQLException - */ - - public PreparedStatement selectBySpaceType(Connection conn, String voname) - throws SQLException { - - /* - * This is to distinguish a client reseve space with a VOSpaceArea both with - * the same token. Only the one made by the namespace process contains a - * fake dn - */ - - String str = null; - PreparedStatement preparedStatement = null; - - str = "SELECT * FROM storage_space where SPACE_TYPE=?"; - preparedStatement = conn.prepareStatement(str); - preparedStatement.setString(1, voname); - - return preparedStatement; - } - - /** - * This method return the SQL query to evaluate all expired space reservation - * requests. 
- * - * @param time - * Current time (in second) to compare to the reservationTime + - * lifetime - * @return String SQL query - * @throws SQLException - */ - public PreparedStatement selectExpiredQuery(Connection conn, - long currentTimeInSecond) throws SQLException { - - String str = null; - PreparedStatement preparedStatement = null; - - str = "SELECT * FROM storage_space where lifetime is not null and (UNIX_TIMESTAMP(created)+lifetime< ?)"; - preparedStatement = conn.prepareStatement(str); - preparedStatement.setLong(1, currentTimeInSecond); - - return preparedStatement; - - } - - /** - * @param size - * @return - * @throws SQLException - */ - public PreparedStatement selectByUnavailableUsedSpaceSizeQuery( - Connection conn, long unavailableSizeValue) throws SQLException { - - String str = null; - PreparedStatement preparedStatement = null; - - str = "SELECT * FROM storage_space where " + COLS.get("used_size") - + " IS NULL or " + COLS.get("used_size") + "=?"; - - preparedStatement = conn.prepareStatement(str); - preparedStatement.setLong(1, unavailableSizeValue); - - return preparedStatement; - } - - /** - * @param lastUpdateTimestamp - * @return - * @throws SQLException - */ - - public PreparedStatement selectByPreviousOrNullLastUpdateQuery( - Connection conn, long lastUpdateTimestamp) throws SQLException { - - String str = null; - PreparedStatement preparedStatement = null; - - str = "SELECT * FROM storage_space where " + COLS.get("update_time") - + " IS NULL or UNIX_TIMESTAMP(" + COLS.get("update_time") + ") < ?"; - - preparedStatement = conn.prepareStatement(str); - preparedStatement.setLong(1, lastUpdateTimestamp); - - return preparedStatement; - - } - - /** - * Returns the SQL query for removing a row from the table 'storage_space' in - * the 'storm_be_ISAM' database matching 'userDN' and 'spaceToken'. 
- * - * @param user - * @param spaceToken - * @return - * @throws SQLException - */ - public PreparedStatement removeByTokenQuery(Connection conn, - GridUserInterface user, String spaceToken) throws SQLException { - - String str = null; - PreparedStatement preparedStatement = null; - - str = "DELETE FROM storage_space WHERE ((USERDN=?) AND (SPACE_TOKEN=?))"; - preparedStatement = conn.prepareStatement(str); - - preparedStatement.setString(1, user.getDn()); - preparedStatement.setString(2, spaceToken); - - return preparedStatement; - } - - /** - * Returns the SQL query for removing a row from the table 'storage_space' in - * the 'storm_be_ISAM' database matching 'spaceToken'. - * - * @param spaceToken - * @return - * @throws SQLException - */ - public PreparedStatement removeByTokenQuery(Connection conn, String spaceToken) - throws SQLException { - - String str = null; - PreparedStatement preparedStatement = null; - - str = "DELETE FROM storage_space WHERE (SPACE_TOKEN=?)"; - preparedStatement = conn.prepareStatement(str); - - preparedStatement.setString(1, spaceToken); - - return preparedStatement; - } - - /** - * Provides a query that updates all row fields accordingly to the provided - * StorageSpaceTO - * - * @param ssTO - * @return - * @throws IllegalArgumentException - * @throws SQLException - */ - public PreparedStatement updateByAliasAndTokenQuery(Connection conn, - StorageSpaceTO ssTO) throws IllegalArgumentException, SQLException { - - List values = new LinkedList(); - - if (ssTO == null) { - throw new IllegalArgumentException(); - } - String query = "UPDATE storage_space SET"; - if (ssTO.getOwnerName() != null) { - query += " " + COLS.get("ownerName") + " = ?" + " ,"; - values.add(format(ssTO.getOwnerName())); - } - - query += " " + COLS.get("ownerVO") + " = ?" + " ,"; - values.add(format(getVOName(ssTO.getVoName()))); - - if (ssTO.getCreated() != null) { - query += " " + COLS.get("created") + " = ?" 
+ " ,"; - values.add(format(ssTO.getCreated())); - } - if (ssTO.getSpaceFile() != null) { - query += " " + COLS.get("spaceFile") + " = ?" + " ,"; - values.add(format(ssTO.getSpaceFile())); - } - if (ssTO.getStorageInfo() != null) { - query += " " + COLS.get("storaqeInfo") + " = ?" + " ,"; - values.add(format(ssTO.getStorageInfo())); - } - if (ssTO.getLifetime() != -1) { - query += " " + COLS.get("lifeTime") + " = ?" + " ,"; - values.add(format(ssTO.getLifetime())); - } - if (ssTO.getSpaceType() != null) { - query += " " + COLS.get("spaceType") + " = ?" + " ,"; - values.add(format(ssTO.getSpaceType())); - } - if ((ssTO.getTotalSize() != 0) || (ssTO.getTotalSize() != -1)) { - query += " " + COLS.get("total_size") + " = ?" + " ,"; - values.add(format(ssTO.getTotalSize())); - } - if ((ssTO.getGuaranteedSize() != 0) || (ssTO.getGuaranteedSize() != -1)) { - query += " " + COLS.get("guar_size") + " = ?" + " ,"; - values.add(format(ssTO.getGuaranteedSize())); - } - if ((ssTO.getFreeSize() != 0) || (ssTO.getFreeSize() != -1)) { - query += " " + COLS.get("free_size") + " = ?" + " ,"; - values.add(format(ssTO.getFreeSize())); - } - if ((ssTO.getUsedSize() != 0) || (ssTO.getUsedSize() != -1)) { - query += " " + COLS.get("used_size") + " = ?" + " ,"; - values.add(format(ssTO.getUsedSize())); - } - if ((ssTO.getBusySize() != 0) || (ssTO.getBusySize() != -1)) { - query += " " + COLS.get("busy_size") + " = ?" + " ,"; - values.add(format(ssTO.getBusySize())); - } - if ((ssTO.getUnavailableSize() != 0) || (ssTO.getUnavailableSize() != -1)) { - query += " " + COLS.get("unavailable_size") + " = ?" + " ,"; - values.add(format(ssTO.getUnavailableSize())); - } - if ((ssTO.getAvailableSize() != 0) || (ssTO.getAvailableSize() != -1)) { - query += " " + COLS.get("available_size") + " = ?" + " ,"; - values.add(format(ssTO.getAvailableSize())); - } - if ((ssTO.getReservedSize() != 0) || (ssTO.getReservedSize() != -1)) { - query += " " + COLS.get("reserved_size") + " = ?" 
+ " ,"; - values.add(format(ssTO.getReservedSize())); - } - if (ssTO.getUpdateTime() != null) { - query += " " + COLS.get("update_time") + " = ?" + " ,"; - values.add(format(ssTO.getUpdateTime())); - } - if (query.charAt(query.length() - 1) == ',') { - query = query.substring(0, query.length() - 1); - } - query += " where " + COLS.get("alias") + " = ?" + " and " + COLS.get("token") + " = ?"; - - values.add(format(ssTO.getAlias())); - values.add(format(ssTO.getSpaceToken())); - - PreparedStatement preparedStatement = conn.prepareStatement(query); - - int index = 1; - for (String val : values) { - preparedStatement.setString(index, val); - index++; - } - - return preparedStatement; - } - - /** - * Provides a query that updates all row fields accordingly to the provided - * StorageSpaceTO and using SpaceToken as key - * - * @param ssTO - * @return - * @throws IllegalArgumentException - * @throws SQLException - */ - public PreparedStatement updateByTokenQuery(Connection conn, - StorageSpaceTO ssTO) throws IllegalArgumentException, SQLException { - - List values = new LinkedList(); - - if (ssTO == null) { - throw new IllegalArgumentException(); - } - String query = "UPDATE storage_space SET"; - if (ssTO.getOwnerName() != null) { - query += " " + COLS.get("ownerName") + " = ?" + " ,"; - values.add(format(ssTO.getOwnerName())); - } - - query += " " + COLS.get("ownerVO") + " = ?" + " ,"; - values.add((getVOName(ssTO.getVoName()))); - - if (ssTO.getCreated() != null) { - query += " " + COLS.get("created") + " = ?" + " ,"; - values.add(format(ssTO.getCreated())); - } - if (ssTO.getAlias() != null) { - query += " " + COLS.get("alias") + " = ?" + " ,"; - values.add(format(ssTO.getAlias())); - } - if (ssTO.getSpaceFile() != null) { - query += " " + COLS.get("spaceFile") + " = ?" + " ,"; - values.add(format(ssTO.getSpaceFile())); - } - if (ssTO.getStorageInfo() != null) { - query += " " + COLS.get("storaqeInfo") + " = ?" 
+ " ,"; - values.add(format(ssTO.getStorageInfo())); - } - if (ssTO.getLifetime() != -1) { - query += " " + COLS.get("lifeTime") + " = ?" + " ,"; - values.add(format(ssTO.getLifetime())); - } - if (ssTO.getSpaceType() != null) { - query += " " + COLS.get("spaceType") + " = ?" + " ,"; - values.add(format(ssTO.getSpaceType())); - } - if ((ssTO.getTotalSize() != 0) || (ssTO.getTotalSize() != -1)) { - query += " " + COLS.get("total_size") + " = ?" + " ,"; - values.add(format(ssTO.getTotalSize())); - } - if ((ssTO.getGuaranteedSize() != 0) || (ssTO.getGuaranteedSize() != -1)) { - query += " " + COLS.get("guar_size") + " = ?" + " ,"; - values.add(format(ssTO.getGuaranteedSize())); - } - if ((ssTO.getFreeSize() != 0) || (ssTO.getFreeSize() != -1)) { - query += " " + COLS.get("free_size") + " = ?" + " ,"; - values.add(format(ssTO.getFreeSize())); - } - if ((ssTO.getUsedSize() != 0) || (ssTO.getUsedSize() != -1)) { - query += " " + COLS.get("used_size") + " = ?" + " ,"; - values.add(format(ssTO.getUsedSize())); - } - if ((ssTO.getBusySize() != 0) || (ssTO.getBusySize() != -1)) { - query += " " + COLS.get("busy_size") + " = ?" + " ,"; - values.add(format(ssTO.getBusySize())); - } - if ((ssTO.getUnavailableSize() != 0) || (ssTO.getUnavailableSize() != -1)) { - query += " " + COLS.get("unavailable_size") + " = ?" + " ,"; - values.add(format(ssTO.getUnavailableSize())); - } - if ((ssTO.getAvailableSize() != 0) || (ssTO.getAvailableSize() != -1)) { - query += " " + COLS.get("available_size") + " = ?" + " ,"; - values.add(format(ssTO.getAvailableSize())); - } - if ((ssTO.getReservedSize() != 0) || (ssTO.getReservedSize() != -1)) { - query += " " + COLS.get("reserved_size") + " = ?" + " ,"; - values.add(format(ssTO.getReservedSize())); - } - if (ssTO.getUpdateTime() != null) { - query += " " + COLS.get("update_time") + " = ?" 
+ " ,"; - values.add(format(ssTO.getUpdateTime())); - } - if (query.charAt(query.length() - 1) == ',') { - query = query.substring(0, query.length() - 1); - } - query += " where " + COLS.get("token") + " = ?"; - - values.add(format(format(ssTO.getSpaceToken()))); - - PreparedStatement preparedStatement = conn.prepareStatement(query); - - int index = 1; - for (String val : values) { - preparedStatement.setString(index, val); - index++; - } - - return preparedStatement; - } - - /** - * - * @param token - * String - * @param freeSpace - * long - * @return String - * @throws SQLException - */ - public PreparedStatement updateFreeSpaceByTokenQuery(Connection conn, - String token, long freeSpace, Date updateTimestamp) throws SQLException { - - String str = null; - PreparedStatement preparedStatement = null; - - str = "UPDATE storage_space SET free_size=?" + " , " + "UPDATE_TIME=?" - + " WHERE space_token=?"; - - preparedStatement = conn.prepareStatement(str); - - preparedStatement.setLong(1, freeSpace); - preparedStatement.setString(2, format(updateTimestamp)); - preparedStatement.setString(3, token); - - return preparedStatement; - } - - public PreparedStatement increaseUsedSpaceByTokenQuery(Connection conn, - String token, long usedSpaceToAdd) - throws SQLException { - - String str = null; - PreparedStatement preparedStatement = null; - - str = "UPDATE storage_space " - + " SET USED_SIZE = USED_SIZE + ?, BUSY_SIZE = BUSY_SIZE + ?, " - + " FREE_SIZE = FREE_SIZE - ?, AVAILABLE_SIZE = AVAILABLE_SIZE - ?, " - + " UPDATE_TIME = NOW() " - + " WHERE space_token=? AND USED_SIZE + ? 
<= TOTAL_SIZE "; - - preparedStatement = conn.prepareStatement(str); - - preparedStatement.setLong(1, usedSpaceToAdd); - preparedStatement.setLong(2, usedSpaceToAdd); - preparedStatement.setLong(3, usedSpaceToAdd); - preparedStatement.setLong(4, usedSpaceToAdd); - preparedStatement.setString(5, token); - preparedStatement.setLong(6, usedSpaceToAdd); - - return preparedStatement; - - } - - public PreparedStatement decreaseUsedSpaceByTokenQuery(Connection conn, - String token, long usedSpaceToRemove) - throws SQLException { + private final static String TABLE_NAME = "storage_space"; + private final static HashMap COLS = new HashMap(); + + private static final String[] COLUMN_NAMES = + {"SS_ID", "USERDN", "VOGROUP", "ALIAS", "SPACE_TOKEN", "CREATED", "TOTAL_SIZE", "GUAR_SIZE", + "FREE_SIZE", "SPACE_FILE", "STORAGE_INFO", "LIFETIME", "SPACE_TYPE", "USED_SIZE", + "BUSY_SIZE", "UNAVAILABLE_SIZE", "AVAILABLE_SIZE", "RESERVED_SIZE", "UPDATE_TIME"}; + + static { + COLS.put("storageSpaceId", "SS_ID"); + COLS.put("ownerName", "USERDN"); + COLS.put("ownerVO", "VOGROUP"); + COLS.put("alias", "ALIAS"); + COLS.put("token", "SPACE_TOKEN"); + COLS.put("created", "CREATED"); + COLS.put("spaceFile", "SPACE_FILE"); + COLS.put("storaqeInfo", "STORAGE_INFO"); + COLS.put("lifeTime", "LIFETIME"); + COLS.put("spaceType", "SPACE_TYPE"); + COLS.put("total_size", "TOTAL_SIZE"); + COLS.put("guar_size", "GUAR_SIZE"); + COLS.put("free_size", "FREE_SIZE"); + COLS.put("used_size", "USED_SIZE"); + COLS.put("busy_size", "BUSY_SIZE"); + COLS.put("unavailable_size", "UNAVAILABLE_SIZE"); + COLS.put("available_size", "AVAILABLE_SIZE"); + COLS.put("reserved_size", "RESERVED_SIZE"); + COLS.put("update_time", "UPDATE_TIME"); + } + + /** + * + * @return String[] + */ + public String[] getColumnNames() { + + return COLUMN_NAMES; + } + + /** + * INSERT NEW ROW into TABLE + * + * @param ssTO StorageSpaceTO + * @return String + * @throws SQLException + */ + + public PreparedStatement insertQuery(Connection 
conn, StorageSpaceTO ssTO) throws SQLException { + + List values = Lists.newLinkedList(); + + StringBuilder fields = new StringBuilder("("); + StringBuilder placeholders = new StringBuilder("("); + + if (ssTO != null) { + if (ssTO.getOwnerName() != null) { + fields.append(COLS.get("ownerName") + (",")); + placeholders.append("?,"); + values.add(format(ssTO.getOwnerName())); + } + + fields.append(COLS.get("ownerVO") + (",")); + placeholders.append("?,"); + values.add(format(ssTO.getVoName())); + + if (ssTO.getAlias() != null) { + fields.append(COLS.get("alias") + (",")); + placeholders.append("?,"); + values.add(format(ssTO.getAlias())); + } + if (ssTO.getSpaceToken() != null) { + fields.append(COLS.get("token") + (",")); + placeholders.append("?,"); + values.add(format(ssTO.getSpaceToken())); + } + if (ssTO.getCreated() != null) { + fields.append(COLS.get("created") + (",")); + placeholders.append("?,"); + values.add(format(ssTO.getCreated())); + } + if (ssTO.getSpaceFile() != null) { + fields.append(COLS.get("spaceFile") + (",")); + placeholders.append("?,"); + values.add(format(ssTO.getSpaceFile())); + } + if (ssTO.getStorageInfo() != null) { + fields.append(COLS.get("storaqeInfo") + (",")); + placeholders.append("?,"); + values.add(format(ssTO.getStorageInfo())); + } + if (ssTO.getLifetime() != -1) { + fields.append(COLS.get("lifeTime") + (",")); + placeholders.append("?,"); + values.add(format(ssTO.getLifetime())); + } + if (ssTO.getSpaceType() != null) { + fields.append(COLS.get("spaceType") + (",")); + placeholders.append("?,"); + values.add(format(ssTO.getSpaceType())); + } + if ((ssTO.getTotalSize() != 0) || (ssTO.getTotalSize() != -1)) { + fields.append(COLS.get("total_size") + (",")); + placeholders.append("?,"); + values.add(format(ssTO.getTotalSize())); + } + if ((ssTO.getGuaranteedSize() != 0) || (ssTO.getGuaranteedSize() != -1)) { + fields.append(COLS.get("guar_size") + (",")); + placeholders.append("?,"); + 
values.add(format(ssTO.getGuaranteedSize())); + } + if ((ssTO.getFreeSize() != 0) || (ssTO.getFreeSize() != -1)) { + fields.append(COLS.get("free_size") + (",")); + placeholders.append("?,"); + values.add(format(ssTO.getFreeSize())); + } + if ((ssTO.getUsedSize() != 0) || (ssTO.getUsedSize() != -1)) { + fields.append(COLS.get("used_size") + (",")); + placeholders.append("?,"); + values.add(format(ssTO.getUsedSize())); + } + if ((ssTO.getBusySize() != 0) || (ssTO.getBusySize() != -1)) { + fields.append(COLS.get("busy_size") + (",")); + placeholders.append("?,"); + values.add(format(ssTO.getBusySize())); + } + if ((ssTO.getUnavailableSize() != 0) || (ssTO.getUnavailableSize() != -1)) { + fields.append(COLS.get("unavailable_size") + (",")); + placeholders.append("?,"); + values.add(format(ssTO.getUnavailableSize())); + } + + if ((ssTO.getAvailableSize() != 0) || (ssTO.getAvailableSize() != -1)) { + fields.append(COLS.get("available_size") + (",")); + placeholders.append("?,"); + values.add(format(ssTO.getAvailableSize())); + } + if ((ssTO.getReservedSize() != 0) || (ssTO.getReservedSize() != -1)) { + fields.append(COLS.get("reserved_size") + (",")); + placeholders.append("?,"); + values.add(format(ssTO.getReservedSize())); + } + if (ssTO.getUpdateTime() != null) { + fields.append(COLS.get("update_time").concat(",")); + placeholders.append("?,"); + values.add(format(ssTO.getUpdateTime())); + } + } - String str = null; - PreparedStatement preparedStatement = null; + fields.deleteCharAt(fields.length() - 1); + fields.append(")"); + placeholders.deleteCharAt(placeholders.length() - 1); + placeholders.append(")"); - str = "UPDATE storage_space " - + " SET USED_SIZE = USED_SIZE - ?, BUSY_SIZE = BUSY_SIZE - ?, " - + " FREE_SIZE = FREE_SIZE + ?, AVAILABLE_SIZE = AVAILABLE_SIZE + ?, " - + " UPDATE_TIME = NOW() " - + " WHERE space_token=? AND USED_SIZE - ? 
>= 0 "; + String str = "INSERT INTO " + TABLE_NAME + " " + fields.toString() + " VALUES " + + placeholders.toString(); + PreparedStatement preparedStatement = conn.prepareStatement(str); + + int index = 1; + for (String val : values) { + preparedStatement.setString(index, val); + index++; + } + + return preparedStatement; + } + + /** + * Create a StorageSpace Transfer Object coming from Result Set + * + * @param res ResultSet + * @return StorageSpaceTO + */ + public StorageSpaceTO makeStorageSpaceTO(ResultSet res) { + + StorageSpaceTO ssTO = new StorageSpaceTO(); + + try { + ssTO.setStorageSpaceId(new Long(res.getLong("SS_ID"))); + + ssTO.setOwnerName(res.getString("USERDN")); + ssTO.setVoName(res.getString("VOGROUP")); + ssTO.setAlias(res.getString("ALIAS")); + ssTO.setSpaceToken(res.getString("SPACE_TOKEN")); + + java.sql.Timestamp createdTimeStamp = res.getTimestamp("CREATED"); + Date creationDate = new Date(createdTimeStamp.getTime()); + ssTO.setCreated(creationDate); + + ssTO.setSpaceFile(res.getString("SPACE_FILE")); + ssTO.setStorageInfo(res.getString("STORAGE_INFO")); + long tempLong = res.getLong("LIFETIME"); + if (!res.wasNull()) { + ssTO.setLifetime(tempLong); + } + + ssTO.setSpaceType(res.getString("SPACE_TYPE")); + + // Sizes + tempLong = res.getLong("TOTAL_SIZE"); + if (!res.wasNull()) { + ssTO.setTotalSize(tempLong); + } + tempLong = res.getLong("GUAR_SIZE"); + if (!res.wasNull()) { + ssTO.setGuaranteedSize(tempLong); + } + tempLong = res.getLong("RESERVED_SIZE"); + if (!res.wasNull()) { + ssTO.setReservedSize(tempLong); + } + tempLong = res.getLong("FREE_SIZE"); + if (!res.wasNull()) { + ssTO.setFreeSize(tempLong); + } + tempLong = res.getLong("AVAILABLE_SIZE"); + if (!res.wasNull()) { + ssTO.setAvailableSize(tempLong); + } + tempLong = res.getLong("USED_SIZE"); + if (!res.wasNull()) { + ssTO.setUsedSize(tempLong); + } + tempLong = res.getLong("BUSY_SIZE"); + if (!res.wasNull()) { + ssTO.setBusySize(tempLong); + } + tempLong = 
res.getLong("UNAVAILABLE_SIZE"); + if (!res.wasNull()) { + ssTO.setUnavailableSize(tempLong); + } + + // Last Update + java.sql.Timestamp updatedTimeStamp = res.getTimestamp("UPDATE_TIME"); + Date updateDate = new Date(updatedTimeStamp.getTime()); + ssTO.setUpdateTime(updateDate); + + } catch (SQLException ex) { + ex.printStackTrace(); + } + return ssTO; + } + + // ************ HELPER Method *************** // + + /** + * @param vo + * @return + */ + private String getVOName(String vo) { + + String voStr = VO.makeNoVo().getValue(); + if (vo != null && !vo.trim().equals("")) { + voStr = vo.trim(); + } + return voStr; + } + + /** + * + * + * @param token String + * @param conn + * @return String + * @throws SQLException + */ + public PreparedStatement selectByTokenQuery(Connection conn, String token) throws SQLException { + + String str = null; + PreparedStatement preparedStatement = null; + + str = "SELECT * FROM storage_space where space_token=?"; + preparedStatement = conn.prepareStatement(str); + preparedStatement.setString(1, token); + + return preparedStatement; + } + + /** + * Returns the SQL string for selecting all columns from the table 'storage_space' in the + * 'storm_be_ISAM' database matching 'user' and 'spaceAlias'. 'spaceAlias' can be NULL or empty. + * + * @param user VomsGridUser. + * @param spaceAlias String. + * @return String. + * @throws SQLException + */ + public PreparedStatement selectBySpaceAliasQuery(Connection conn, GridUserInterface user, + String spaceAlias) throws SQLException { + + String str = null; + PreparedStatement preparedStatement = null; + + String dn = user.getDn(); + + if ((spaceAlias == null) || (spaceAlias.length() == 0)) { + str = "SELECT * FROM storage_space where userdn=?"; preparedStatement = conn.prepareStatement(str); + preparedStatement.setString(1, dn); + } else { + str = "SELECT * FROM storage_space where userdn=? 
 AND alias=?"; + preparedStatement = conn.prepareStatement(str); + preparedStatement.setString(1, dn); + preparedStatement.setString(2, spaceAlias); + } + return preparedStatement; + } + + /** + * Returns the SQL string for selecting all columns from the table 'storage_space' in the + * 'storm_be_ISAM' database matching 'user' and 'spaceAlias'. 'spaceAlias' can be NULL or empty. + * + * @param user VomsGridUser. + * @param spaceAlias String. + * @return String. + * @throws SQLException + */ + public PreparedStatement selectBySpaceAliasOnlyQuery(Connection conn, String spaceAlias) + throws SQLException { + + /* + * This is to distinguish a client reserve space with a VOSpaceArea both with the same token. + * Only the one made by the namespace process contains a fake dn + */ + + String str = null; + PreparedStatement preparedStatement = null; + + str = "SELECT * FROM storage_space where alias=?"; + preparedStatement = conn.prepareStatement(str); + preparedStatement.setString(1, spaceAlias); + + return preparedStatement; + } + + /** + * Returns the SQL string for selecting all columns from the table 'storage_space' in the + * 'storm_be_ISAM' database matching 'voname'. + * + * @param voname string + * @return String. + * @throws SQLException + */ + + public PreparedStatement selectBySpaceType(Connection conn, String voname) throws SQLException { + + /* + * This is to distinguish a client reserve space with a VOSpaceArea both with the same token. + * Only the one made by the namespace process contains a fake dn + */ + + String str = null; + PreparedStatement preparedStatement = null; + + str = "SELECT * FROM storage_space where SPACE_TYPE=?"; + preparedStatement = conn.prepareStatement(str); + preparedStatement.setString(1, voname); + + return preparedStatement; + } + + /** + * This method returns the SQL query to evaluate all expired space reservation requests. 
+ * + * @param time Current time (in second) to compare to the reservationTime + lifetime + * @return String SQL query + * @throws SQLException + */ + public PreparedStatement selectExpiredQuery(Connection conn, long currentTimeInSecond) + throws SQLException { + + String str = null; + PreparedStatement preparedStatement = null; + + str = + "SELECT * FROM storage_space where lifetime is not null and (UNIX_TIMESTAMP(created)+lifetime< ?)"; + preparedStatement = conn.prepareStatement(str); + preparedStatement.setLong(1, currentTimeInSecond); + + return preparedStatement; + + } + + /** + * @param size + * @return + * @throws SQLException + */ + public PreparedStatement selectByUnavailableUsedSpaceSizeQuery(Connection conn, + long unavailableSizeValue) throws SQLException { + + String str = null; + PreparedStatement preparedStatement = null; - preparedStatement.setLong(1, usedSpaceToRemove); - preparedStatement.setLong(2, usedSpaceToRemove); - preparedStatement.setLong(3, usedSpaceToRemove); - preparedStatement.setLong(4, usedSpaceToRemove); - preparedStatement.setString(5, token); - preparedStatement.setLong(6, usedSpaceToRemove); + str = "SELECT * FROM storage_space where " + COLS.get("used_size") + " IS NULL or " + + COLS.get("used_size") + "=?"; - return preparedStatement; + preparedStatement = conn.prepareStatement(str); + preparedStatement.setLong(1, unavailableSizeValue); + return preparedStatement; + } + + /** + * @param lastUpdateTimestamp + * @return + * @throws SQLException + */ + + public PreparedStatement selectByPreviousOrNullLastUpdateQuery(Connection conn, + long lastUpdateTimestamp) throws SQLException { + + String str = null; + PreparedStatement preparedStatement = null; + + str = "SELECT * FROM storage_space where " + COLS.get("update_time") + + " IS NULL or UNIX_TIMESTAMP(" + COLS.get("update_time") + ") < ?"; + + preparedStatement = conn.prepareStatement(str); + preparedStatement.setLong(1, lastUpdateTimestamp); + + return preparedStatement; + + } 
+ + /** + * Returns the SQL query for removing a row from the table 'storage_space' in the 'storm_be_ISAM' + * database matching 'userDN' and 'spaceToken'. + * + * @param user + * @param spaceToken + * @return + * @throws SQLException + */ + public PreparedStatement removeByTokenQuery(Connection conn, GridUserInterface user, + String spaceToken) throws SQLException { + + String str = null; + PreparedStatement preparedStatement = null; + + str = "DELETE FROM storage_space WHERE ((USERDN=?) AND (SPACE_TOKEN=?))"; + preparedStatement = conn.prepareStatement(str); + + preparedStatement.setString(1, user.getDn()); + preparedStatement.setString(2, spaceToken); + + return preparedStatement; + } + + /** + * Returns the SQL query for removing a row from the table 'storage_space' in the 'storm_be_ISAM' + * database matching 'spaceToken'. + * + * @param spaceToken + * @return + * @throws SQLException + */ + public PreparedStatement removeByTokenQuery(Connection conn, String spaceToken) + throws SQLException { + + String str = null; + PreparedStatement preparedStatement = null; + + str = "DELETE FROM storage_space WHERE (SPACE_TOKEN=?)"; + preparedStatement = conn.prepareStatement(str); + + preparedStatement.setString(1, spaceToken); + + return preparedStatement; + } + + /** + * Provides a query that updates all row fields accordingly to the provided StorageSpaceTO + * + * @param ssTO + * @return + * @throws IllegalArgumentException + * @throws SQLException + */ + public PreparedStatement updateByAliasAndTokenQuery(Connection conn, StorageSpaceTO ssTO) + throws IllegalArgumentException, SQLException { + + List values = new LinkedList(); + + if (ssTO == null) { + throw new IllegalArgumentException(); + } + String query = "UPDATE storage_space SET"; + if (ssTO.getOwnerName() != null) { + query += " " + COLS.get("ownerName") + " = ?" + " ,"; + values.add(format(ssTO.getOwnerName())); + } + + query += " " + COLS.get("ownerVO") + " = ?" 
+ " ,"; + values.add(format(getVOName(ssTO.getVoName()))); + + if (ssTO.getCreated() != null) { + query += " " + COLS.get("created") + " = ?" + " ,"; + values.add(format(ssTO.getCreated())); + } + if (ssTO.getSpaceFile() != null) { + query += " " + COLS.get("spaceFile") + " = ?" + " ,"; + values.add(format(ssTO.getSpaceFile())); + } + if (ssTO.getStorageInfo() != null) { + query += " " + COLS.get("storaqeInfo") + " = ?" + " ,"; + values.add(format(ssTO.getStorageInfo())); + } + if (ssTO.getLifetime() != -1) { + query += " " + COLS.get("lifeTime") + " = ?" + " ,"; + values.add(format(ssTO.getLifetime())); + } + if (ssTO.getSpaceType() != null) { + query += " " + COLS.get("spaceType") + " = ?" + " ,"; + values.add(format(ssTO.getSpaceType())); + } + if ((ssTO.getTotalSize() != 0) || (ssTO.getTotalSize() != -1)) { + query += " " + COLS.get("total_size") + " = ?" + " ,"; + values.add(format(ssTO.getTotalSize())); } + if ((ssTO.getGuaranteedSize() != 0) || (ssTO.getGuaranteedSize() != -1)) { + query += " " + COLS.get("guar_size") + " = ?" + " ,"; + values.add(format(ssTO.getGuaranteedSize())); + } + if ((ssTO.getFreeSize() != 0) || (ssTO.getFreeSize() != -1)) { + query += " " + COLS.get("free_size") + " = ?" + " ,"; + values.add(format(ssTO.getFreeSize())); + } + if ((ssTO.getUsedSize() != 0) || (ssTO.getUsedSize() != -1)) { + query += " " + COLS.get("used_size") + " = ?" + " ,"; + values.add(format(ssTO.getUsedSize())); + } + if ((ssTO.getBusySize() != 0) || (ssTO.getBusySize() != -1)) { + query += " " + COLS.get("busy_size") + " = ?" + " ,"; + values.add(format(ssTO.getBusySize())); + } + if ((ssTO.getUnavailableSize() != 0) || (ssTO.getUnavailableSize() != -1)) { + query += " " + COLS.get("unavailable_size") + " = ?" + " ,"; + values.add(format(ssTO.getUnavailableSize())); + } + if ((ssTO.getAvailableSize() != 0) || (ssTO.getAvailableSize() != -1)) { + query += " " + COLS.get("available_size") + " = ?" 
+ " ,"; + values.add(format(ssTO.getAvailableSize())); + } + if ((ssTO.getReservedSize() != 0) || (ssTO.getReservedSize() != -1)) { + query += " " + COLS.get("reserved_size") + " = ?" + " ,"; + values.add(format(ssTO.getReservedSize())); + } + if (ssTO.getUpdateTime() != null) { + query += " " + COLS.get("update_time") + " = ?" + " ,"; + values.add(format(ssTO.getUpdateTime())); + } + if (query.charAt(query.length() - 1) == ',') { + query = query.substring(0, query.length() - 1); + } + query += " where " + COLS.get("alias") + " = ?" + " and " + COLS.get("token") + " = ?"; + + values.add(format(ssTO.getAlias())); + values.add(format(ssTO.getSpaceToken())); + + PreparedStatement preparedStatement = conn.prepareStatement(query); + + int index = 1; + for (String val : values) { + preparedStatement.setString(index, val); + index++; + } + + return preparedStatement; + } + + /** + * Provides a query that updates all row fields accordingly to the provided StorageSpaceTO and + * using SpaceToken as key + * + * @param ssTO + * @return + * @throws IllegalArgumentException + * @throws SQLException + */ + public PreparedStatement updateByTokenQuery(Connection conn, StorageSpaceTO ssTO) + throws IllegalArgumentException, SQLException { + + List values = new LinkedList(); + + if (ssTO == null) { + throw new IllegalArgumentException(); + } + String query = "UPDATE storage_space SET"; + if (ssTO.getOwnerName() != null) { + query += " " + COLS.get("ownerName") + " = ?" + " ,"; + values.add(format(ssTO.getOwnerName())); + } + + query += " " + COLS.get("ownerVO") + " = ?" + " ,"; + values.add((getVOName(ssTO.getVoName()))); + + if (ssTO.getCreated() != null) { + query += " " + COLS.get("created") + " = ?" + " ,"; + values.add(format(ssTO.getCreated())); + } + if (ssTO.getAlias() != null) { + query += " " + COLS.get("alias") + " = ?" + " ,"; + values.add(format(ssTO.getAlias())); + } + if (ssTO.getSpaceFile() != null) { + query += " " + COLS.get("spaceFile") + " = ?" 
+ " ,"; + values.add(format(ssTO.getSpaceFile())); + } + if (ssTO.getStorageInfo() != null) { + query += " " + COLS.get("storaqeInfo") + " = ?" + " ,"; + values.add(format(ssTO.getStorageInfo())); + } + if (ssTO.getLifetime() != -1) { + query += " " + COLS.get("lifeTime") + " = ?" + " ,"; + values.add(format(ssTO.getLifetime())); + } + if (ssTO.getSpaceType() != null) { + query += " " + COLS.get("spaceType") + " = ?" + " ,"; + values.add(format(ssTO.getSpaceType())); + } + if ((ssTO.getTotalSize() != 0) || (ssTO.getTotalSize() != -1)) { + query += " " + COLS.get("total_size") + " = ?" + " ,"; + values.add(format(ssTO.getTotalSize())); + } + if ((ssTO.getGuaranteedSize() != 0) || (ssTO.getGuaranteedSize() != -1)) { + query += " " + COLS.get("guar_size") + " = ?" + " ,"; + values.add(format(ssTO.getGuaranteedSize())); + } + if ((ssTO.getFreeSize() != 0) || (ssTO.getFreeSize() != -1)) { + query += " " + COLS.get("free_size") + " = ?" + " ,"; + values.add(format(ssTO.getFreeSize())); + } + if ((ssTO.getUsedSize() != 0) || (ssTO.getUsedSize() != -1)) { + query += " " + COLS.get("used_size") + " = ?" + " ,"; + values.add(format(ssTO.getUsedSize())); + } + if ((ssTO.getBusySize() != 0) || (ssTO.getBusySize() != -1)) { + query += " " + COLS.get("busy_size") + " = ?" + " ,"; + values.add(format(ssTO.getBusySize())); + } + if ((ssTO.getUnavailableSize() != 0) || (ssTO.getUnavailableSize() != -1)) { + query += " " + COLS.get("unavailable_size") + " = ?" + " ,"; + values.add(format(ssTO.getUnavailableSize())); + } + if ((ssTO.getAvailableSize() != 0) || (ssTO.getAvailableSize() != -1)) { + query += " " + COLS.get("available_size") + " = ?" + " ,"; + values.add(format(ssTO.getAvailableSize())); + } + if ((ssTO.getReservedSize() != 0) || (ssTO.getReservedSize() != -1)) { + query += " " + COLS.get("reserved_size") + " = ?" + " ,"; + values.add(format(ssTO.getReservedSize())); + } + if (ssTO.getUpdateTime() != null) { + query += " " + COLS.get("update_time") + " = ?" 
+ " ,"; + values.add(format(ssTO.getUpdateTime())); + } + if (query.charAt(query.length() - 1) == ',') { + query = query.substring(0, query.length() - 1); + } + query += " where " + COLS.get("token") + " = ?"; + + values.add(format(format(ssTO.getSpaceToken()))); + + PreparedStatement preparedStatement = conn.prepareStatement(query); + + int index = 1; + for (String val : values) { + preparedStatement.setString(index, val); + index++; + } + + return preparedStatement; + } + + /** + * + * @param token String + * @param freeSpace long + * @return String + * @throws SQLException + */ + public PreparedStatement updateFreeSpaceByTokenQuery(Connection conn, String token, + long freeSpace, Date updateTimestamp) throws SQLException { + + String str = null; + PreparedStatement preparedStatement = null; + + str = "UPDATE storage_space SET free_size=?" + " , " + "UPDATE_TIME=?" + " WHERE space_token=?"; + + preparedStatement = conn.prepareStatement(str); + + preparedStatement.setLong(1, freeSpace); + preparedStatement.setString(2, format(updateTimestamp)); + preparedStatement.setString(3, token); + + return preparedStatement; + } + + public PreparedStatement increaseUsedSpaceByTokenQuery(Connection conn, String token, + long usedSpaceToAdd) throws SQLException { + + String str = null; + PreparedStatement preparedStatement = null; + + str = "UPDATE storage_space " + " SET USED_SIZE = USED_SIZE + ?, BUSY_SIZE = BUSY_SIZE + ?, " + + " FREE_SIZE = FREE_SIZE - ?, AVAILABLE_SIZE = AVAILABLE_SIZE - ?, " + + " UPDATE_TIME = NOW() " + " WHERE space_token=? AND USED_SIZE + ? 
<= TOTAL_SIZE "; + + preparedStatement = conn.prepareStatement(str); + + preparedStatement.setLong(1, usedSpaceToAdd); + preparedStatement.setLong(2, usedSpaceToAdd); + preparedStatement.setLong(3, usedSpaceToAdd); + preparedStatement.setLong(4, usedSpaceToAdd); + preparedStatement.setString(5, token); + preparedStatement.setLong(6, usedSpaceToAdd); + + return preparedStatement; + + } + + public PreparedStatement decreaseUsedSpaceByTokenQuery(Connection conn, String token, + long usedSpaceToRemove) throws SQLException { + + String str = null; + PreparedStatement preparedStatement = null; + + str = "UPDATE storage_space " + " SET USED_SIZE = USED_SIZE - ?, BUSY_SIZE = BUSY_SIZE - ?, " + + " FREE_SIZE = FREE_SIZE + ?, AVAILABLE_SIZE = AVAILABLE_SIZE + ?, " + + " UPDATE_TIME = NOW() " + " WHERE space_token=? AND USED_SIZE - ? >= 0 "; + + preparedStatement = conn.prepareStatement(str); + + preparedStatement.setLong(1, usedSpaceToRemove); + preparedStatement.setLong(2, usedSpaceToRemove); + preparedStatement.setLong(3, usedSpaceToRemove); + preparedStatement.setLong(4, usedSpaceToRemove); + preparedStatement.setString(5, token); + preparedStatement.setLong(6, usedSpaceToRemove); + + return preparedStatement; + + } } diff --git a/src/main/java/it/grid/storm/persistence/util/helper/TapeRecallMySQLHelper.java b/src/main/java/it/grid/storm/persistence/util/helper/TapeRecallMySQLHelper.java index f912c47f8..5f682baf0 100644 --- a/src/main/java/it/grid/storm/persistence/util/helper/TapeRecallMySQLHelper.java +++ b/src/main/java/it/grid/storm/persistence/util/helper/TapeRecallMySQLHelper.java @@ -17,8 +17,8 @@ package it.grid.storm.persistence.util.helper; +import it.grid.storm.persistence.model.SQLHelper; import it.grid.storm.persistence.model.TapeRecallTO; -import it.grid.storm.persistence.util.db.SQLHelper; import it.grid.storm.tape.recalltable.model.TapeRecallStatus; import java.sql.Connection; @@ -30,560 +30,538 @@ public class TapeRecallMySQLHelper extends SQLHelper { - 
private static final String TABLE_NAME = "tape_recall"; - - // primary key COL_TASK_ID + COL_REQUEST_TOKEN - public static final String COL_TASK_ID = "taskId"; - public static final String COL_REQUEST_TOKEN = "requestToken"; - public static final String COL_REQUEST_TYPE = "requestType"; - public static final String COL_FILE_NAME = "fileName"; - public static final String COL_PIN_LIFETIME = "pinLifetime"; - public static final String COL_STATUS = "status"; - public static final String COL_USER_ID = "userID"; - public static final String COL_VO_NAME = "voName"; - public static final String COL_DATE = "timeStamp"; - public static final String COL_RETRY_ATTEMPT = "retryAttempt"; - public static final String COL_DEFERRED_STARTTIME = "deferredStartTime"; - public static final String COL_GROUP_TASK_ID = "groupTaskId"; - public static final String COL_IN_PROGRESS_DATE = "inProgressTime"; - public static final String COL_FINAL_STATUS_DATE = "finalStatusTime"; - - private static final String QUERY_DELETE_N_OLD_AND_COMPLETED_TASKS; - private static final String QUERY_DELETE_ALL_OLD_AND_COMPLETED_TASKS; - - static { - - QUERY_DELETE_N_OLD_AND_COMPLETED_TASKS = - "DELETE FROM tape_recall WHERE status<>1 AND status<>2 " - + "AND timeStamp <= DATE_SUB(CURRENT_TIMESTAMP(), INTERVAL ? SECOND) " - + "LIMIT ?"; - - QUERY_DELETE_ALL_OLD_AND_COMPLETED_TASKS = - "DELETE FROM tape_recall WHERE status<>1 AND status<>2 " - + "AND timeStamp <= DATE_SUB(CURRENT_TIMESTAMP(), INTERVAL ? 
SECOND) "; - } - - public TapeRecallMySQLHelper(String dbmsVendor) { - - super(dbmsVendor); - } - - /** - * Verifies if the given string is the name of one of the timestamp columns - * - * @param columnName - * @return - */ - private static boolean validTimestampColumnName(String columnName) { - - return COL_DATE.equals(columnName) - || COL_IN_PROGRESS_DATE.equals(columnName) - || COL_FINAL_STATUS_DATE.equals(columnName); - } - - /** - * @param conn - * @param recallTask - * @return a PreparedStatement for the requested query - */ - public PreparedStatement getQueryInsertTask(Connection conn, - TapeRecallTO recallTask) { - - if (recallTask == null) { - return null; - } - - String query = "INSERT INTO " + TABLE_NAME + " (" + COL_TASK_ID + ", " - + COL_REQUEST_TOKEN + ", " + COL_REQUEST_TYPE + ", " + COL_FILE_NAME - + ", " + COL_PIN_LIFETIME + ", " + COL_STATUS + ", " + COL_VO_NAME + ", " - + COL_USER_ID + ", " + COL_RETRY_ATTEMPT + ", " + COL_DEFERRED_STARTTIME - + ", " + COL_DATE + ", " + COL_GROUP_TASK_ID - + ") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"; - - try { - PreparedStatement prepStat = conn.prepareStatement(query); - - int idx = 1; - prepStat.setString(idx++, recallTask.getTaskId().toString()); - prepStat.setString(idx++, recallTask.getRequestToken().getValue()); - prepStat.setString(idx++, recallTask.getRequestType().name()); - prepStat.setString(idx++, recallTask.getFileName()); - prepStat.setInt(idx++, recallTask.getPinLifetime()); - prepStat.setInt(idx++, recallTask.getStatusId()); - - prepStat.setString(idx++, recallTask.getVoName()); - prepStat.setString(idx++, recallTask.getUserID()); - prepStat.setInt(idx++, recallTask.getRetryAttempt()); - prepStat.setTimestamp(idx++, new java.sql.Timestamp(recallTask - .getDeferredRecallInstant().getTime())); - prepStat.setTimestamp(idx++, new java.sql.Timestamp(recallTask - .getInsertionInstant().getTime())); - prepStat.setString(idx++, recallTask.getGroupTaskId().toString()); - return prepStat; - - } catch 
(SQLException e) { - return null; - } - } - - /** - * @param taskId - * @param requestToken - * @return the requested query as string - * @throws SQLException - */ - public PreparedStatement getQueryGetTask(Connection conn, UUID taskId, - String requestToken) throws SQLException { - - String str = null; - PreparedStatement preparedStatement = null; - - str = "SELECT * FROM " + TABLE_NAME + " WHERE " + COL_TASK_ID + "=?" - + " AND " + COL_REQUEST_TOKEN + "=?"; - - preparedStatement = conn.prepareStatement(str); - - preparedStatement.setString(1, taskId.toString()); - preparedStatement.setString(2, requestToken); - - return preparedStatement; - } - - /** - * @param groupTaskId - * @return the requested query as string - * @throws SQLException - */ - public PreparedStatement getQueryGetGroupTasks(Connection conn, - UUID groupTaskId) throws SQLException { - - String str = null; - PreparedStatement preparedStatement = null; - - str = "SELECT * FROM " + TABLE_NAME + " WHERE " + COL_GROUP_TASK_ID + "=?"; - - preparedStatement = conn.prepareStatement(str); - - preparedStatement.setString(1, groupTaskId.toString()); - - return preparedStatement; - } - - /** - * @param taskId - * @return the requested query as string - * @throws SQLException - */ - public PreparedStatement getQueryGetGroupTaskIds(Connection conn, UUID taskId) - throws SQLException { - - String str = null; - PreparedStatement preparedStatement = null; - - str = "SELECT DISTINCT " + COL_GROUP_TASK_ID + " , " + COL_STATUS + " , " - + COL_IN_PROGRESS_DATE + " , " + COL_FINAL_STATUS_DATE + " FROM " - + TABLE_NAME + " WHERE " + COL_TASK_ID + "=?"; - - preparedStatement = conn.prepareStatement(str); - - preparedStatement.setString(1, taskId.toString()); - - return preparedStatement; - } - - /** - * @param taskId - * @param statuses - * @return the requested query as string - * @throws SQLException - */ - public PreparedStatement getQueryGetGroupTaskIds(Connection conn, - UUID taskId, int[] statuses) throws 
SQLException { - - String str = null; - PreparedStatement preparedStatement = null; - - str = "SELECT DISTINCT " + COL_GROUP_TASK_ID + " , " + COL_STATUS + " , " - + COL_IN_PROGRESS_DATE + " , " + COL_FINAL_STATUS_DATE + " FROM " - + TABLE_NAME + " WHERE " + COL_TASK_ID + "=?" + " AND " + COL_STATUS - + " IN ( "; - - boolean first = true; - for (int status : statuses) { - if (first) { - first = false; - } else { - str += " , "; - } - str += status; - } - str += " )"; - - preparedStatement = conn.prepareStatement(str); - preparedStatement.setString(1, taskId.toString()); - - return preparedStatement; - } - - /** - * @return the requested query as string - * @throws SQLException - */ - public PreparedStatement getQueryNumberQueued(Connection conn) - throws SQLException { - - String str = null; - PreparedStatement preparedStatement = null; + private static final String TABLE_NAME = "tape_recall"; + + // primary key COL_TASK_ID + COL_REQUEST_TOKEN + public static final String COL_TASK_ID = "taskId"; + public static final String COL_REQUEST_TOKEN = "requestToken"; + public static final String COL_REQUEST_TYPE = "requestType"; + public static final String COL_FILE_NAME = "fileName"; + public static final String COL_PIN_LIFETIME = "pinLifetime"; + public static final String COL_STATUS = "status"; + public static final String COL_USER_ID = "userID"; + public static final String COL_VO_NAME = "voName"; + public static final String COL_DATE = "timeStamp"; + public static final String COL_RETRY_ATTEMPT = "retryAttempt"; + public static final String COL_DEFERRED_STARTTIME = "deferredStartTime"; + public static final String COL_GROUP_TASK_ID = "groupTaskId"; + public static final String COL_IN_PROGRESS_DATE = "inProgressTime"; + public static final String COL_FINAL_STATUS_DATE = "finalStatusTime"; + + private static final String QUERY_DELETE_N_OLD_AND_COMPLETED_TASKS; + private static final String QUERY_DELETE_ALL_OLD_AND_COMPLETED_TASKS; + + static { + + 
QUERY_DELETE_N_OLD_AND_COMPLETED_TASKS = + "DELETE FROM tape_recall WHERE status<>1 AND status<>2 " + + "AND timeStamp <= DATE_SUB(CURRENT_TIMESTAMP(), INTERVAL ? SECOND) " + "LIMIT ?"; + + QUERY_DELETE_ALL_OLD_AND_COMPLETED_TASKS = + "DELETE FROM tape_recall WHERE status<>1 AND status<>2 " + + "AND timeStamp <= DATE_SUB(CURRENT_TIMESTAMP(), INTERVAL ? SECOND) "; + } + + /** + * Verifies if the given string is the name of one of the timestamp columns + * + * @param columnName + * @return + */ + private static boolean validTimestampColumnName(String columnName) { + + return COL_DATE.equals(columnName) || COL_IN_PROGRESS_DATE.equals(columnName) + || COL_FINAL_STATUS_DATE.equals(columnName); + } + + /** + * @param conn + * @param recallTask + * @return a PreparedStatement for the requested query + */ + public PreparedStatement getQueryInsertTask(Connection conn, TapeRecallTO recallTask) { + + if (recallTask == null) { + return null; + } + + String query = "INSERT INTO " + TABLE_NAME + " (" + COL_TASK_ID + ", " + COL_REQUEST_TOKEN + + ", " + COL_REQUEST_TYPE + ", " + COL_FILE_NAME + ", " + COL_PIN_LIFETIME + ", " + + COL_STATUS + ", " + COL_VO_NAME + ", " + COL_USER_ID + ", " + COL_RETRY_ATTEMPT + ", " + + COL_DEFERRED_STARTTIME + ", " + COL_DATE + ", " + COL_GROUP_TASK_ID + + ") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"; + + try { + PreparedStatement prepStat = conn.prepareStatement(query); + + int idx = 1; + prepStat.setString(idx++, recallTask.getTaskId().toString()); + prepStat.setString(idx++, recallTask.getRequestToken().getValue()); + prepStat.setString(idx++, recallTask.getRequestType().name()); + prepStat.setString(idx++, recallTask.getFileName()); + prepStat.setInt(idx++, recallTask.getPinLifetime()); + prepStat.setInt(idx++, recallTask.getStatusId()); + + prepStat.setString(idx++, recallTask.getVoName()); + prepStat.setString(idx++, recallTask.getUserID()); + prepStat.setInt(idx++, recallTask.getRetryAttempt()); + prepStat.setTimestamp(idx++, + new 
java.sql.Timestamp(recallTask.getDeferredRecallInstant().getTime())); + prepStat.setTimestamp(idx++, + new java.sql.Timestamp(recallTask.getInsertionInstant().getTime())); + prepStat.setString(idx++, recallTask.getGroupTaskId().toString()); + return prepStat; + + } catch (SQLException e) { + return null; + } + } + + /** + * @param taskId + * @param requestToken + * @return the requested query as string + * @throws SQLException + */ + public PreparedStatement getQueryGetTask(Connection conn, UUID taskId, String requestToken) + throws SQLException { + + String str = null; + PreparedStatement preparedStatement = null; + + str = "SELECT * FROM " + TABLE_NAME + " WHERE " + COL_TASK_ID + "=?" + " AND " + + COL_REQUEST_TOKEN + "=?"; + + preparedStatement = conn.prepareStatement(str); + + preparedStatement.setString(1, taskId.toString()); + preparedStatement.setString(2, requestToken); + + return preparedStatement; + } + + /** + * @param groupTaskId + * @return the requested query as string + * @throws SQLException + */ + public PreparedStatement getQueryGetGroupTasks(Connection conn, UUID groupTaskId) + throws SQLException { + + String str = null; + PreparedStatement preparedStatement = null; + + str = "SELECT * FROM " + TABLE_NAME + " WHERE " + COL_GROUP_TASK_ID + "=?"; + + preparedStatement = conn.prepareStatement(str); + + preparedStatement.setString(1, groupTaskId.toString()); + + return preparedStatement; + } + + /** + * @param taskId + * @return the requested query as string + * @throws SQLException + */ + public PreparedStatement getQueryGetGroupTaskIds(Connection conn, UUID taskId) + throws SQLException { + + String str = null; + PreparedStatement preparedStatement = null; + + str = "SELECT DISTINCT " + COL_GROUP_TASK_ID + " , " + COL_STATUS + " , " + COL_IN_PROGRESS_DATE + + " , " + COL_FINAL_STATUS_DATE + " FROM " + TABLE_NAME + " WHERE " + COL_TASK_ID + "=?"; + + preparedStatement = conn.prepareStatement(str); + + preparedStatement.setString(1, 
taskId.toString()); + + return preparedStatement; + } + + /** + * @param taskId + * @param statuses + * @return the requested query as string + * @throws SQLException + */ + public PreparedStatement getQueryGetGroupTaskIds(Connection conn, UUID taskId, int[] statuses) + throws SQLException { + + String str = null; + PreparedStatement preparedStatement = null; + + str = "SELECT DISTINCT " + COL_GROUP_TASK_ID + " , " + COL_STATUS + " , " + COL_IN_PROGRESS_DATE + + " , " + COL_FINAL_STATUS_DATE + " FROM " + TABLE_NAME + " WHERE " + COL_TASK_ID + "=?" + + " AND " + COL_STATUS + " IN ( "; + + boolean first = true; + for (int status : statuses) { + if (first) { + first = false; + } else { + str += " , "; + } + str += status; + } + str += " )"; - str = "SELECT COUNT(DISTINCT " + COL_GROUP_TASK_ID + ") FROM " + TABLE_NAME - + " WHERE " + COL_STATUS + "=?"; + preparedStatement = conn.prepareStatement(str); + preparedStatement.setString(1, taskId.toString()); - preparedStatement = conn.prepareStatement(str); - preparedStatement.setInt(1, TapeRecallStatus.QUEUED.getStatusId()); + return preparedStatement; + } - return preparedStatement; - } - - /** - * @param voName - * @return the requested query as string - * @throws SQLException - */ - public PreparedStatement getQueryNumberQueued(Connection conn, String voName) - throws SQLException { - - String str = null; - PreparedStatement preparedStatement = null; - - str = "SELECT COUNT(DISTINCT " + COL_GROUP_TASK_ID + ") FROM " + TABLE_NAME - + " WHERE " + COL_STATUS + "=?" 
+ " AND " + COL_VO_NAME + "=?"; - - preparedStatement = conn.prepareStatement(str); - preparedStatement.setInt(1, TapeRecallStatus.QUEUED.getStatusId()); - preparedStatement.setString(2, voName); - - return preparedStatement; - } - - /** - * @return the requested query as string - * @throws SQLException - */ - public PreparedStatement getQueryReadyForTakeOver(Connection conn) - throws SQLException { - - String str = null; - PreparedStatement preparedStatement = null; - - str = "SELECT COUNT(DISTINCT " + COL_GROUP_TASK_ID + ") FROM " + TABLE_NAME - + " WHERE " + COL_STATUS + "=?" + " AND " + COL_DEFERRED_STARTTIME - + "<=NOW()"; - - preparedStatement = conn.prepareStatement(str); - preparedStatement.setInt(1, TapeRecallStatus.QUEUED.getStatusId()); - - return preparedStatement; - } - - /** - * @param voName - * @return the requested query as string - * @throws SQLException - */ - public PreparedStatement getQueryReadyForTakeOver(Connection conn, - String voName) throws SQLException { - - String str = null; - PreparedStatement preparedStatement = null; - - str = "SELECT COUNT(DISTINCT " + COL_GROUP_TASK_ID + ") FROM " + TABLE_NAME - + " WHERE " + COL_STATUS + "=?" + " AND " + COL_VO_NAME + "=?" 
+ " AND " - + COL_DEFERRED_STARTTIME + "<=NOW()"; - - preparedStatement = conn.prepareStatement(str); - preparedStatement.setInt(1, TapeRecallStatus.QUEUED.getStatusId()); - preparedStatement.setString(2, voName); - - return preparedStatement; - } - - /** - * @return the requested query as string - * @throws SQLException - */ - public PreparedStatement getQueryNumberInProgress(Connection conn) - throws SQLException { - - String str = null; - PreparedStatement preparedStatement = null; - - str = "SELECT COUNT(DISTINCT " + COL_GROUP_TASK_ID + ") FROM " + TABLE_NAME - + " WHERE " + COL_STATUS + "=?"; - - preparedStatement = conn.prepareStatement(str); - preparedStatement.setInt(1, TapeRecallStatus.IN_PROGRESS.getStatusId()); - - return preparedStatement; - } - - /** - * @param voName - * @return the requested query as string - * @throws SQLException - */ - public PreparedStatement getQueryNumberInProgress(Connection conn, - String voName) throws SQLException { - - String str = null; - PreparedStatement preparedStatement = null; - - str = "SELECT COUNT(DISTINCT " + COL_GROUP_TASK_ID + ") FROM " + TABLE_NAME - + " WHERE " + COL_STATUS + "=?" + " AND " + COL_VO_NAME + "=?"; - - preparedStatement = conn.prepareStatement(str); - preparedStatement.setInt(1, TapeRecallStatus.IN_PROGRESS.getStatusId()); - preparedStatement.setString(2, voName); - - return preparedStatement; - } - - /** - * @param numberOfTasks - * @return the requested query as string - * @throws SQLException - */ - public PreparedStatement getQueryGetTakeoverTasksWithDoubles(Connection conn, - int numberOfTasks) throws SQLException { - - String str = null; - PreparedStatement preparedStatement = null; - - str = "SELECT * FROM " + TABLE_NAME + " WHERE " + COL_STATUS + "=?" 
- + " AND " + COL_DEFERRED_STARTTIME + "<=NOW() ORDER BY " - + COL_DEFERRED_STARTTIME + " LIMIT ?"; - - preparedStatement = conn.prepareStatement(str); - preparedStatement.setInt(1, TapeRecallStatus.QUEUED.getStatusId()); - preparedStatement.setInt(2, numberOfTasks); - - return preparedStatement; - } - - /** - * @param numberOfTasks - * @param voName - * @return the requested query as string - * @throws SQLException - */ - public PreparedStatement getQueryGetTakeoverTasksWithDoubles(Connection conn, - int numberOfTasks, String voName) throws SQLException { - - String str = null; - PreparedStatement preparedStatement = null; - - str = "SELECT * FROM " + TABLE_NAME + " WHERE " + COL_STATUS + "=?" - + " AND " + COL_VO_NAME + "=?" + " AND " + COL_DEFERRED_STARTTIME - + "<=NOW() ORDER BY " + COL_DEFERRED_STARTTIME + " LIMIT ?"; - - preparedStatement = conn.prepareStatement(str); - preparedStatement.setInt(1, TapeRecallStatus.QUEUED.getStatusId()); - preparedStatement.setString(2, voName); - preparedStatement.setInt(3, numberOfTasks); - - return preparedStatement; - } - - /** - * Creates the query string for looking up all the information related to in - * progress tasks in the recall database. - * - * @param numberOfTasks - * the maximum number of task returned - * @return the query string - * @throws SQLException - */ - public PreparedStatement getQueryGetAllTasksInProgress(Connection conn, - int numberOfTasks) throws SQLException { - - String str = null; - PreparedStatement preparedStatement = null; - - str = "SELECT * FROM " + TABLE_NAME + " WHERE " + COL_STATUS + "=?" 
- + " ORDER BY " + COL_IN_PROGRESS_DATE + " ASC LIMIT ?"; - - preparedStatement = conn.prepareStatement(str); - preparedStatement.setInt(1, TapeRecallStatus.IN_PROGRESS.getStatusId()); - preparedStatement.setInt(2, numberOfTasks); - - return preparedStatement; - - } - - /** - * @param taskList - * @param date - * @param j - * @return - * @throws SQLException - */ - public PreparedStatement getQueryUpdateTasksStatus(Connection conn, - List taskList, int statusId, String timestampColumn, - Date timestamp) throws IllegalArgumentException, SQLException { - - String str = null; - PreparedStatement preparedStatement = null; - - if (taskList.size() == 0) { - return null; - } - if (validTimestampColumnName(timestampColumn)) { - str = "UPDATE " + TABLE_NAME + " SET " + COL_STATUS + "=?" + " , " - + timestampColumn + "=?" + " WHERE " + COL_GROUP_TASK_ID + "=?"; - - for (int i = 1; i < taskList.size(); i++) { - str += " OR " + COL_GROUP_TASK_ID + "=?"; - } - - preparedStatement = conn.prepareStatement(str); - - preparedStatement.setInt(1, statusId); - preparedStatement.setTimestamp(2, - new java.sql.Timestamp(timestamp.getTime())); - preparedStatement.setString(3, taskList.get(0).getGroupTaskId() - .toString()); - - int idx = 4; - for (int i = 1; i < taskList.size(); i++) { - preparedStatement.setString(idx, taskList.get(i).getGroupTaskId() - .toString()); - idx++; - } - } else { - throw new IllegalArgumentException( - "Unable to update row status and timestamp. 
The priovided timestamp column \'" - + timestampColumn + "\' is not valid"); - } - - return preparedStatement; - } - - /** - * @param groupTaskId - * @param status - * @param timestampColumn - * @param timestamp - * @return - * @throws IllegalArgumentException - * @throws SQLException - */ - public PreparedStatement getQueryUpdateGroupTaskStatus(Connection conn, - UUID groupTaskId, int status, String timestampColumn, Date timestamp) - throws IllegalArgumentException, SQLException { - - String str = null; - PreparedStatement preparedStatement = null; - - if (validTimestampColumnName(timestampColumn)) { - str = "UPDATE " + TABLE_NAME + " SET " + COL_STATUS + "=?" + " , " - + timestampColumn + "=?" + " WHERE " + COL_GROUP_TASK_ID + "=?" - + " AND " + COL_STATUS + "!=?"; - - } else { - throw new IllegalArgumentException( - "Unable to update row status and timestamp. The priovided timestamp column \'" - + timestampColumn + "\' is not valid"); - } - - preparedStatement = conn.prepareStatement(str); - - preparedStatement.setInt(1, status); - preparedStatement.setTimestamp(2, - new java.sql.Timestamp(timestamp.getTime())); - preparedStatement.setString(3, groupTaskId.toString()); - preparedStatement.setInt(4, status); - - return preparedStatement; - - } - - /** - * @param groupTaskId - * @param status - * @return the requested query as string - * @throws SQLException - */ - public PreparedStatement getQuerySetGroupTaskStatus(Connection conn, - UUID groupTaskId, int status) throws SQLException { - - String str = null; - PreparedStatement preparedStatement = null; - - str = "UPDATE " + TABLE_NAME + " SET " + COL_STATUS + "=?" + " WHERE " - + COL_GROUP_TASK_ID + "=?" 
+ " AND " + COL_STATUS + "!=?"; - - preparedStatement = conn.prepareStatement(str); - - preparedStatement.setInt(1, status); - preparedStatement.setString(2, groupTaskId.toString()); - preparedStatement.setInt(3, status); - - return preparedStatement; - } - - /** - * @param groupTaskId - * @param value - * @return the requested query as string - * @throws SQLException - */ - public PreparedStatement getQuerySetGroupTaskRetryValue(Connection conn, - UUID groupTaskId, int value) throws SQLException { - - String str = null; - PreparedStatement preparedStatement = null; - - str = "UPDATE " + TABLE_NAME + " SET " + COL_RETRY_ATTEMPT + "=?" - + " WHERE " + COL_GROUP_TASK_ID + "=?"; - - preparedStatement = conn.prepareStatement(str); - - preparedStatement.setInt(1, value); - preparedStatement.setString(2, groupTaskId.toString()); - - return preparedStatement; - } - - /** - * @param con - * @param expirationTime - * @return the requested query as @PreparedStatement - * @throws SQLException - */ - public PreparedStatement getQueryDeleteCompletedTasks(Connection con, long expirationTime) - throws SQLException { - - PreparedStatement ps = con.prepareStatement(QUERY_DELETE_ALL_OLD_AND_COMPLETED_TASKS); - ps.setLong(1, expirationTime); - - return ps; - } - - /** - * @param con - * @param expirationTime - * @param maxNumTasks - * @return the requested query as @PreparedStatement - * @throws SQLException - */ - public PreparedStatement getQueryDeleteCompletedTasks(Connection con, long expirationTime, - int maxNumTasks) throws SQLException { - - PreparedStatement ps = con.prepareStatement(QUERY_DELETE_N_OLD_AND_COMPLETED_TASKS); + /** + * @return the requested query as string + * @throws SQLException + */ + public PreparedStatement getQueryNumberQueued(Connection conn) throws SQLException { - ps.setLong(1, expirationTime); - ps.setInt(2, maxNumTasks); + String str = null; + PreparedStatement preparedStatement = null; - return ps; - } + str = "SELECT COUNT(DISTINCT " + 
COL_GROUP_TASK_ID + ") FROM " + TABLE_NAME + " WHERE " + + COL_STATUS + "=?"; + + preparedStatement = conn.prepareStatement(str); + preparedStatement.setInt(1, TapeRecallStatus.QUEUED.getStatusId()); + + return preparedStatement; + } + + /** + * @param voName + * @return the requested query as string + * @throws SQLException + */ + public PreparedStatement getQueryNumberQueued(Connection conn, String voName) + throws SQLException { + + String str = null; + PreparedStatement preparedStatement = null; + + str = "SELECT COUNT(DISTINCT " + COL_GROUP_TASK_ID + ") FROM " + TABLE_NAME + " WHERE " + + COL_STATUS + "=?" + " AND " + COL_VO_NAME + "=?"; + + preparedStatement = conn.prepareStatement(str); + preparedStatement.setInt(1, TapeRecallStatus.QUEUED.getStatusId()); + preparedStatement.setString(2, voName); + + return preparedStatement; + } + + /** + * @return the requested query as string + * @throws SQLException + */ + public PreparedStatement getQueryReadyForTakeOver(Connection conn) throws SQLException { + + String str = null; + PreparedStatement preparedStatement = null; + + str = "SELECT COUNT(DISTINCT " + COL_GROUP_TASK_ID + ") FROM " + TABLE_NAME + " WHERE " + + COL_STATUS + "=?" + " AND " + COL_DEFERRED_STARTTIME + "<=NOW()"; + + preparedStatement = conn.prepareStatement(str); + preparedStatement.setInt(1, TapeRecallStatus.QUEUED.getStatusId()); + + return preparedStatement; + } + + /** + * @param voName + * @return the requested query as string + * @throws SQLException + */ + public PreparedStatement getQueryReadyForTakeOver(Connection conn, String voName) + throws SQLException { + + String str = null; + PreparedStatement preparedStatement = null; + + str = "SELECT COUNT(DISTINCT " + COL_GROUP_TASK_ID + ") FROM " + TABLE_NAME + " WHERE " + + COL_STATUS + "=?" + " AND " + COL_VO_NAME + "=?" 
+ " AND " + COL_DEFERRED_STARTTIME + + "<=NOW()"; + + preparedStatement = conn.prepareStatement(str); + preparedStatement.setInt(1, TapeRecallStatus.QUEUED.getStatusId()); + preparedStatement.setString(2, voName); + + return preparedStatement; + } + + /** + * @return the requested query as string + * @throws SQLException + */ + public PreparedStatement getQueryNumberInProgress(Connection conn) throws SQLException { + + String str = null; + PreparedStatement preparedStatement = null; + + str = "SELECT COUNT(DISTINCT " + COL_GROUP_TASK_ID + ") FROM " + TABLE_NAME + " WHERE " + + COL_STATUS + "=?"; + + preparedStatement = conn.prepareStatement(str); + preparedStatement.setInt(1, TapeRecallStatus.IN_PROGRESS.getStatusId()); + + return preparedStatement; + } + + /** + * @param voName + * @return the requested query as string + * @throws SQLException + */ + public PreparedStatement getQueryNumberInProgress(Connection conn, String voName) + throws SQLException { + + String str = null; + PreparedStatement preparedStatement = null; + + str = "SELECT COUNT(DISTINCT " + COL_GROUP_TASK_ID + ") FROM " + TABLE_NAME + " WHERE " + + COL_STATUS + "=?" + " AND " + COL_VO_NAME + "=?"; + + preparedStatement = conn.prepareStatement(str); + preparedStatement.setInt(1, TapeRecallStatus.IN_PROGRESS.getStatusId()); + preparedStatement.setString(2, voName); + + return preparedStatement; + } + + /** + * @param numberOfTasks + * @return the requested query as string + * @throws SQLException + */ + public PreparedStatement getQueryGetTakeoverTasksWithDoubles(Connection conn, int numberOfTasks) + throws SQLException { + + String str = null; + PreparedStatement preparedStatement = null; + + str = "SELECT * FROM " + TABLE_NAME + " WHERE " + COL_STATUS + "=?" 
+ " AND " + + COL_DEFERRED_STARTTIME + "<=NOW() ORDER BY " + COL_DEFERRED_STARTTIME + " LIMIT ?"; + + preparedStatement = conn.prepareStatement(str); + preparedStatement.setInt(1, TapeRecallStatus.QUEUED.getStatusId()); + preparedStatement.setInt(2, numberOfTasks); + + return preparedStatement; + } + + /** + * @param numberOfTasks + * @param voName + * @return the requested query as string + * @throws SQLException + */ + public PreparedStatement getQueryGetTakeoverTasksWithDoubles(Connection conn, int numberOfTasks, + String voName) throws SQLException { + + String str = null; + PreparedStatement preparedStatement = null; + + str = "SELECT * FROM " + TABLE_NAME + " WHERE " + COL_STATUS + "=?" + " AND " + COL_VO_NAME + + "=?" + " AND " + COL_DEFERRED_STARTTIME + "<=NOW() ORDER BY " + COL_DEFERRED_STARTTIME + + " LIMIT ?"; + + preparedStatement = conn.prepareStatement(str); + preparedStatement.setInt(1, TapeRecallStatus.QUEUED.getStatusId()); + preparedStatement.setString(2, voName); + preparedStatement.setInt(3, numberOfTasks); + + return preparedStatement; + } + + /** + * Creates the query string for looking up all the information related to in progress tasks in the + * recall database. + * + * @param numberOfTasks the maximum number of task returned + * @return the query string + * @throws SQLException + */ + public PreparedStatement getQueryGetAllTasksInProgress(Connection conn, int numberOfTasks) + throws SQLException { + + String str = null; + PreparedStatement preparedStatement = null; + + str = "SELECT * FROM " + TABLE_NAME + " WHERE " + COL_STATUS + "=?" 
+ " ORDER BY " + + COL_IN_PROGRESS_DATE + " ASC LIMIT ?"; + + preparedStatement = conn.prepareStatement(str); + preparedStatement.setInt(1, TapeRecallStatus.IN_PROGRESS.getStatusId()); + preparedStatement.setInt(2, numberOfTasks); + + return preparedStatement; + + } + + /** + * @param taskList + * @param date + * @param j + * @return + * @throws SQLException + */ + public PreparedStatement getQueryUpdateTasksStatus(Connection conn, List taskList, + int statusId, String timestampColumn, Date timestamp) + throws IllegalArgumentException, SQLException { + + String str = null; + PreparedStatement preparedStatement = null; + + if (taskList.size() == 0) { + return null; + } + if (validTimestampColumnName(timestampColumn)) { + str = "UPDATE " + TABLE_NAME + " SET " + COL_STATUS + "=?" + " , " + timestampColumn + "=?" + + " WHERE " + COL_GROUP_TASK_ID + "=?"; + + for (int i = 1; i < taskList.size(); i++) { + str += " OR " + COL_GROUP_TASK_ID + "=?"; + } + + preparedStatement = conn.prepareStatement(str); + + preparedStatement.setInt(1, statusId); + preparedStatement.setTimestamp(2, new java.sql.Timestamp(timestamp.getTime())); + preparedStatement.setString(3, taskList.get(0).getGroupTaskId().toString()); + + int idx = 4; + for (int i = 1; i < taskList.size(); i++) { + preparedStatement.setString(idx, taskList.get(i).getGroupTaskId().toString()); + idx++; + } + } else { + throw new IllegalArgumentException( + "Unable to update row status and timestamp. 
The provided timestamp column \'" + + timestampColumn + "\' is not valid"); + } + + return preparedStatement; + } + + /** + * @param groupTaskId + * @param status + * @param timestampColumn + * @param timestamp + * @return + * @throws IllegalArgumentException + * @throws SQLException + */ + public PreparedStatement getQueryUpdateGroupTaskStatus(Connection conn, UUID groupTaskId, + int status, String timestampColumn, Date timestamp) + throws IllegalArgumentException, SQLException { + + String str = null; + PreparedStatement preparedStatement = null; + + if (validTimestampColumnName(timestampColumn)) { + str = "UPDATE " + TABLE_NAME + " SET " + COL_STATUS + "=?" + " , " + timestampColumn + "=?" + + " WHERE " + COL_GROUP_TASK_ID + "=?" + " AND " + COL_STATUS + "!=?"; + + } else { + throw new IllegalArgumentException( + "Unable to update row status and timestamp. The provided timestamp column \'" + + timestampColumn + "\' is not valid"); + } + + preparedStatement = conn.prepareStatement(str); + + preparedStatement.setInt(1, status); + preparedStatement.setTimestamp(2, new java.sql.Timestamp(timestamp.getTime())); + preparedStatement.setString(3, groupTaskId.toString()); + preparedStatement.setInt(4, status); + + return preparedStatement; + + } + + /** + * @param groupTaskId + * @param status + * @return the requested query as string + * @throws SQLException + */ + public PreparedStatement getQuerySetGroupTaskStatus(Connection conn, UUID groupTaskId, int status) + throws SQLException { + + String str = null; + PreparedStatement preparedStatement = null; + + str = "UPDATE " + TABLE_NAME + " SET " + COL_STATUS + "=?" + " WHERE " + COL_GROUP_TASK_ID + + "=?" 
+ " AND " + COL_STATUS + "!=?"; + + preparedStatement = conn.prepareStatement(str); + + preparedStatement.setInt(1, status); + preparedStatement.setString(2, groupTaskId.toString()); + preparedStatement.setInt(3, status); + + return preparedStatement; + } + + /** + * @param groupTaskId + * @param value + * @return the requested query as string + * @throws SQLException + */ + public PreparedStatement getQuerySetGroupTaskRetryValue(Connection conn, UUID groupTaskId, + int value) throws SQLException { + + String str = null; + PreparedStatement preparedStatement = null; + + str = "UPDATE " + TABLE_NAME + " SET " + COL_RETRY_ATTEMPT + "=?" + " WHERE " + + COL_GROUP_TASK_ID + "=?"; + + preparedStatement = conn.prepareStatement(str); + + preparedStatement.setInt(1, value); + preparedStatement.setString(2, groupTaskId.toString()); + + return preparedStatement; + } + + /** + * @param con + * @param expirationTime + * @return the requested query as @PreparedStatement + * @throws SQLException + */ + public PreparedStatement getQueryDeleteCompletedTasks(Connection con, long expirationTime) + throws SQLException { + + PreparedStatement ps = con.prepareStatement(QUERY_DELETE_ALL_OLD_AND_COMPLETED_TASKS); + ps.setLong(1, expirationTime); + + return ps; + } + + /** + * @param con + * @param expirationTime + * @param maxNumTasks + * @return the requested query as @PreparedStatement + * @throws SQLException + */ + public PreparedStatement getQueryDeleteCompletedTasks(Connection con, long expirationTime, + int maxNumTasks) throws SQLException { + + PreparedStatement ps = con.prepareStatement(QUERY_DELETE_N_OLD_AND_COMPLETED_TASKS); + + ps.setLong(1, expirationTime); + ps.setInt(2, maxNumTasks); + + return ps; + } } diff --git a/src/main/java/it/grid/storm/space/SimpleSpaceUpdaterHelper.java b/src/main/java/it/grid/storm/space/SimpleSpaceUpdaterHelper.java index 30606ca65..aa4dd10ff 100644 --- a/src/main/java/it/grid/storm/space/SimpleSpaceUpdaterHelper.java +++ 
b/src/main/java/it/grid/storm/space/SimpleSpaceUpdaterHelper.java @@ -8,72 +8,67 @@ public class SimpleSpaceUpdaterHelper implements SpaceUpdaterHelperInterface { - private static final Logger log = LoggerFactory - .getLogger(SimpleSpaceUpdaterHelper.class); - - private ReservedSpaceCatalog rsc; - - public SimpleSpaceUpdaterHelper() { - rsc = new ReservedSpaceCatalog(); - } - - private StorageSpaceData getStorageSpaceDataForVFS(VirtualFSInterface vfs) { - - return rsc.getStorageSpaceByAlias(vfs.getSpaceTokenDescription()); - } - - @Override - public boolean increaseUsedSpace(VirtualFSInterface vfs, long size) { - - log.debug("Increase {} used space: {} bytes ", vfs.getAliasName(), size); - - if (size < 0) { - log.error("Size to add is a negative value: {}", size); - return false; - } - if (size == 0) { - log.debug("Size is zero, vfs {} used space won't be increased!", - vfs.getAliasName()); - return true; - } - - log.debug("Get StorageSpaceData from vfs ..."); - StorageSpaceData ssd = getStorageSpaceDataForVFS(vfs); - - if (ssd == null) { - log.error("Unable to get StorageSpaceData from alias name {}", - vfs.getAliasName()); - return false; - } - - return rsc.increaseUsedSpace(ssd.getSpaceToken().getValue(), size); - } - - @Override - public boolean decreaseUsedSpace(VirtualFSInterface vfs, long size) { - - log.debug("Decrease {} used space: {} bytes ", vfs.getAliasName(), size); - - if (size < 0) { - log.error("Size to remove is a negative value: {}", size); - return false; - } - if (size == 0) { - log.debug("Size is zero, vfs {} used space won't be decreased!", - vfs.getAliasName()); - return true; - } - - log.debug("Get StorageSpaceData from vfs ..."); - StorageSpaceData ssd = getStorageSpaceDataForVFS(vfs); - - if (ssd == null) { - log.error("Unable to get StorageSpaceData from alias name {}", - vfs.getAliasName()); - return false; - } - - return rsc.decreaseUsedSpace(ssd.getSpaceToken().getValue(), size); - } + private static final Logger log = 
LoggerFactory.getLogger(SimpleSpaceUpdaterHelper.class); + + private ReservedSpaceCatalog rsc; + + public SimpleSpaceUpdaterHelper() { + rsc = ReservedSpaceCatalog.getInstance(); + } + + private StorageSpaceData getStorageSpaceDataForVFS(VirtualFSInterface vfs) { + + return rsc.getStorageSpaceByAlias(vfs.getSpaceTokenDescription()); + } + + @Override + public boolean increaseUsedSpace(VirtualFSInterface vfs, long size) { + + log.debug("Increase {} used space: {} bytes ", vfs.getAliasName(), size); + + if (size < 0) { + log.error("Size to add is a negative value: {}", size); + return false; + } + if (size == 0) { + log.debug("Size is zero, vfs {} used space won't be increased!", vfs.getAliasName()); + return true; + } + + log.debug("Get StorageSpaceData from vfs ..."); + StorageSpaceData ssd = getStorageSpaceDataForVFS(vfs); + + if (ssd == null) { + log.error("Unable to get StorageSpaceData from alias name {}", vfs.getAliasName()); + return false; + } + + return rsc.increaseUsedSpace(ssd.getSpaceToken().getValue(), size); + } + + @Override + public boolean decreaseUsedSpace(VirtualFSInterface vfs, long size) { + + log.debug("Decrease {} used space: {} bytes ", vfs.getAliasName(), size); + + if (size < 0) { + log.error("Size to remove is a negative value: {}", size); + return false; + } + if (size == 0) { + log.debug("Size is zero, vfs {} used space won't be decreased!", vfs.getAliasName()); + return true; + } + + log.debug("Get StorageSpaceData from vfs ..."); + StorageSpaceData ssd = getStorageSpaceDataForVFS(vfs); + + if (ssd == null) { + log.error("Unable to get StorageSpaceData from alias name {}", vfs.getAliasName()); + return false; + } + + return rsc.decreaseUsedSpace(ssd.getSpaceToken().getValue(), size); + } } diff --git a/src/main/java/it/grid/storm/space/SpaceHelper.java b/src/main/java/it/grid/storm/space/SpaceHelper.java index 0f85454a7..7d882c079 100644 --- a/src/main/java/it/grid/storm/space/SpaceHelper.java +++ 
b/src/main/java/it/grid/storm/space/SpaceHelper.java @@ -17,40 +17,29 @@ package it.grid.storm.space; -import it.grid.storm.catalogs.InvalidRetrievedDataException; -import it.grid.storm.catalogs.InvalidSpaceDataAttributesException; -import it.grid.storm.catalogs.MultipleDataEntriesException; -import it.grid.storm.catalogs.NoDataFoundException; -import it.grid.storm.catalogs.ReducedPtPChunkData; +import java.util.Iterator; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import it.grid.storm.catalogs.ReservedSpaceCatalog; import it.grid.storm.common.types.InvalidPFNAttributeException; import it.grid.storm.common.types.PFN; import it.grid.storm.common.types.SizeUnit; -import it.grid.storm.config.Configuration; -import it.grid.storm.filesystem.LocalFile; import it.grid.storm.griduser.GridUserInterface; import it.grid.storm.griduser.GridUserManager; -import it.grid.storm.namespace.NamespaceDirector; -import it.grid.storm.namespace.NamespaceException; import it.grid.storm.namespace.StoRI; import it.grid.storm.namespace.VirtualFSInterface; import it.grid.storm.persistence.exceptions.DataAccessException; +import it.grid.storm.persistence.exceptions.InvalidSpaceDataAttributesException; import it.grid.storm.persistence.model.TransferObjectDecodingException; import it.grid.storm.srm.types.ArrayOfTSpaceToken; import it.grid.storm.srm.types.InvalidTSizeAttributesException; import it.grid.storm.srm.types.TLifeTimeInSeconds; -import it.grid.storm.srm.types.TSURL; import it.grid.storm.srm.types.TSizeInBytes; import it.grid.storm.srm.types.TSpaceToken; import it.grid.storm.srm.types.TSpaceType; -import java.util.ArrayList; -import java.util.Iterator; -import java.util.List; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - /** * This class is part of the StoRM project. Copyright (c) 2008 INFN-CNAF. *

@@ -65,365 +54,304 @@ public class SpaceHelper { - private static final int ADD_FREE_SPACE = 0; - private static final int REMOVE_FREE_SPACE = 1; - private Configuration config; - private static final Logger log = LoggerFactory.getLogger(SpaceHelper.class); - public static GridUserInterface storageAreaOwner = GridUserManager - .makeSAGridUser(); - - public SpaceHelper() { - - config = Configuration.getInstance(); - } - - public boolean isSAFull(Logger log, StoRI stori) { - - log.debug("Checking if the Storage Area is full"); - - VirtualFSInterface fs = stori.getVirtualFileSystem(); - ReservedSpaceCatalog catalog = new ReservedSpaceCatalog(); - - // Get StorageSpaceData from the database - String ssDesc = fs.getSpaceTokenDescription(); - StorageSpaceData spaceData = catalog.getStorageSpaceByAlias(ssDesc); - - if ((spaceData != null) && (spaceData.getAvailableSpaceSize().value() == 0)) { - log.debug("AvailableSize={}" , spaceData.getAvailableSpaceSize().value()); - return true; - } else { - return false; - } - - } - - public long getSAFreeSpace(Logger log, StoRI stori) { - - log.debug("Checking if the Storage Area is full"); - - VirtualFSInterface fs = stori.getVirtualFileSystem(); - ReservedSpaceCatalog catalog = new ReservedSpaceCatalog(); - - // Get StorageSpaceData from the database - String ssDesc = fs.getSpaceTokenDescription(); - StorageSpaceData spaceData = catalog.getStorageSpaceByAlias(ssDesc); - - if (spaceData != null) { - return spaceData.getAvailableSpaceSize().value(); - } else { - return -1; - } - - } - - /** - * Verifies if the storage area to which the provided stori belongs has been - * initialized The verification is made on used space field - * - * @param log - * @param stori - * @return - */ - public boolean isSAInitialized(Logger log, StoRI stori) { - - log.debug("Checking if the Storage Area is initialized"); - if (stori == null) { - throw new IllegalArgumentException( - "Unable to perform the SA initialization check, provided null parameters: 
log : " - + log + " , stori : " + stori); - } - boolean response = false; - VirtualFSInterface fs = stori.getVirtualFileSystem(); - ReservedSpaceCatalog catalog = new ReservedSpaceCatalog(); - // Get StorageSpaceData from the database - String ssDesc = fs.getSpaceTokenDescription(); - - StorageSpaceData spaceData = catalog.getStorageSpaceByAlias(ssDesc); - - if (spaceData != null && spaceData.getUsedSpaceSize() != null - && !spaceData.getUsedSpaceSize().isEmpty() - && spaceData.getUsedSpaceSize().value() >= 0) { - - response = true; - } - log.debug("The storage area is initialized with token alias {} is {} initialized" - , spaceData.getSpaceTokenAlias() , (response ? "" : "not")); - return response; - } - - /** - * - * @param log - * @param stori - * @return - */ - public TSpaceToken getTokenFromStoRI(Logger log, StoRI stori) { - - log.debug("SpaceHelper: getting space token from StoRI"); - VirtualFSInterface fs = stori.getVirtualFileSystem(); - return fs.getSpaceToken(); - - } - - /** - * Returns the spaceTokens associated to the 'user' AND 'spaceAlias'. If - * 'spaceAlias' is NULL or an empty string then this method returns all the - * space tokens this 'user' owns. - * - * @param user - * VomsGridUser user. - * @param spaceAlias - * User space token description. - */ - private Boolean isDefaultSpaceToken(TSpaceToken token) { - - Boolean found = false; - - config = Configuration.getInstance(); - List tokens = config.getListOfDefaultSpaceToken(); - for (int i = 0; i < tokens.size(); i++) { - if ((tokens.get(i)).toLowerCase().equals(token.getValue().toLowerCase())) { - found = true; - } - } - - return found; - } - - /** - * This method is used by the namespace parser component to insert a new Space - * Token Description data into the space catalog. In this way a standard Space - * Token is created, making it work for the GetSpaceMetaData request an - * SrmPreparateToPut with SpaceToken. 
- * - * The following code check if a SA_token with the same space description is - * already present into the catalog, if no data are found the new data are - * inserted, if yes the new data and the data already present are compared, - * and if needed an update operation is performed. - * - * The mandatory parameters are: - * - * @param spaceTokenAlias - * the space token description the user have to specify into the - * namespace.xml file - * @param totalOnLineSize - * the size the user have to specify into the namespace.xml file - * @param date - * @param spaceFileName - * the space file name will be used to get the free size. It is the - * StFNRoot. - */ - - public TSpaceToken createVOSA_Token(String spaceTokenAlias, - TSizeInBytes totalOnLineSize, String spaceFileName) { - - // TODO errors are not managed in this function - TSpaceToken spaceToken = null; - ArrayOfTSpaceToken tokenArray; - ReservedSpaceCatalog spaceCatalog = new ReservedSpaceCatalog(); - - // Try with fake user, if it does not work remove it and use different - // method - - // First, check if the same VOSpaceArea already exists - tokenArray = spaceCatalog.getSpaceTokensByAlias(spaceTokenAlias); - - if (tokenArray == null || tokenArray.size() == 0) { - // the VOSpaceArea does not exist yet - SpaceHelper.log.debug("VoSpaceArea {} still does not exists. Start creation process." 
, spaceTokenAlias); - - PFN sfname = null; - try { - sfname = PFN.make(spaceFileName); - } catch (InvalidPFNAttributeException e1) { - log.error("Error building PFN with {} : " , spaceFileName , e1); - } - - StorageSpaceData ssd = null; - - try { - ssd = new StorageSpaceData(storageAreaOwner, TSpaceType.VOSPACE, - spaceTokenAlias, totalOnLineSize, totalOnLineSize, - TLifeTimeInSeconds.makeInfinite(), null, null, sfname); - // ssd.setReservedSpaceSize(totalOnLineSize); - try { - ssd.setUnavailableSpaceSize(TSizeInBytes.make(0, SizeUnit.BYTES)); - ssd.setReservedSpaceSize(TSizeInBytes.make(0, SizeUnit.BYTES)); - - } catch (InvalidTSizeAttributesException e) { - // never thrown - log.error("Unexpected InvalidTSizeAttributesException: {}" - , e.getMessage(),e); - } - spaceToken = ssd.getSpaceToken(); - } catch (InvalidSpaceDataAttributesException e) { - log.error("Error building StorageSpaceData: " , e); - } - - try { - spaceCatalog.addStorageSpace(ssd); - } catch (DataAccessException e) { - log.error("Error storing StorageSpaceData on the DB: " , e); - } - // Track into global set to remove obsolete SA_token - ReservedSpaceCatalog.addSpaceToken(spaceToken); - - } else { - /* - * the VOspaceArea already exists. Compare new data and data already - * present to check if the parameter has changed or not, and then perform - * update operation into catalog if it is needed. Only static information - * changes determine an update of the exeisting row - */ - SpaceHelper.log.debug("VOSpaceArea for space token description " - + spaceTokenAlias + " already present into DB."); - - boolean equal = false; - spaceToken = tokenArray.getTSpaceToken(0); - StorageSpaceData catalog_ssd = null; - try { - catalog_ssd = spaceCatalog.getStorageSpace(spaceToken); - } catch (TransferObjectDecodingException e) { - log - .error("Unable to build StorageSpaceData from StorageSpaceTO. 
TransferObjectDecodingException: {}" - , e.getMessage(),e); - } catch (DataAccessException e) { - log.error("Unable to build get StorageSpaceTO. DataAccessException: {}" - , e.getMessage(),e); - } - - if (catalog_ssd != null) { - - if (catalog_ssd.getOwner().getDn().equals(storageAreaOwner.getDn()) - && (catalog_ssd.getSpaceTokenAlias().equals(spaceTokenAlias)) - && (catalog_ssd.getTotalSpaceSize().value() == totalOnLineSize - .value()) - && (catalog_ssd.getSpaceFileName().toString().equals(spaceFileName))) { - equal = true; - } - - } - - // false otherwise - if (equal) { - // Do nothing if equals, everything are already present into - // the DB - SpaceHelper.log.debug("VOSpaceArea for space token description {} is already up to date." - , spaceTokenAlias); - ReservedSpaceCatalog.addSpaceToken(spaceToken); - - } else { - // If the new data has been modified, update the data into the - // catalog - SpaceHelper.log.debug("VOSpaceArea for space token description {} is different in some parameters. Updating the catalog." - , spaceTokenAlias); - try { - catalog_ssd.setOwner(storageAreaOwner); - catalog_ssd.setTotalSpaceSize(totalOnLineSize); - catalog_ssd.setTotalGuaranteedSize(totalOnLineSize); - - PFN sfn = null; - try { - sfn = PFN.make(spaceFileName); - } catch (InvalidPFNAttributeException e) { - // TODO Auto-generated catch block - e.printStackTrace(); - } - catalog_ssd.setSpaceFileName(sfn); - - spaceCatalog.updateAllStorageSpace(catalog_ssd); - ReservedSpaceCatalog.addSpaceToken(spaceToken); - - } catch (NoDataFoundException e) { - // TODO Auto-generated catch block - e.printStackTrace(); - } catch (InvalidRetrievedDataException e) { - // TODO Auto-generated catch block - e.printStackTrace(); - } catch (MultipleDataEntriesException e) { - // TODO Auto-generated catch block - e.printStackTrace(); - } - - } - - // Warning. 
CHeck if there are multiple token with same alisa, this - // is not allowed - if (tokenArray.size() > 1) { - SpaceHelper.log - .error("Error: multiple Space Token found for the same space Alias: {}. Only one has been evaluated!" - , spaceTokenAlias); - } - - } - return spaceToken; - - } - - /** - * This method should be use at the end of the namespace insert process - * (through the createVO_SA_token(...)) to remmove from the database the old - * VO_SA_token inserted from the previous namsespace.xml configuration - * - */ - public void purgeOldVOSA_token() { - - purgeOldVOSA_token(SpaceHelper.log); - } - - public void purgeOldVOSA_token(Logger log) { - - ReservedSpaceCatalog spacec = new ReservedSpaceCatalog(); - log.debug("VO SA: garbage collecting obsolete VOSA_token"); - - Iterator iter = ReservedSpaceCatalog.getTokenSet().iterator(); - while (iter.hasNext()) { - log.debug("VO SA token REGISTRED: {}" , iter.next().getValue()); - } - - GridUserInterface stormServiceUser = GridUserManager.makeSAGridUser(); - - // Remove obsolete space - ArrayOfTSpaceToken token_a = spacec.getSpaceTokens(stormServiceUser, null); - for (int i = 0; i < token_a.size(); i++) { - log.debug("VO SA token IN CATALOG: {}" , token_a.getTSpaceToken(i).getValue()); - } - - if ((token_a != null) && (token_a.size() > 0)) { - for (int i = 0; i < token_a.size(); i++) { - - if (!ReservedSpaceCatalog.getTokenSet().contains( - token_a.getTSpaceToken(i))) { - // This VOSA_token is no more used, removing it from persistence - TSpaceToken tokenToRemove = token_a.getTSpaceToken(i); - log.debug("VO SA token {} is no more used, removing it from persistence." , tokenToRemove); - spacec.release(stormServiceUser, tokenToRemove); - } - } - } else { - log - .warn("Space Catalog garbage SA_Token: no SA TOKENs specified. 
Please check your namespace.xml file."); - } - - ReservedSpaceCatalog.clearTokenSet(); - - } - - /** - * @param spaceData - * @return - */ - public static boolean isStorageArea(StorageSpaceData spaceData) - throws IllegalArgumentException { - - if (spaceData == null) { - log.error("Received null spaceData parameter"); - throw new IllegalArgumentException("Received null spaceData parameter"); - } - boolean result = false; - if (spaceData.getOwner() != null) { - result = spaceData.getOwner().equals(SpaceHelper.storageAreaOwner); - } - return result; - } + private static final Logger log = LoggerFactory.getLogger(SpaceHelper.class); + public static GridUserInterface storageAreaOwner = GridUserManager.makeSAGridUser(); + + private ReservedSpaceCatalog catalog = ReservedSpaceCatalog.getInstance(); + + public boolean isSAFull(Logger log, StoRI stori) { + + log.debug("Checking if the Storage Area is full"); + + VirtualFSInterface fs = stori.getVirtualFileSystem(); + + // Get StorageSpaceData from the database + String ssDesc = fs.getSpaceTokenDescription(); + StorageSpaceData spaceData = catalog.getStorageSpaceByAlias(ssDesc); + + if ((spaceData != null) && (spaceData.getAvailableSpaceSize().value() == 0)) { + log.debug("AvailableSize={}", spaceData.getAvailableSpaceSize().value()); + return true; + } else { + return false; + } + + } + + public long getSAFreeSpace(Logger log, StoRI stori) { + + log.debug("Checking if the Storage Area is full"); + + VirtualFSInterface fs = stori.getVirtualFileSystem(); + + // Get StorageSpaceData from the database + String ssDesc = fs.getSpaceTokenDescription(); + StorageSpaceData spaceData = catalog.getStorageSpaceByAlias(ssDesc); + + if (spaceData != null) { + return spaceData.getAvailableSpaceSize().value(); + } else { + return -1; + } + + } + + /** + * Verifies if the storage area to which the provided stori belongs has been initialized The + * verification is made on used space field + * + * @param log + * @param stori + * @return + 
*/ + public boolean isSAInitialized(Logger log, StoRI stori) { + + log.debug("Checking if the Storage Area is initialized"); + if (stori == null) { + throw new IllegalArgumentException( + "Unable to perform the SA initialization check, provided null parameters: log : " + log + + " , stori : " + stori); + } + boolean response = false; + VirtualFSInterface fs = stori.getVirtualFileSystem(); + // Get StorageSpaceData from the database + String ssDesc = fs.getSpaceTokenDescription(); + + StorageSpaceData spaceData = catalog.getStorageSpaceByAlias(ssDesc); + + if (spaceData != null && spaceData.getUsedSpaceSize() != null + && !spaceData.getUsedSpaceSize().isEmpty() && spaceData.getUsedSpaceSize().value() >= 0) { + + response = true; + } + log.debug("The storage area is initialized with token alias {} is {} initialized", + spaceData.getSpaceTokenAlias(), (response ? "" : "not")); + return response; + } + + /** + * + * @param log + * @param stori + * @return + */ + public TSpaceToken getTokenFromStoRI(Logger log, StoRI stori) { + + log.debug("SpaceHelper: getting space token from StoRI"); + VirtualFSInterface fs = stori.getVirtualFileSystem(); + return fs.getSpaceToken(); + + } + + /** + * This method is used by the namespace parser component to insert a new Space Token Description + * data into the space catalog. In this way a standard Space Token is created, making it work for + * the GetSpaceMetaData request an SrmPreparateToPut with SpaceToken. + * + * The following code check if a SA_token with the same space description is already present into + * the catalog, if no data are found the new data are inserted, if yes the new data and the data + * already present are compared, and if needed an update operation is performed. 
+ * + * The mandatory parameters are: + * + * @param spaceTokenAlias the space token description the user have to specify into the + * namespace.xml file + * @param totalOnLineSize the size the user have to specify into the namespace.xml file + * @param date + * @param spaceFileName the space file name will be used to get the free size. It is the StFNRoot. + */ + + public TSpaceToken createVOSA_Token(String spaceTokenAlias, TSizeInBytes totalOnLineSize, + String spaceFileName) { + + TSpaceToken spaceToken = null; + ArrayOfTSpaceToken tokenArray; + + // Try with fake user, if it does not work remove it and use different + // method + + // First, check if the same VOSpaceArea already exists + tokenArray = catalog.getSpaceTokensByAlias(spaceTokenAlias); + + if (tokenArray == null || tokenArray.size() == 0) { + // the VOSpaceArea does not exist yet + SpaceHelper.log.debug("VoSpaceArea {} still does not exists. Start creation process.", + spaceTokenAlias); + + PFN sfname = null; + try { + sfname = PFN.make(spaceFileName); + } catch (InvalidPFNAttributeException e1) { + log.error("Error building PFN with {} : ", spaceFileName, e1); + } + + StorageSpaceData ssd = null; + + try { + ssd = new StorageSpaceData(storageAreaOwner, TSpaceType.VOSPACE, spaceTokenAlias, + totalOnLineSize, totalOnLineSize, TLifeTimeInSeconds.makeInfinite(), null, null, + sfname); + // ssd.setReservedSpaceSize(totalOnLineSize); + try { + ssd.setUnavailableSpaceSize(TSizeInBytes.make(0, SizeUnit.BYTES)); + ssd.setReservedSpaceSize(TSizeInBytes.make(0, SizeUnit.BYTES)); + + } catch (InvalidTSizeAttributesException e) { + // never thrown + log.error("Unexpected InvalidTSizeAttributesException: {}", e.getMessage(), e); + } + spaceToken = ssd.getSpaceToken(); + } catch (InvalidSpaceDataAttributesException e) { + log.error("Error building StorageSpaceData: ", e); + } + + try { + catalog.addStorageSpace(ssd); + } catch (DataAccessException e) { + log.error("Error storing StorageSpaceData on the DB: ", e); 
+ } + // Track into global set to remove obsolete SA_token + ReservedSpaceCatalog.addSpaceToken(spaceToken); + + } else { + /* + * the VOspaceArea already exists. Compare new data and data already present to check if the + * parameter has changed or not, and then perform update operation into catalog if it is + * needed. Only static information changes determine an update of the exeisting row + */ + SpaceHelper.log.debug("VOSpaceArea for space token description " + spaceTokenAlias + + " already present into DB."); + + boolean equal = false; + spaceToken = tokenArray.getTSpaceToken(0); + StorageSpaceData catalog_ssd = null; + try { + catalog_ssd = catalog.getStorageSpace(spaceToken); + } catch (TransferObjectDecodingException e) { + log.error( + "Unable to build StorageSpaceData from StorageSpaceTO. TransferObjectDecodingException: {}", + e.getMessage(), e); + } catch (DataAccessException e) { + log.error("Unable to build get StorageSpaceTO. DataAccessException: {}", e.getMessage(), e); + } + + if (catalog_ssd != null) { + + if (catalog_ssd.getOwner().getDn().equals(storageAreaOwner.getDn()) + && (catalog_ssd.getSpaceTokenAlias().equals(spaceTokenAlias)) + && (catalog_ssd.getTotalSpaceSize().value() == totalOnLineSize.value()) + && (catalog_ssd.getSpaceFileName().toString().equals(spaceFileName))) { + equal = true; + } + + } + + // false otherwise + if (equal) { + // Do nothing if equals, everything are already present into + // the DB + SpaceHelper.log.debug("VOSpaceArea for space token description {} is already up to date.", + spaceTokenAlias); + ReservedSpaceCatalog.addSpaceToken(spaceToken); + + } else { + // If the new data has been modified, update the data into the + // catalog + SpaceHelper.log.debug( + "VOSpaceArea for space token description {} is different in some parameters. 
Updating the catalog.", + spaceTokenAlias); + catalog_ssd.setOwner(storageAreaOwner); + catalog_ssd.setTotalSpaceSize(totalOnLineSize); + catalog_ssd.setTotalGuaranteedSize(totalOnLineSize); + + PFN sfn = null; + try { + sfn = PFN.make(spaceFileName); + } catch (InvalidPFNAttributeException e) { + e.printStackTrace(); + } + catalog_ssd.setSpaceFileName(sfn); + + catalog.updateAllStorageSpace(catalog_ssd); + ReservedSpaceCatalog.addSpaceToken(spaceToken); + + } + + // Warning. CHeck if there are multiple token with same alisa, this + // is not allowed + if (tokenArray.size() > 1) { + SpaceHelper.log.error( + "Error: multiple Space Token found for the same space Alias: {}. Only one has been evaluated!", + spaceTokenAlias); + } + + } + return spaceToken; + + } + + /** + * This method should be use at the end of the namespace insert process (through the + * createVO_SA_token(...)) to remmove from the database the old VO_SA_token inserted from the + * previous namsespace.xml configuration + * + */ + public void purgeOldVOSA_token() { + + purgeOldVOSA_token(SpaceHelper.log); + } + + public void purgeOldVOSA_token(Logger log) { + + log.debug("VO SA: garbage collecting obsolete VOSA_token"); + + Iterator iter = ReservedSpaceCatalog.getTokenSet().iterator(); + while (iter.hasNext()) { + log.debug("VO SA token REGISTRED: {}", iter.next().getValue()); + } + + GridUserInterface stormServiceUser = GridUserManager.makeSAGridUser(); + + // Remove obsolete space + ArrayOfTSpaceToken token_a = catalog.getSpaceTokens(stormServiceUser, null); + for (int i = 0; i < token_a.size(); i++) { + log.debug("VO SA token IN CATALOG: {}", token_a.getTSpaceToken(i).getValue()); + } + + if ((token_a != null) && (token_a.size() > 0)) { + for (int i = 0; i < token_a.size(); i++) { + + if (!ReservedSpaceCatalog.getTokenSet().contains(token_a.getTSpaceToken(i))) { + // This VOSA_token is no more used, removing it from persistence + TSpaceToken tokenToRemove = token_a.getTSpaceToken(i); + 
log.debug("VO SA token {} is no more used, removing it from persistence.", + tokenToRemove); + catalog.release(stormServiceUser, tokenToRemove); + } + } + } else { + log.warn( + "Space Catalog garbage SA_Token: no SA TOKENs specified. Please check your namespace.xml file."); + } + + ReservedSpaceCatalog.clearTokenSet(); + + } + + /** + * @param spaceData + * @return + */ + public static boolean isStorageArea(StorageSpaceData spaceData) throws IllegalArgumentException { + + if (spaceData == null) { + log.error("Received null spaceData parameter"); + throw new IllegalArgumentException("Received null spaceData parameter"); + } + boolean result = false; + if (spaceData.getOwner() != null) { + result = spaceData.getOwner().equals(SpaceHelper.storageAreaOwner); + } + return result; + } } diff --git a/src/main/java/it/grid/storm/space/StorageSpaceData.java b/src/main/java/it/grid/storm/space/StorageSpaceData.java index 2aaa7d184..c2f8b349d 100644 --- a/src/main/java/it/grid/storm/space/StorageSpaceData.java +++ b/src/main/java/it/grid/storm/space/StorageSpaceData.java @@ -27,7 +27,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import it.grid.storm.catalogs.InvalidSpaceDataAttributesException; import it.grid.storm.common.types.InvalidPFNAttributeException; import it.grid.storm.common.types.PFN; import it.grid.storm.common.types.SizeUnit; @@ -35,6 +34,7 @@ import it.grid.storm.common.types.VO; import it.grid.storm.griduser.GridUserInterface; import it.grid.storm.griduser.GridUserManager; +import it.grid.storm.persistence.exceptions.InvalidSpaceDataAttributesException; import it.grid.storm.persistence.model.StorageSpaceTO; import it.grid.storm.srm.types.InvalidTSizeAttributesException; import it.grid.storm.srm.types.InvalidTSpaceTokenAttributesException; diff --git a/src/main/java/it/grid/storm/space/gpfsquota/GPFSQuotaManager.java b/src/main/java/it/grid/storm/space/gpfsquota/GPFSQuotaManager.java index 0f1a7fa07..8e4a9b433 100644 --- 
a/src/main/java/it/grid/storm/space/gpfsquota/GPFSQuotaManager.java +++ b/src/main/java/it/grid/storm/space/gpfsquota/GPFSQuotaManager.java @@ -217,14 +217,14 @@ private void handleNoLimitsQuota(GPFSFilesetQuotaInfo info, StorageSpaceData ssd private StorageSpaceData getStorageSpaceDataForVFS(VirtualFSInterface vfs) { - ReservedSpaceCatalog rsc = new ReservedSpaceCatalog(); + ReservedSpaceCatalog rsc = ReservedSpaceCatalog.getInstance(); String spaceToken = vfs.getSpaceTokenDescription(); return rsc.getStorageSpaceByAlias(spaceToken); } private void persistStorageSpaceData(StorageSpaceData ssd) throws DataAccessException { - ReservedSpaceCatalog rsc = new ReservedSpaceCatalog(); + ReservedSpaceCatalog rsc = ReservedSpaceCatalog.getInstance(); rsc.updateStorageSpace(ssd); } diff --git a/src/main/java/it/grid/storm/srm/types/TRequestType.java b/src/main/java/it/grid/storm/srm/types/TRequestType.java index d1e998cf6..7666acc96 100644 --- a/src/main/java/it/grid/storm/srm/types/TRequestType.java +++ b/src/main/java/it/grid/storm/srm/types/TRequestType.java @@ -18,8 +18,8 @@ package it.grid.storm.srm.types; /** - * This class represents the ReqType of an SRM request. It is a simple - * application of the TypeSafe Enum Pattern. + * This class represents the ReqType of an SRM request. It is a simple application of the TypeSafe + * Enum Pattern. 
* * @author EGRID ICTP Trieste / CNAF Bologna * @date March 18th, 2005 @@ -27,44 +27,28 @@ */ public enum TRequestType { - PREPARE_TO_GET("PrepareToGet"), PREPARE_TO_PUT("PrepareToPut"), COPY("Copy"), BRING_ON_LINE( - "BringOnLine"), EMPTY("Empty"), UNKNOWN("Unknown"); + PREPARE_TO_GET("PrepareToGet"), PREPARE_TO_PUT("PrepareToPut"), COPY("Copy"), BRING_ON_LINE( + "BringOnLine"), EMPTY("Empty"); - private final String value; + private final String value; - private TRequestType(String value) { + private TRequestType(String value) { - this.value = value; - } + this.value = value; + } - public String getValue() { + public String getValue() { - return value; - } + return value; + } - /** - * Facility method that returns a TRequestType object given its String - * representation. If no TRequestType is found for the given String, an - * IllegalArgumentException is thrown. - */ - public static TRequestType getTRequestType(String type) - throws IllegalArgumentException { + public boolean isEmpty() { - for (TRequestType requestType : TRequestType.values()) { - if (requestType.getValue().equals(type)) { - return requestType; - } - } - return UNKNOWN; - } + return this.equals(EMPTY); + } - public boolean isEmpty() { + public String toString() { - return this.equals(EMPTY); - } - - public String toString() { - - return value; - } + return value; + } } diff --git a/src/main/java/it/grid/storm/synchcall/command/datatransfer/CommandException.java b/src/main/java/it/grid/storm/synchcall/command/datatransfer/CommandException.java index 8b234920d..b1823bdf3 100644 --- a/src/main/java/it/grid/storm/synchcall/command/datatransfer/CommandException.java +++ b/src/main/java/it/grid/storm/synchcall/command/datatransfer/CommandException.java @@ -36,15 +36,4 @@ public CommandException(String message) { super(message); } - - public CommandException(Throwable cause) { - - super(cause); - } - - public CommandException(String message, Throwable cause) { - - super(message, cause); - } - } diff 
--git a/src/main/java/it/grid/storm/synchcall/command/datatransfer/FileTransferRequestStatusCommand.java b/src/main/java/it/grid/storm/synchcall/command/datatransfer/FileTransferRequestStatusCommand.java index d51efd3c3..98ab5c891 100644 --- a/src/main/java/it/grid/storm/synchcall/command/datatransfer/FileTransferRequestStatusCommand.java +++ b/src/main/java/it/grid/storm/synchcall/command/datatransfer/FileTransferRequestStatusCommand.java @@ -1,11 +1,18 @@ package it.grid.storm.synchcall.command.datatransfer; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import it.grid.storm.authz.AuthzException; import it.grid.storm.catalogs.RequestSummaryCatalog; -import it.grid.storm.catalogs.RequestSummaryData; import it.grid.storm.catalogs.surl.SURLStatusManager; import it.grid.storm.catalogs.surl.SURLStatusManagerFactory; -import it.grid.storm.griduser.GridUserInterface; +import it.grid.storm.persistence.model.RequestSummaryData; import it.grid.storm.srm.types.ArrayOfTSURLReturnStatus; import it.grid.storm.srm.types.TRequestToken; import it.grid.storm.srm.types.TRequestType; @@ -21,14 +28,6 @@ import it.grid.storm.synchcall.data.datatransfer.ManageFileTransferRequestFilesInputData; import it.grid.storm.synchcall.data.datatransfer.ManageFileTransferRequestInputData; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - public abstract class FileTransferRequestStatusCommand extends DataTransferCommand implements Command { diff --git a/src/main/java/it/grid/storm/synchcall/command/datatransfer/PtPAbortExecutor.java b/src/main/java/it/grid/storm/synchcall/command/datatransfer/PtPAbortExecutor.java index c3710558a..791ab9c65 100644 --- a/src/main/java/it/grid/storm/synchcall/command/datatransfer/PtPAbortExecutor.java +++ 
b/src/main/java/it/grid/storm/synchcall/command/datatransfer/PtPAbortExecutor.java @@ -27,7 +27,6 @@ import it.grid.storm.authz.AuthzException; import it.grid.storm.catalogs.PtPChunkCatalog; -import it.grid.storm.catalogs.PtPPersistentChunkData; import it.grid.storm.catalogs.RequestSummaryCatalog; import it.grid.storm.catalogs.surl.SURLStatusManager; import it.grid.storm.catalogs.surl.SURLStatusManagerFactory; @@ -40,6 +39,7 @@ import it.grid.storm.namespace.NamespaceInterface; import it.grid.storm.namespace.StoRI; import it.grid.storm.namespace.UnapprochableSurlException; +import it.grid.storm.persistence.model.PtPPersistentChunkData; import it.grid.storm.srm.types.ArrayOfSURLs; import it.grid.storm.srm.types.ArrayOfTSURLReturnStatus; import it.grid.storm.srm.types.TRequestToken; diff --git a/src/main/java/it/grid/storm/synchcall/command/datatransfer/RequestUnknownException.java b/src/main/java/it/grid/storm/synchcall/command/datatransfer/RequestUnknownException.java index 0850ecf93..5d3671972 100644 --- a/src/main/java/it/grid/storm/synchcall/command/datatransfer/RequestUnknownException.java +++ b/src/main/java/it/grid/storm/synchcall/command/datatransfer/RequestUnknownException.java @@ -15,15 +15,4 @@ public RequestUnknownException(String message) { super(message); } - - public RequestUnknownException(Throwable cause) { - - super(cause); - } - - public RequestUnknownException(String message, Throwable cause) { - - super(message, cause); - } - } diff --git a/src/main/java/it/grid/storm/synchcall/command/directory/MkdirCommand.java b/src/main/java/it/grid/storm/synchcall/command/directory/MkdirCommand.java index c45168944..dd82e51a2 100644 --- a/src/main/java/it/grid/storm/synchcall/command/directory/MkdirCommand.java +++ b/src/main/java/it/grid/storm/synchcall/command/directory/MkdirCommand.java @@ -17,13 +17,29 @@ package it.grid.storm.synchcall.command.directory; +import static it.grid.storm.filesystem.FilesystemPermission.ListTraverse; +import static 
it.grid.storm.filesystem.FilesystemPermission.ListTraverseWrite; +import static it.grid.storm.srm.types.TStatusCode.SRM_AUTHORIZATION_FAILURE; +import static it.grid.storm.srm.types.TStatusCode.SRM_DUPLICATION_ERROR; +import static it.grid.storm.srm.types.TStatusCode.SRM_FAILURE; +import static it.grid.storm.srm.types.TStatusCode.SRM_INTERNAL_ERROR; +import static it.grid.storm.srm.types.TStatusCode.SRM_INVALID_PATH; +import static it.grid.storm.srm.types.TStatusCode.SRM_SUCCESS; +import static it.grid.storm.synchcall.command.directory.MkdirException.srmAuthorizationFailure; +import static it.grid.storm.synchcall.command.directory.MkdirException.srmFailure; +import static it.grid.storm.synchcall.command.directory.MkdirException.srmInternalError; +import static it.grid.storm.synchcall.command.directory.MkdirException.srmInvalidPath; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.collect.Lists; + import it.grid.storm.acl.AclManager; import it.grid.storm.acl.AclManagerFS; import it.grid.storm.authz.AuthzDecision; import it.grid.storm.authz.AuthzDirector; -import it.grid.storm.authz.SpaceAuthzInterface; import it.grid.storm.authz.path.model.SRMFileRequest; -import it.grid.storm.authz.sa.model.SRMSpaceRequest; import it.grid.storm.config.Configuration; import it.grid.storm.filesystem.FilesystemPermission; import it.grid.storm.filesystem.LocalFile; @@ -42,7 +58,6 @@ import it.grid.storm.srm.types.SRMCommandException; import it.grid.storm.srm.types.TReturnStatus; import it.grid.storm.srm.types.TSURL; -import it.grid.storm.srm.types.TSpaceToken; import it.grid.storm.srm.types.TStatusCode; import it.grid.storm.synchcall.command.Command; import it.grid.storm.synchcall.command.CommandHelper; @@ -53,25 +68,6 @@ import it.grid.storm.synchcall.data.directory.MkdirInputData; import it.grid.storm.synchcall.data.directory.MkdirOutputData; -import static it.grid.storm.filesystem.FilesystemPermission.ListTraverse; -import static 
it.grid.storm.filesystem.FilesystemPermission.ListTraverseWrite; -import static it.grid.storm.srm.types.TStatusCode.SRM_AUTHORIZATION_FAILURE; -import static it.grid.storm.srm.types.TStatusCode.SRM_DUPLICATION_ERROR; -import static it.grid.storm.srm.types.TStatusCode.SRM_FAILURE; -import static it.grid.storm.srm.types.TStatusCode.SRM_INTERNAL_ERROR; -import static it.grid.storm.srm.types.TStatusCode.SRM_INVALID_PATH; -import static it.grid.storm.srm.types.TStatusCode.SRM_SUCCESS; -import static it.grid.storm.synchcall.command.directory.MkdirException.srmAuthorizationFailure; -import static it.grid.storm.synchcall.command.directory.MkdirException.srmFailure; -import static it.grid.storm.synchcall.command.directory.MkdirException.srmInternalError; -import static it.grid.storm.synchcall.command.directory.MkdirException.srmInvalidPath; -import static java.lang.String.format; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import com.google.common.collect.Lists; - class MkdirException extends SRMCommandException { private static final long serialVersionUID = 1L; @@ -85,10 +81,6 @@ public static MkdirException srmInvalidPath(String message) { return new MkdirException(SRM_INVALID_PATH, message); } - public static MkdirException srmDuplicationError(String message) { - return new MkdirException(SRM_DUPLICATION_ERROR, message); - } - public static MkdirException srmInternalError(String message) { return new MkdirException(SRM_INTERNAL_ERROR, message); } @@ -237,22 +229,6 @@ private boolean isAnonymous(GridUserInterface user) { private void checkUserAuthorization(StoRI stori, GridUserInterface user) throws MkdirException { - TSpaceToken token = stori.getVirtualFileSystem().getSpaceToken(); - SpaceAuthzInterface spaceAuth = AuthzDirector.getSpaceAuthz(token); - - boolean isSpaceAuthorized; - if (isAnonymous(user)) { - isSpaceAuthorized = spaceAuth.authorizeAnonymous(SRMSpaceRequest.MD); - } else { - isSpaceAuthorized = spaceAuth.authorize(user, 
SRMSpaceRequest.MD); - } - if (!isSpaceAuthorized) { - String msg = - format("User not authorized to perform srmMkdir request on the storage area: %s", token); - log.debug("srmMkdir:{}", msg); - throw srmAuthorizationFailure(msg); - } - AuthzDecision decision; if (isAnonymous(user)) { decision = @@ -296,13 +272,10 @@ private void manageAcl(StoRI stori, GridUserInterface user) { configuration.getEnableWritePermOnDirectory() ? ListTraverseWrite : ListTraverse; try { - if (isAnonymous(user)) { - manageDefaultACL(stori.getLocalFile(), permission); - setHttpsServiceAcl(stori.getLocalFile(), permission); - } else { + if (!isAnonymous(user)) { setAcl(user, stori.getLocalFile(), stori.hasJustInTimeACLs(), permission); - manageDefaultACL(stori.getLocalFile(), permission); } + manageDefaultACL(stori.getLocalFile(), permission); } catch (NamespaceException | CannotMapUserException e) { log.error("srmMkdir: Unable to set ACL [{}]", e.getMessage()); } @@ -350,12 +323,6 @@ private void manageDefaultACL(LocalFile dir, FilesystemPermission permission) } } - private void setHttpsServiceAcl(LocalFile file, FilesystemPermission permission) { - - log.debug("SrmMkdir: Adding default ACL for directory {}: {}", file, permission); - aclManager.grantHttpsServiceGroupPermission(file, permission); - } - private void printRequestOutcome(TReturnStatus status, MkdirInputData inputData) { if (inputData != null) { diff --git a/src/main/java/it/grid/storm/synchcall/command/directory/MvCommand.java b/src/main/java/it/grid/storm/synchcall/command/directory/MvCommand.java index 3197b2786..fdd78a27e 100644 --- a/src/main/java/it/grid/storm/synchcall/command/directory/MvCommand.java +++ b/src/main/java/it/grid/storm/synchcall/command/directory/MvCommand.java @@ -17,29 +17,26 @@ package it.grid.storm.synchcall.command.directory; -import it.grid.storm.acl.AclManagerFS; +import java.util.Arrays; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import it.grid.storm.authz.AuthzDecision; import 
it.grid.storm.authz.AuthzDirector; -import it.grid.storm.authz.SpaceAuthzInterface; import it.grid.storm.authz.path.model.SRMFileRequest; -import it.grid.storm.authz.sa.model.SRMSpaceRequest; import it.grid.storm.catalogs.surl.SURLStatusManager; import it.grid.storm.catalogs.surl.SURLStatusManagerFactory; -import it.grid.storm.filesystem.FilesystemPermission; import it.grid.storm.filesystem.LocalFile; -import it.grid.storm.griduser.CannotMapUserException; -import it.grid.storm.griduser.LocalUser; import it.grid.storm.namespace.InvalidSURLException; import it.grid.storm.namespace.NamespaceDirector; import it.grid.storm.namespace.NamespaceException; import it.grid.storm.namespace.NamespaceInterface; import it.grid.storm.namespace.StoRI; import it.grid.storm.namespace.UnapprochableSurlException; -import it.grid.storm.space.SpaceHelper; import it.grid.storm.srm.types.InvalidTSURLAttributesException; import it.grid.storm.srm.types.TReturnStatus; import it.grid.storm.srm.types.TSURL; -import it.grid.storm.srm.types.TSpaceToken; import it.grid.storm.srm.types.TStatusCode; import it.grid.storm.synchcall.command.Command; import it.grid.storm.synchcall.command.CommandHelper; @@ -51,15 +48,9 @@ import it.grid.storm.synchcall.data.directory.MvInputData; import it.grid.storm.synchcall.data.directory.MvOutputData; -import java.util.Arrays; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - /** - * This class is part of the StoRM project. Copyright: Copyright (c) 2008 - * Company: INFN-CNAF and ICTP/EGRID project This class implements the SrmMv - * Command. + * This class is part of the StoRM project. Copyright: Copyright (c) 2008 Company: INFN-CNAF and + * ICTP/EGRID project This class implements the SrmMv Command. 
* * @author lucamag * @date May 28, 2008 @@ -69,563 +60,422 @@ public class MvCommand extends DirectoryCommand implements Command { public static final Logger log = LoggerFactory.getLogger(MvCommand.class); - private static final String SRM_COMMAND = "SrmMv"; - private final NamespaceInterface namespace; - - public MvCommand() { - - namespace = NamespaceDirector.getNamespace(); - - } - - /** - * Method that provide SrmMv functionality. - * - * @param inputData - * Contains information about input data for Mv request. - * @return outputData Contains output data - */ - public OutputData execute(InputData data) { - - log.debug("srmMv: Start execution."); - MvOutputData outputData = new MvOutputData(); - MvInputData inputData = (MvInputData) data; - - /** - * Validate MvInputData. The check is done at this level to separate - * internal StoRM logic from xmlrpc specific operation. - */ - - if ((inputData == null) || (inputData.getFromSURL() == null) - || (inputData.getToSURL() == null)) { - outputData.setStatus(CommandHelper.buildStatus(TStatusCode.SRM_FAILURE, - "Invalid parameter specified.")); - log.warn("srmMv: Request failed with [status: {}]", - outputData.getStatus()); - - return outputData; - } - - TSURL fromSURL = inputData.getFromSURL(); - - if (fromSURL.isEmpty()) { - log.warn("srmMv: unable to perform the operation, empty fromSurl"); - outputData.setStatus(CommandHelper.buildStatus( - TStatusCode.SRM_INVALID_PATH, "Invalid fromSURL specified!")); - printRequestOutcome(outputData.getStatus(), inputData); - return outputData; - } - - TSURL toSURL = inputData.getToSURL(); - - if (toSURL.isEmpty()) { - log.error("srmMv: unable to perform the operation, empty toSurl"); - outputData.setStatus(CommandHelper.buildStatus( - TStatusCode.SRM_INVALID_PATH, "Invalid toSURL specified!")); - printRequestOutcome(outputData.getStatus(), inputData); - return outputData; - } - - StoRI fromStori = null; - try { - if (inputData instanceof IdentityInputData) { - try { - fromStori 
= namespace.resolveStoRIbySURL(fromSURL, - ((IdentityInputData) inputData).getUser()); - } catch (UnapprochableSurlException e) { - log.info("srmMv: Unable to build a stori for surl {} for user {}. {}", - fromSURL, - DataHelper.getRequestor(inputData), - e.getMessage()); - - outputData.setStatus(CommandHelper.buildStatus( - TStatusCode.SRM_AUTHORIZATION_FAILURE, e.getMessage())); - printRequestOutcome(outputData.getStatus(), inputData); - return outputData; - } catch (NamespaceException e) { - log.info("srmMv: Unable to build a stori for surl {} for user {}. {}", - fromSURL, - DataHelper.getRequestor(inputData), - e.getMessage()); - outputData.setStatus(CommandHelper.buildStatus( - TStatusCode.SRM_INTERNAL_ERROR, e.getMessage())); - printRequestOutcome(outputData.getStatus(), inputData); - return outputData; - } catch (InvalidSURLException e) { - log.info("srmMv: Unable to build a stori for surl {} for user {}. {}", - fromSURL, - DataHelper.getRequestor(inputData), - e.getMessage()); - outputData.setStatus(CommandHelper.buildStatus( - TStatusCode.SRM_INVALID_PATH, e.getMessage())); - printRequestOutcome(outputData.getStatus(), inputData); - } - } else { - try { - fromStori = namespace.resolveStoRIbySURL(fromSURL); - } catch (UnapprochableSurlException e) { - log.info("srmMv: Unable to build a stori for surl {}. {}",fromSURL - ,e.getMessage()); - outputData.setStatus(CommandHelper.buildStatus( - TStatusCode.SRM_AUTHORIZATION_FAILURE, e.getMessage())); - printRequestOutcome(outputData.getStatus(), inputData); - return outputData; - } catch (NamespaceException e) { - log.info("srmMv: Unable to build a stori for surl {}. {}",fromSURL - ,e.getMessage()); - outputData.setStatus(CommandHelper.buildStatus( - TStatusCode.SRM_INTERNAL_ERROR, e.getMessage())); - printRequestOutcome(outputData.getStatus(), inputData); - return outputData; - } catch (InvalidSURLException e) { - log.info("srmMv: Unable to build a stori for surl {}. 
{}",fromSURL - ,e.getMessage()); - outputData.setStatus(CommandHelper.buildStatus( - TStatusCode.SRM_INVALID_PATH, e.getMessage())); - printRequestOutcome(outputData.getStatus(), inputData); - return outputData; - } - } - } catch (IllegalArgumentException e) { - log.warn("srmMv: Unable to build StoRI by SURL: {}. {}", fromSURL - ,e.getMessage()); - outputData.setStatus(CommandHelper.buildStatus( - TStatusCode.SRM_INVALID_REQUEST, "Unable to build StoRI by SURL")); - printRequestOutcome(outputData.getStatus(), inputData); - return outputData; - } - - StoRI toStori = null;; - try { - if (inputData instanceof IdentityInputData) { - try { - toStori = namespace.resolveStoRIbySURL(toSURL, - ((IdentityInputData) inputData).getUser()); - } catch (UnapprochableSurlException e) { - log.info("srmMv: Unable to build a stori for surl {} for user {}. {}", - fromSURL, - DataHelper.getRequestor(inputData), - e.getMessage()); - outputData.setStatus(CommandHelper.buildStatus( - TStatusCode.SRM_AUTHORIZATION_FAILURE, e.getMessage())); - printRequestOutcome(outputData.getStatus(), inputData); - return outputData; - } catch (NamespaceException e) { - log.info("srmMv: Unable to build a stori for surl {} for user {}. {}", - fromSURL, - DataHelper.getRequestor(inputData), - e.getMessage()); - outputData.setStatus(CommandHelper.buildStatus( - TStatusCode.SRM_INTERNAL_ERROR, e.getMessage())); - printRequestOutcome(outputData.getStatus(), inputData); - return outputData; - } catch (InvalidSURLException e) { - log.info("srmMv: Unable to build a stori for surl {} for user {}. {}", - fromSURL, - DataHelper.getRequestor(inputData), - e.getMessage()); - outputData.setStatus(CommandHelper.buildStatus( - TStatusCode.SRM_INVALID_PATH, e.getMessage())); - printRequestOutcome(outputData.getStatus(), inputData); - return outputData; - } - } else { - try { - toStori = namespace.resolveStoRIbySURL(toSURL); - } catch (UnapprochableSurlException e) { - log.info("srmMv: Unable to build a stori for surl {}. 
{}",fromSURL - ,e.getMessage()); - outputData.setStatus(CommandHelper.buildStatus( - TStatusCode.SRM_AUTHORIZATION_FAILURE, e.getMessage())); - printRequestOutcome(outputData.getStatus(), inputData); - return outputData; - } catch (NamespaceException e) { - log.info("srmMv: Unable to build a stori for surl {}. {}",fromSURL - ,e.getMessage()); - outputData.setStatus(CommandHelper.buildStatus( - TStatusCode.SRM_INTERNAL_ERROR, e.getMessage())); - printRequestOutcome(outputData.getStatus(), inputData); - return outputData; - } catch (InvalidSURLException e) { - log.info("srmMv: Unable to build a stori for surl {}. {}",fromSURL - ,e.getMessage()); - outputData.setStatus(CommandHelper.buildStatus( - TStatusCode.SRM_INVALID_PATH, e.getMessage())); - printRequestOutcome(outputData.getStatus(), inputData); - return outputData; - } - } - } catch (IllegalArgumentException e) { - log.error("srmMv: Unable to build StoRI by SURL: {}. {}", fromSURL - ,e.getMessage(),e); - outputData.setStatus(CommandHelper.buildStatus( - TStatusCode.SRM_INTERNAL_ERROR, - "Unable to build StoRI by destination SURL")); - printRequestOutcome(outputData.getStatus(), inputData); - return outputData; - } - - TSpaceToken token = new SpaceHelper().getTokenFromStoRI(log, fromStori); - SpaceAuthzInterface spaceAuth = AuthzDirector.getSpaceAuthz(token); - - boolean isSpaceAuthorized; - if (inputData instanceof IdentityInputData) { - isSpaceAuthorized = spaceAuth.authorize( - ((IdentityInputData) inputData).getUser(), SRMSpaceRequest.MV); - } else { - isSpaceAuthorized = spaceAuth.authorizeAnonymous(SRMSpaceRequest.MV); - } - if (!isSpaceAuthorized) { - log.debug("srmMv: User not authorized to perform srmMv on SA: {}", token); - outputData.setStatus(CommandHelper.buildStatus( - TStatusCode.SRM_AUTHORIZATION_FAILURE, - ": User not authorized to perform srmMv on SA: " + token)); - printRequestOutcome(outputData.getStatus(), inputData); - return outputData; - } - - if (fromStori.getLocalFile().getPath() - 
.compareTo(toStori.getLocalFile().getPath()) == 0) { - outputData.setStatus(CommandHelper.buildStatus(TStatusCode.SRM_SUCCESS, - "Source SURL and target SURL are the same file.")); - printRequestOutcome(outputData.getStatus(), inputData); - return outputData; - } - - if (toStori.getLocalFile().exists()) { - if (toStori.getLocalFile().isDirectory()) { - try { - toStori = buildDestinationStoryForFolder(toSURL, fromStori, data); - } catch (IllegalArgumentException e) { - log.debug("srmMv : Unable to build StoRI for SURL {}. {}", - toSURL, e.getMessage()); - - outputData.setStatus(CommandHelper.buildStatus( - TStatusCode.SRM_INTERNAL_ERROR, "Unable to build StoRI by SURL")); - printRequestOutcome(outputData.getStatus(), inputData); - return outputData; - } catch (UnapprochableSurlException e) { - log.info("srmMv: Unable to build a stori for surl {} for user {}. {}", - toSURL, DataHelper.getRequestor(inputData),e.getMessage()); - - outputData.setStatus(CommandHelper.buildStatus( - TStatusCode.SRM_AUTHORIZATION_FAILURE, e.getMessage())); - printRequestOutcome(outputData.getStatus(), inputData); - return outputData; - } catch (InvalidTSURLAttributesException e) { - log.info("srmMv: Unable to build a stori for surl {} for user {}. {}", - toSURL, DataHelper.getRequestor(inputData),e.getMessage()); - - outputData.setStatus(CommandHelper.buildStatus( - TStatusCode.SRM_INVALID_PATH, "Invalid toSURL specified!")); - printRequestOutcome(outputData.getStatus(), inputData); - return outputData; - } catch (NamespaceException e) { - log.info("srmMv: Unable to build a stori for surl {} for user {}. {}", - toSURL, DataHelper.getRequestor(inputData),e.getMessage()); - - outputData.setStatus(CommandHelper.buildStatus( - TStatusCode.SRM_INTERNAL_ERROR, e.getMessage())); - printRequestOutcome(outputData.getStatus(), inputData); - return outputData; - } catch (InvalidSURLException e) { - log.info("srmMv: Unable to build a stori for surl {} for user {}. 
{}", - toSURL, DataHelper.getRequestor(inputData),e.getMessage()); - - outputData.setStatus(CommandHelper.buildStatus( - TStatusCode.SRM_INVALID_PATH, e.getMessage())); - printRequestOutcome(outputData.getStatus(), inputData); - return outputData; - } - } else { - log.debug("srmMv : destination SURL {} already exists.", toSURL); - outputData.setStatus(CommandHelper - .buildStatus(TStatusCode.SRM_DUPLICATION_ERROR, - "destination SURL already exists!")); - printRequestOutcome(outputData.getStatus(), inputData); - return outputData; - } - } - - AuthzDecision sourceDecision; - if (inputData instanceof IdentityInputData) { - sourceDecision = AuthzDirector.getPathAuthz().authorize( - ((IdentityInputData) inputData).getUser(), SRMFileRequest.MV_source, - fromStori, toStori); - } else { - sourceDecision = AuthzDirector.getPathAuthz().authorizeAnonymous( - SRMFileRequest.MV_source, fromStori, toStori); - } - AuthzDecision destinationDecision; - if (inputData instanceof IdentityInputData) { - destinationDecision = AuthzDirector.getPathAuthz().authorize( - ((IdentityInputData) inputData).getUser(), SRMFileRequest.MV_dest, - fromStori, toStori); - } else { - destinationDecision = AuthzDirector.getPathAuthz().authorizeAnonymous( - SRMFileRequest.MV_dest, fromStori, toStori); - } - TReturnStatus returnStatus; - if ((sourceDecision.equals(AuthzDecision.PERMIT)) - && (destinationDecision.equals(AuthzDecision.PERMIT))) { - - log.debug("SrmMv: Mv authorized for user {}. Source: {}. 
Target: {}", - DataHelper.getRequestor(inputData), - fromStori.getPFN(), - toStori.getPFN()); - - returnStatus = manageAuthorizedMV(fromStori, toStori.getLocalFile()); - if (returnStatus.isSRM_SUCCESS()) { - LocalUser user = null; - if (inputData instanceof IdentityInputData) { - try { - user = ((IdentityInputData) inputData).getUser().getLocalUser(); - } catch (CannotMapUserException e) { - log - .warn("srmMv: user mapping error {}", e.getMessage()); - - if (log.isDebugEnabled()){ - log.error(e.getMessage(),e); - } - - returnStatus - .extendExplaination("unable to set user acls on the destination file"); - } - } - if (user != null) { - setAcl(fromStori, toStori, user); - } else { - setAcl(fromStori, toStori); - } - } else { - log.warn("srmMv: <{}> Request for [fromSURL={}; toSURL={}] failed with [status: {}]", - DataHelper.getRequestor(inputData), - fromSURL, - toSURL, - returnStatus); - } - } else { - - String errorMsg = "Authorization error"; - - if (sourceDecision.equals(AuthzDecision.PERMIT)) { - errorMsg = - "User is not authorized to create and/or write the destination file"; - } else { - if (destinationDecision.equals(AuthzDecision.PERMIT)) { - errorMsg = - "User is not authorized to read and/or delete the source file"; - } else { - errorMsg = - "User is neither authorized to read and/or delete the source file " - + "nor to create and/or write the destination file"; - } - } - - returnStatus = - CommandHelper.buildStatus(TStatusCode.SRM_AUTHORIZATION_FAILURE, - errorMsg); - } - outputData.setStatus(returnStatus); - printRequestOutcome(outputData.getStatus(), inputData); - return outputData; - } - - private StoRI buildDestinationStoryForFolder(TSURL toSURL, StoRI fromStori, - InputData inputData) throws IllegalArgumentException, - InvalidTSURLAttributesException, UnapprochableSurlException, - NamespaceException, InvalidSURLException { - - StoRI toStori; - String toSURLString = toSURL.getSURLString(); - if (!(toSURLString.endsWith("/"))) { - toSURLString += 
"/"; - } - toSURLString += fromStori.getFilename(); - log.debug("srmMv: New toSURL: {}", toSURLString); - if (inputData instanceof IdentityInputData) { - toStori = namespace.resolveStoRIbySURL( - TSURL.makeFromStringValidate(toSURLString), - ((IdentityInputData) inputData).getUser()); - } else { - toStori = namespace.resolveStoRIbySURL(TSURL - .makeFromStringValidate(toSURLString)); - } - return toStori; - } - - private void setAcl(StoRI oldFileStoRI, StoRI newFileStoRI) { - - try { - AclManagerFS.getInstance().moveHttpsPermissions( - oldFileStoRI.getLocalFile(), newFileStoRI.getLocalFile()); - } catch (IllegalArgumentException e) { - log - .error("Unable to move permissions from the old to the new file.{}", - e.getMessage(), e); - } - } - - private void setAcl(StoRI oldFileStoRI, StoRI newFileStoRI, - LocalUser localUser) { - - setAcl(oldFileStoRI, newFileStoRI); - if (newFileStoRI.hasJustInTimeACLs()) { - // JiT - try { - AclManagerFS.getInstance().grantHttpsUserPermission( - newFileStoRI.getLocalFile(), localUser, - FilesystemPermission.ReadWrite); - } catch (IllegalArgumentException e) { - log - .error("Unable to grant user read and write permission on file. {}", - e.getMessage(), - e); - } - } else { - // AoT - try { - AclManagerFS.getInstance().grantHttpsGroupPermission( - newFileStoRI.getLocalFile(), localUser, - FilesystemPermission.ReadWrite); - } catch (IllegalArgumentException e) { - log - .error("Unable to grant group read and write permission on file. {}" - ,e.getMessage(),e); - } - } - } - - /** - * Split PFN , recursive creation is not supported, as reported at page 16 of - * Srm v2.1 spec. 
- * - * @param user - * VomsGridUser - * @param LocalFile - * fromFile - * @param LocalFile - * toFile - * @return TReturnStatus - */ - private TReturnStatus manageAuthorizedMV(StoRI fromStori, LocalFile toFile) { - - boolean creationDone; - - String explanation = ""; - TStatusCode statusCode = TStatusCode.EMPTY; - - LocalFile fromFile = fromStori.getLocalFile(); - LocalFile toParent = toFile.getParentFile(); - - /* - * Controllare che File sorgente esiste Esiste directory destinazione(che - * esista e sia directory) Non esiste file deestinazione - */ - - boolean sourceExists = false; - boolean targetDirExists = false; - boolean targetFileExists = false; - - if (fromFile != null) { - sourceExists = fromFile.exists(); - } - - if (toParent != null) { - targetDirExists = toParent.exists() && toParent.isDirectory(); - } - - if (toFile != null) { - targetFileExists = toFile.exists(); - } - - if (sourceExists && targetDirExists && !targetFileExists) { - - SURLStatusManager checker = SURLStatusManagerFactory - .newSURLStatusManager(); - - if(checker.isSURLBusy(fromStori.getSURL())){ - log - .debug("srmMv request failure: fromSURL is busy."); - explanation = "There is an active SrmPrepareToPut on from SURL."; - return CommandHelper - .buildStatus(TStatusCode.SRM_FILE_BUSY, explanation); - } - - /** - * Check if there is an active SrmPrepareToGet on the source SURL. In that - * case SrmMv() fails with SRM_FILE_BUSY. - */ - - if (checker.isSURLPinned(fromStori.getSURL())){ - log - .debug("SrmMv: requests fails because the source SURL is being used from other requests."); - explanation = "There is an active SrmPrepareToGet on from SURL"; - return CommandHelper - .buildStatus(TStatusCode.SRM_FILE_BUSY, explanation); - } - - /** - * Perform the SrmMv() operation. 
- */ - creationDone = fromFile.renameTo(toFile.getPath()); - - if (creationDone) { - log.debug("SrmMv: Request success!"); - explanation = "SURL moved with success"; - statusCode = TStatusCode.SRM_SUCCESS; - } else { - log.debug("SrmMv: Requests fails because the path is invalid."); - explanation = "Invalid path"; - statusCode = TStatusCode.SRM_INVALID_PATH; - } - - } else { - if (!sourceExists) { // and it is a file - log - .debug("SrmMv: request fails because the source SURL does not exists!"); - explanation = "Source SURL does not exists!"; - statusCode = TStatusCode.SRM_INVALID_PATH; - } else { - if (!targetDirExists) { - log - .debug("SrmMv: request fails because the target directory does not exitts."); - explanation = "Target directory does not exits!"; - statusCode = TStatusCode.SRM_INVALID_PATH; - } else { - if (targetFileExists) { - log.debug("SrmMv: request fails because the target SURL exists."); - explanation = "Target SURL exists!"; - statusCode = TStatusCode.SRM_DUPLICATION_ERROR; - } else { - log.debug("SrmMv request failure! 
That is a BUG!"); - explanation = "That is a bug!"; - statusCode = TStatusCode.SRM_INTERNAL_ERROR; - } - } - } - } - - return CommandHelper.buildStatus(statusCode, explanation); - } - - private void printRequestOutcome(TReturnStatus status, MvInputData inputData) { - - if (inputData != null) { - if (inputData.getFromSURL() != null && inputData.getToSURL() != null) { - CommandHelper.printRequestOutcome( - SRM_COMMAND, - log, - status, - inputData, - Arrays.asList(new String[] { inputData.getFromSURL().toString(), - inputData.getFromSURL().toString() })); - } else { - CommandHelper.printRequestOutcome(SRM_COMMAND, log, status, inputData); - } - } else { - CommandHelper.printRequestOutcome(SRM_COMMAND, log, status); - } - } + private static final String SRM_COMMAND = "SrmMv"; + private final NamespaceInterface namespace; + + public MvCommand() { + + namespace = NamespaceDirector.getNamespace(); + + } + + /** + * Method that provide SrmMv functionality. + * + * @param inputData Contains information about input data for Mv request. + * @return outputData Contains output data + */ + public OutputData execute(InputData data) { + + log.debug("srmMv: Start execution."); + MvOutputData outputData = new MvOutputData(); + MvInputData inputData = (MvInputData) data; + + /** + * Validate MvInputData. The check is done at this level to separate internal StoRM logic from + * xmlrpc specific operation. 
+ */ + + if ((inputData == null) || (inputData.getFromSURL() == null) + || (inputData.getToSURL() == null)) { + outputData.setStatus( + CommandHelper.buildStatus(TStatusCode.SRM_FAILURE, "Invalid parameter specified.")); + log.warn("srmMv: Request failed with [status: {}]", outputData.getStatus()); + + return outputData; + } + + TSURL fromSURL = inputData.getFromSURL(); + + if (fromSURL.isEmpty()) { + log.warn("srmMv: unable to perform the operation, empty fromSurl"); + outputData.setStatus( + CommandHelper.buildStatus(TStatusCode.SRM_INVALID_PATH, "Invalid fromSURL specified!")); + printRequestOutcome(outputData.getStatus(), inputData); + return outputData; + } + + TSURL toSURL = inputData.getToSURL(); + + if (toSURL.isEmpty()) { + log.error("srmMv: unable to perform the operation, empty toSurl"); + outputData.setStatus( + CommandHelper.buildStatus(TStatusCode.SRM_INVALID_PATH, "Invalid toSURL specified!")); + printRequestOutcome(outputData.getStatus(), inputData); + return outputData; + } + + StoRI fromStori = null; + try { + if (inputData instanceof IdentityInputData) { + try { + fromStori = + namespace.resolveStoRIbySURL(fromSURL, ((IdentityInputData) inputData).getUser()); + } catch (UnapprochableSurlException e) { + log.info("srmMv: Unable to build a stori for surl {} for user {}. {}", fromSURL, + DataHelper.getRequestor(inputData), e.getMessage()); + + outputData.setStatus( + CommandHelper.buildStatus(TStatusCode.SRM_AUTHORIZATION_FAILURE, e.getMessage())); + printRequestOutcome(outputData.getStatus(), inputData); + return outputData; + } catch (NamespaceException e) { + log.info("srmMv: Unable to build a stori for surl {} for user {}. 
{}", fromSURL, + DataHelper.getRequestor(inputData), e.getMessage()); + outputData + .setStatus(CommandHelper.buildStatus(TStatusCode.SRM_INTERNAL_ERROR, e.getMessage())); + printRequestOutcome(outputData.getStatus(), inputData); + return outputData; + } catch (InvalidSURLException e) { + log.info("srmMv: Unable to build a stori for surl {} for user {}. {}", fromSURL, + DataHelper.getRequestor(inputData), e.getMessage()); + outputData + .setStatus(CommandHelper.buildStatus(TStatusCode.SRM_INVALID_PATH, e.getMessage())); + printRequestOutcome(outputData.getStatus(), inputData); + } + } else { + try { + fromStori = namespace.resolveStoRIbySURL(fromSURL); + } catch (UnapprochableSurlException e) { + log.info("srmMv: Unable to build a stori for surl {}. {}", fromSURL, e.getMessage()); + outputData.setStatus( + CommandHelper.buildStatus(TStatusCode.SRM_AUTHORIZATION_FAILURE, e.getMessage())); + printRequestOutcome(outputData.getStatus(), inputData); + return outputData; + } catch (NamespaceException e) { + log.info("srmMv: Unable to build a stori for surl {}. {}", fromSURL, e.getMessage()); + outputData + .setStatus(CommandHelper.buildStatus(TStatusCode.SRM_INTERNAL_ERROR, e.getMessage())); + printRequestOutcome(outputData.getStatus(), inputData); + return outputData; + } catch (InvalidSURLException e) { + log.info("srmMv: Unable to build a stori for surl {}. {}", fromSURL, e.getMessage()); + outputData + .setStatus(CommandHelper.buildStatus(TStatusCode.SRM_INVALID_PATH, e.getMessage())); + printRequestOutcome(outputData.getStatus(), inputData); + return outputData; + } + } + } catch (IllegalArgumentException e) { + log.warn("srmMv: Unable to build StoRI by SURL: {}. 
{}", fromSURL, e.getMessage()); + outputData.setStatus(CommandHelper.buildStatus(TStatusCode.SRM_INVALID_REQUEST, + "Unable to build StoRI by SURL")); + printRequestOutcome(outputData.getStatus(), inputData); + return outputData; + } + + StoRI toStori = null;; + try { + if (inputData instanceof IdentityInputData) { + try { + toStori = namespace.resolveStoRIbySURL(toSURL, ((IdentityInputData) inputData).getUser()); + } catch (UnapprochableSurlException e) { + log.info("srmMv: Unable to build a stori for surl {} for user {}. {}", fromSURL, + DataHelper.getRequestor(inputData), e.getMessage()); + outputData.setStatus( + CommandHelper.buildStatus(TStatusCode.SRM_AUTHORIZATION_FAILURE, e.getMessage())); + printRequestOutcome(outputData.getStatus(), inputData); + return outputData; + } catch (NamespaceException e) { + log.info("srmMv: Unable to build a stori for surl {} for user {}. {}", fromSURL, + DataHelper.getRequestor(inputData), e.getMessage()); + outputData + .setStatus(CommandHelper.buildStatus(TStatusCode.SRM_INTERNAL_ERROR, e.getMessage())); + printRequestOutcome(outputData.getStatus(), inputData); + return outputData; + } catch (InvalidSURLException e) { + log.info("srmMv: Unable to build a stori for surl {} for user {}. {}", fromSURL, + DataHelper.getRequestor(inputData), e.getMessage()); + outputData + .setStatus(CommandHelper.buildStatus(TStatusCode.SRM_INVALID_PATH, e.getMessage())); + printRequestOutcome(outputData.getStatus(), inputData); + return outputData; + } + } else { + try { + toStori = namespace.resolveStoRIbySURL(toSURL); + } catch (UnapprochableSurlException e) { + log.info("srmMv: Unable to build a stori for surl {}. {}", fromSURL, e.getMessage()); + outputData.setStatus( + CommandHelper.buildStatus(TStatusCode.SRM_AUTHORIZATION_FAILURE, e.getMessage())); + printRequestOutcome(outputData.getStatus(), inputData); + return outputData; + } catch (NamespaceException e) { + log.info("srmMv: Unable to build a stori for surl {}. 
{}", fromSURL, e.getMessage()); + outputData + .setStatus(CommandHelper.buildStatus(TStatusCode.SRM_INTERNAL_ERROR, e.getMessage())); + printRequestOutcome(outputData.getStatus(), inputData); + return outputData; + } catch (InvalidSURLException e) { + log.info("srmMv: Unable to build a stori for surl {}. {}", fromSURL, e.getMessage()); + outputData + .setStatus(CommandHelper.buildStatus(TStatusCode.SRM_INVALID_PATH, e.getMessage())); + printRequestOutcome(outputData.getStatus(), inputData); + return outputData; + } + } + } catch (IllegalArgumentException e) { + log.error("srmMv: Unable to build StoRI by SURL: {}. {}", fromSURL, e.getMessage(), e); + outputData.setStatus(CommandHelper.buildStatus(TStatusCode.SRM_INTERNAL_ERROR, + "Unable to build StoRI by destination SURL")); + printRequestOutcome(outputData.getStatus(), inputData); + return outputData; + } + + if (fromStori.getLocalFile().getPath().compareTo(toStori.getLocalFile().getPath()) == 0) { + outputData.setStatus(CommandHelper.buildStatus(TStatusCode.SRM_SUCCESS, + "Source SURL and target SURL are the same file.")); + printRequestOutcome(outputData.getStatus(), inputData); + return outputData; + } + + if (toStori.getLocalFile().exists()) { + if (toStori.getLocalFile().isDirectory()) { + try { + toStori = buildDestinationStoryForFolder(toSURL, fromStori, data); + } catch (IllegalArgumentException e) { + log.debug("srmMv : Unable to build StoRI for SURL {}. {}", toSURL, e.getMessage()); + + outputData.setStatus(CommandHelper.buildStatus(TStatusCode.SRM_INTERNAL_ERROR, + "Unable to build StoRI by SURL")); + printRequestOutcome(outputData.getStatus(), inputData); + return outputData; + } catch (UnapprochableSurlException e) { + log.info("srmMv: Unable to build a stori for surl {} for user {}. 
{}", toSURL, + DataHelper.getRequestor(inputData), e.getMessage()); + + outputData.setStatus( + CommandHelper.buildStatus(TStatusCode.SRM_AUTHORIZATION_FAILURE, e.getMessage())); + printRequestOutcome(outputData.getStatus(), inputData); + return outputData; + } catch (InvalidTSURLAttributesException e) { + log.info("srmMv: Unable to build a stori for surl {} for user {}. {}", toSURL, + DataHelper.getRequestor(inputData), e.getMessage()); + + outputData.setStatus( + CommandHelper.buildStatus(TStatusCode.SRM_INVALID_PATH, "Invalid toSURL specified!")); + printRequestOutcome(outputData.getStatus(), inputData); + return outputData; + } catch (NamespaceException e) { + log.info("srmMv: Unable to build a stori for surl {} for user {}. {}", toSURL, + DataHelper.getRequestor(inputData), e.getMessage()); + + outputData + .setStatus(CommandHelper.buildStatus(TStatusCode.SRM_INTERNAL_ERROR, e.getMessage())); + printRequestOutcome(outputData.getStatus(), inputData); + return outputData; + } catch (InvalidSURLException e) { + log.info("srmMv: Unable to build a stori for surl {} for user {}. 
{}", toSURL, + DataHelper.getRequestor(inputData), e.getMessage()); + + outputData + .setStatus(CommandHelper.buildStatus(TStatusCode.SRM_INVALID_PATH, e.getMessage())); + printRequestOutcome(outputData.getStatus(), inputData); + return outputData; + } + } else { + log.debug("srmMv : destination SURL {} already exists.", toSURL); + outputData.setStatus(CommandHelper.buildStatus(TStatusCode.SRM_DUPLICATION_ERROR, + "destination SURL already exists!")); + printRequestOutcome(outputData.getStatus(), inputData); + return outputData; + } + } + + AuthzDecision sourceDecision; + if (inputData instanceof IdentityInputData) { + sourceDecision = AuthzDirector.getPathAuthz() + .authorize(((IdentityInputData) inputData).getUser(), SRMFileRequest.MV_source, fromStori, + toStori); + } else { + sourceDecision = AuthzDirector.getPathAuthz() + .authorizeAnonymous(SRMFileRequest.MV_source, fromStori, toStori); + } + AuthzDecision destinationDecision; + if (inputData instanceof IdentityInputData) { + destinationDecision = AuthzDirector.getPathAuthz() + .authorize(((IdentityInputData) inputData).getUser(), SRMFileRequest.MV_dest, fromStori, + toStori); + } else { + destinationDecision = AuthzDirector.getPathAuthz() + .authorizeAnonymous(SRMFileRequest.MV_dest, fromStori, toStori); + } + TReturnStatus returnStatus; + if ((sourceDecision.equals(AuthzDecision.PERMIT)) + && (destinationDecision.equals(AuthzDecision.PERMIT))) { + + log.debug("SrmMv: Mv authorized for user {}. Source: {}. 
Target: {}", + DataHelper.getRequestor(inputData), fromStori.getPFN(), toStori.getPFN()); + + returnStatus = manageAuthorizedMV(fromStori, toStori.getLocalFile()); + + } else { + + String errorMsg = "Authorization error"; + + if (sourceDecision.equals(AuthzDecision.PERMIT)) { + errorMsg = "User is not authorized to create and/or write the destination file"; + } else { + if (destinationDecision.equals(AuthzDecision.PERMIT)) { + errorMsg = "User is not authorized to read and/or delete the source file"; + } else { + errorMsg = "User is neither authorized to read and/or delete the source file " + + "nor to create and/or write the destination file"; + } + } + + returnStatus = CommandHelper.buildStatus(TStatusCode.SRM_AUTHORIZATION_FAILURE, errorMsg); + } + outputData.setStatus(returnStatus); + printRequestOutcome(outputData.getStatus(), inputData); + return outputData; + } + + private StoRI buildDestinationStoryForFolder(TSURL toSURL, StoRI fromStori, InputData inputData) + throws IllegalArgumentException, InvalidTSURLAttributesException, UnapprochableSurlException, + NamespaceException, InvalidSURLException { + + StoRI toStori; + String toSURLString = toSURL.getSURLString(); + if (!(toSURLString.endsWith("/"))) { + toSURLString += "/"; + } + toSURLString += fromStori.getFilename(); + log.debug("srmMv: New toSURL: {}", toSURLString); + if (inputData instanceof IdentityInputData) { + toStori = namespace.resolveStoRIbySURL(TSURL.makeFromStringValidate(toSURLString), + ((IdentityInputData) inputData).getUser()); + } else { + toStori = namespace.resolveStoRIbySURL(TSURL.makeFromStringValidate(toSURLString)); + } + return toStori; + } + + /** + * Split PFN , recursive creation is not supported, as reported at page 16 of Srm v2.1 spec. 
+ * + * @param user VomsGridUser + * @param LocalFile fromFile + * @param LocalFile toFile + * @return TReturnStatus + */ + private TReturnStatus manageAuthorizedMV(StoRI fromStori, LocalFile toFile) { + + boolean creationDone; + + String explanation = ""; + TStatusCode statusCode = TStatusCode.EMPTY; + + LocalFile fromFile = fromStori.getLocalFile(); + LocalFile toParent = toFile.getParentFile(); + + /* + * Controllare che File sorgente esiste Esiste directory destinazione(che esista e sia + * directory) Non esiste file deestinazione + */ + + boolean sourceExists = false; + boolean targetDirExists = false; + boolean targetFileExists = false; + + if (fromFile != null) { + sourceExists = fromFile.exists(); + } + + if (toParent != null) { + targetDirExists = toParent.exists() && toParent.isDirectory(); + } + + if (toFile != null) { + targetFileExists = toFile.exists(); + } + + if (sourceExists && targetDirExists && !targetFileExists) { + + SURLStatusManager checker = SURLStatusManagerFactory.newSURLStatusManager(); + + if (checker.isSURLBusy(fromStori.getSURL())) { + log.debug("srmMv request failure: fromSURL is busy."); + explanation = "There is an active SrmPrepareToPut on from SURL."; + return CommandHelper.buildStatus(TStatusCode.SRM_FILE_BUSY, explanation); + } + + /** + * Check if there is an active SrmPrepareToGet on the source SURL. In that case SrmMv() fails + * with SRM_FILE_BUSY. + */ + + if (checker.isSURLPinned(fromStori.getSURL())) { + log.debug( + "SrmMv: requests fails because the source SURL is being used from other requests."); + explanation = "There is an active SrmPrepareToGet on from SURL"; + return CommandHelper.buildStatus(TStatusCode.SRM_FILE_BUSY, explanation); + } + + /** + * Perform the SrmMv() operation. 
+ */ + creationDone = fromFile.renameTo(toFile.getPath()); + + if (creationDone) { + log.debug("SrmMv: Request success!"); + explanation = "SURL moved with success"; + statusCode = TStatusCode.SRM_SUCCESS; + } else { + log.debug("SrmMv: Request fails because the path is invalid."); + explanation = "Invalid path"; + statusCode = TStatusCode.SRM_INVALID_PATH; + } + + } else { + if (!sourceExists) { // and it is a file + log.debug("SrmMv: request fails because the source SURL does not exist!"); + explanation = "Source SURL does not exist!"; + statusCode = TStatusCode.SRM_INVALID_PATH; + } else { + if (!targetDirExists) { + log.debug("SrmMv: request fails because the target directory does not exist."); + explanation = "Target directory does not exist!"; + statusCode = TStatusCode.SRM_INVALID_PATH; + } else { + if (targetFileExists) { + log.debug("SrmMv: request fails because the target SURL exists."); + explanation = "Target SURL exists!"; + statusCode = TStatusCode.SRM_DUPLICATION_ERROR; + } else { + log.debug("SrmMv request failure! 
That is a BUG!"); + explanation = "That is a bug!"; + statusCode = TStatusCode.SRM_INTERNAL_ERROR; + } + } + } + } + + return CommandHelper.buildStatus(statusCode, explanation); + } + + private void printRequestOutcome(TReturnStatus status, MvInputData inputData) { + + if (inputData != null) { + if (inputData.getFromSURL() != null && inputData.getToSURL() != null) { + CommandHelper.printRequestOutcome(SRM_COMMAND, log, status, inputData, Arrays.asList( + new String[] {inputData.getFromSURL().toString(), inputData.getFromSURL().toString()})); + } else { + CommandHelper.printRequestOutcome(SRM_COMMAND, log, status, inputData); + } + } else { + CommandHelper.printRequestOutcome(SRM_COMMAND, log, status); + } + } } diff --git a/src/main/java/it/grid/storm/synchcall/command/directory/RmCommand.java b/src/main/java/it/grid/storm/synchcall/command/directory/RmCommand.java index 3a41ffadd..141938807 100644 --- a/src/main/java/it/grid/storm/synchcall/command/directory/RmCommand.java +++ b/src/main/java/it/grid/storm/synchcall/command/directory/RmCommand.java @@ -24,9 +24,7 @@ import it.grid.storm.authz.AuthzDecision; import it.grid.storm.authz.AuthzDirector; -import it.grid.storm.authz.SpaceAuthzInterface; import it.grid.storm.authz.path.model.SRMFileRequest; -import it.grid.storm.authz.sa.model.SRMSpaceRequest; import it.grid.storm.catalogs.surl.SURLStatusManager; import it.grid.storm.catalogs.surl.SURLStatusManagerFactory; import it.grid.storm.filesystem.LocalFile; @@ -42,7 +40,6 @@ import it.grid.storm.srm.types.TReturnStatus; import it.grid.storm.srm.types.TSURL; import it.grid.storm.srm.types.TSURLReturnStatus; -import it.grid.storm.srm.types.TSpaceToken; import it.grid.storm.srm.types.TStatusCode; import it.grid.storm.synchcall.command.Command; import it.grid.storm.synchcall.command.CommandHelper; @@ -270,20 +267,6 @@ private StoRI resolveStoRI(TSURL surl, GridUserInterface user) throws RmExceptio private void checkUserAuthorization(StoRI stori, GridUserInterface 
user) throws RmException { - TSpaceToken token = stori.getVirtualFileSystem().getSpaceToken(); - SpaceAuthzInterface spaceAuth = AuthzDirector.getSpaceAuthz(token); - - boolean isSpaceAuthorized; - if (isAnonymous(user)) { - isSpaceAuthorized = spaceAuth.authorizeAnonymous(SRMSpaceRequest.RM); - } else { - isSpaceAuthorized = spaceAuth.authorize(user, SRMSpaceRequest.RM); - } - if (!isSpaceAuthorized) { - log.debug("srmRm: User not authorized to perform srmRm on SA: {}", token); - throw new RmException(TStatusCode.SRM_AUTHORIZATION_FAILURE, - "User not authorized to perform srmRm request on the storage area"); - } AuthzDecision decision; if (isAnonymous(user)) { decision = diff --git a/src/main/java/it/grid/storm/synchcall/command/directory/RmdirCommand.java b/src/main/java/it/grid/storm/synchcall/command/directory/RmdirCommand.java index eb485b8e1..8ead85380 100644 --- a/src/main/java/it/grid/storm/synchcall/command/directory/RmdirCommand.java +++ b/src/main/java/it/grid/storm/synchcall/command/directory/RmdirCommand.java @@ -24,9 +24,7 @@ import it.grid.storm.authz.AuthzDecision; import it.grid.storm.authz.AuthzDirector; -import it.grid.storm.authz.SpaceAuthzInterface; import it.grid.storm.authz.path.model.SRMFileRequest; -import it.grid.storm.authz.sa.model.SRMSpaceRequest; import it.grid.storm.filesystem.LocalFile; import it.grid.storm.griduser.GridUserInterface; import it.grid.storm.namespace.InvalidSURLException; @@ -38,7 +36,6 @@ import it.grid.storm.srm.types.SRMCommandException; import it.grid.storm.srm.types.TReturnStatus; import it.grid.storm.srm.types.TSURL; -import it.grid.storm.srm.types.TSpaceToken; import it.grid.storm.srm.types.TStatusCode; import it.grid.storm.synchcall.command.Command; import it.grid.storm.synchcall.command.CommandHelper; @@ -60,31 +57,33 @@ public RmdirException(TStatusCode code, String message) { } } + class TSize { - - private long size; - - TSize(long size) { - this.size = size; - } - - public void add(long n) { - size += n; 
- } - - public void dec(long n) { - size -= n; - } - - public long get() { - return size; - } - + + private long size; + + TSize(long size) { + this.size = size; + } + + public void add(long n) { + size += n; + } + + public void dec(long n) { + size -= n; + } + + public long get() { + return size; + } + } + /** - * This class is part of the StoRM project. Copyright: Copyright (c) 2008 - * Company: INFN-CNAF and ICTP/EGRID project + * This class is part of the StoRM project. Copyright: Copyright (c) 2008 Company: INFN-CNAF and + * ICTP/EGRID project * * @author lucamag * @date May 27, 2008 @@ -92,253 +91,223 @@ public long get() { public class RmdirCommand extends DirectoryCommand implements Command { - + public static final Logger log = LoggerFactory.getLogger(RmdirCommand.class); - private static final String SRM_COMMAND = "srmRmdir"; - private final NamespaceInterface namespace; - - public RmdirCommand() { - - namespace = NamespaceDirector.getNamespace(); - - } - - /** - * Method that provide SrmRmdir functionality. - * - * @param inputData - * Contains information about input data for Rmdir request. - * @return OutputData Contains output data - */ - public OutputData execute(InputData data) { - - RmdirOutputData outputData = null; - log.debug("SrmRmdir: Start execution."); - checkInputData(data); - outputData = doRmdir((RmdirInputData) data); - log.debug("srmRmdir return status: {}", outputData.getStatus()); - printRequestOutcome(outputData.getStatus(), (RmdirInputData) data); - return outputData; - - } - - private RmdirOutputData doRmdir(RmdirInputData data) { - - TSURL surl = null; - GridUserInterface user = null; - StoRI stori = null; - TReturnStatus returnStatus = null; - boolean recursion = false; - TSize size = new TSize(0); - - try { - surl = getSURL(data); - user = getUser(data); - recursion = isRecursive(data); - stori = resolveStoRI(surl, user); - checkUserAuthorization(stori, user); - log.debug("srmRmdir: rmdir authorized for {}. Dir={}. 
Recursive={}", - userToString(user), stori.getPFN(), recursion); - returnStatus = removeFolder(stori.getLocalFile(), recursion, size); - log.debug("srmRmdir: decrease used space of {} bytes", size.get()); - try { - decreaseUsedSpace(stori.getLocalFile(), size.get()); - } catch (NamespaceException e) { - log.error("srmRmdir: {}", e.getMessage()); - returnStatus.extendExplaination("Unable to decrease used space: " - + e.getMessage()); - } - } catch (RmdirException e) { - log.error("srmRmdir: {}", e.getMessage()); - returnStatus = e.getReturnStatus(); - } - - log.debug("srmRmdir: returned status is {}", returnStatus); - return new RmdirOutputData(returnStatus); - } - - private void checkInputData(InputData data) - throws IllegalArgumentException { - - if (data == null) { - throw new IllegalArgumentException("Invalid input data: NULL"); - } - if (!(data instanceof RmdirInputData)) { - throw new IllegalArgumentException("Invalid input data type"); - } - } - - private StoRI resolveStoRI(TSURL surl, GridUserInterface user) - throws RmdirException { - - String formatStr = "Unable to build a stori for surl {} for user {}: {}"; - try { - return namespace.resolveStoRIbySURL(surl, user); - } catch (UnapprochableSurlException e) { - log.error(formatStr, surl, userToString(user), e.getMessage()); - throw new RmdirException(TStatusCode.SRM_AUTHORIZATION_FAILURE, - e.getMessage()); - } catch (NamespaceException e) { - log.error(formatStr, surl, userToString(user), e.getMessage()); - throw new RmdirException(TStatusCode.SRM_INTERNAL_ERROR, e.getMessage()); - } catch (InvalidSURLException e) { - log.error(formatStr, surl, userToString(user), e.getMessage()); - throw new RmdirException(TStatusCode.SRM_INVALID_PATH, e.getMessage()); - } catch (IllegalArgumentException e) { - log.error(formatStr, surl, userToString(user), e.getMessage()); - throw new RmdirException(TStatusCode.SRM_INTERNAL_ERROR, e.getMessage()); - } - } - - private boolean isAnonymous(GridUserInterface user) { - - 
return (user == null); - } - - private String userToString(GridUserInterface user) { - - return isAnonymous(user) ? "anonymous" : user.getDn(); - } - - private void checkUserAuthorization(StoRI stori, GridUserInterface user) - throws RmdirException { - - TSpaceToken token = stori.getVirtualFileSystem().getSpaceToken(); - SpaceAuthzInterface spaceAuth = AuthzDirector.getSpaceAuthz(token); - - boolean isSpaceAuthorized; - if (isAnonymous(user)) { - isSpaceAuthorized = spaceAuth.authorizeAnonymous(SRMSpaceRequest.RMD); - } else { - isSpaceAuthorized = spaceAuth.authorize(user, SRMSpaceRequest.RMD); - } - if (!isSpaceAuthorized) { - log.debug("srmRmdir: User not authorized to perform srmRmdir request " - + "on the storage area: {}", token); - throw new RmdirException(TStatusCode.SRM_AUTHORIZATION_FAILURE, - "User is not authorized to remove the directory on the storage area " - + token); - } - - AuthzDecision decision; - if (isAnonymous(user)) { - decision = AuthzDirector.getPathAuthz().authorizeAnonymous( - SRMFileRequest.RMD, stori.getStFN()); - } else { - decision = AuthzDirector.getPathAuthz().authorize(user, - SRMFileRequest.RMD, stori); - } - if (!decision.equals(AuthzDecision.PERMIT)) { - log.debug("srmRmdir: User is not authorized to delete the directory"); - throw new RmdirException(TStatusCode.SRM_AUTHORIZATION_FAILURE, - "User is not authorized to remove the directory"); - } - return; - } - - private GridUserInterface getUser(InputData data) { - - if (data instanceof IdentityInputData) { - return ((IdentityInputData) data).getUser(); - } - return null; - } - - private TSURL getSURL(RmdirInputData data) throws RmdirException { - - TSURL surl = ((RmdirInputData) data).getSurl(); - if (surl == null) { - throw new RmdirException(TStatusCode.SRM_FAILURE, - "SURL specified is NULL"); - } - if (surl.isEmpty()) { - throw new RmdirException(TStatusCode.SRM_FAILURE, - "SURL specified is empty"); - } - return surl; - } - - private boolean isRecursive(RmdirInputData 
data) { - - return data.getRecursive().booleanValue(); - } - - private void decreaseUsedSpace(LocalFile localFile, long sizeToRemove) - throws NamespaceException { - - NamespaceDirector.getNamespace().resolveVFSbyLocalFile(localFile) - .decreaseUsedSpace(sizeToRemove); - } - - private TReturnStatus removeFolder(LocalFile dir, boolean recursive, TSize size) - throws RmdirException { - - /* - * Check if dir exists and is a directory, if recursion is enabled when - * directory is not empty, etc... - */ - - if (!dir.exists()) { - return new TReturnStatus(TStatusCode.SRM_INVALID_PATH, - "Directory does not exists"); - } - if (!dir.isDirectory()) { - return new TReturnStatus(TStatusCode.SRM_INVALID_PATH, "Not a directory"); - } - if (!recursive && (dir.listFiles().length > 0)) { - return new TReturnStatus(TStatusCode.SRM_NON_EMPTY_DIRECTORY, - "Directory is not empty"); - } - - if (recursive) { - LocalFile[] list = dir.listFiles(); - log.debug("srmRmdir: removing {} content", dir); - for (LocalFile element : list) { - log.debug("srmRmdir: removing {}", element); - if (element.isDirectory()) { - removeFolder(element, recursive, size); - } else { - removeFile(element, size); - } - } - } - log.debug("srmRmdir: removing {}", dir); - removeEmptyDirectory(dir, size); - return new TReturnStatus(TStatusCode.SRM_SUCCESS, "Directory removed with success!"); - } - - private void removeEmptyDirectory(LocalFile directory, TSize size) - throws RmdirException { - - removeFile(directory, size); - } - - private void removeFile(LocalFile file, TSize size) throws RmdirException { - - long fileSize = file.length(); - if (!file.delete()) { - log.error("srmRmdir: Unable to delete {}", file); - throw new RmdirException(TStatusCode.SRM_FAILURE, - "Unable to delete " + file.getAbsolutePath()); - } - size.add(fileSize); - } - - private void printRequestOutcome(TReturnStatus status, - RmdirInputData inputData) { - - if (inputData != null) { - if (inputData.getSurl() != null) { - 
CommandHelper.printRequestOutcome(SRM_COMMAND, log, status, inputData, - Arrays.asList(inputData.getSurl().toString())); - } else { - CommandHelper.printRequestOutcome(SRM_COMMAND, log, status, inputData); - } - - } else { - CommandHelper.printRequestOutcome(SRM_COMMAND, log, status); - } - } - -} \ No newline at end of file + private static final String SRM_COMMAND = "srmRmdir"; + private final NamespaceInterface namespace; + + public RmdirCommand() { + + namespace = NamespaceDirector.getNamespace(); + + } + + /** + * Method that provide SrmRmdir functionality. + * + * @param inputData Contains information about input data for Rmdir request. + * @return OutputData Contains output data + */ + public OutputData execute(InputData data) { + + RmdirOutputData outputData = null; + log.debug("SrmRmdir: Start execution."); + checkInputData(data); + outputData = doRmdir((RmdirInputData) data); + log.debug("srmRmdir return status: {}", outputData.getStatus()); + printRequestOutcome(outputData.getStatus(), (RmdirInputData) data); + return outputData; + + } + + private RmdirOutputData doRmdir(RmdirInputData data) { + + TSURL surl = null; + GridUserInterface user = null; + StoRI stori = null; + TReturnStatus returnStatus = null; + boolean recursion = false; + TSize size = new TSize(0); + + try { + surl = getSURL(data); + user = getUser(data); + recursion = isRecursive(data); + stori = resolveStoRI(surl, user); + checkUserAuthorization(stori, user); + log.debug("srmRmdir: rmdir authorized for {}. Dir={}. 
Recursive={}", userToString(user), + stori.getPFN(), recursion); + returnStatus = removeFolder(stori.getLocalFile(), recursion, size); + log.debug("srmRmdir: decrease used space of {} bytes", size.get()); + try { + decreaseUsedSpace(stori.getLocalFile(), size.get()); + } catch (NamespaceException e) { + log.error("srmRmdir: {}", e.getMessage()); + returnStatus.extendExplaination("Unable to decrease used space: " + e.getMessage()); + } + } catch (RmdirException e) { + log.error("srmRmdir: {}", e.getMessage()); + returnStatus = e.getReturnStatus(); + } + + log.debug("srmRmdir: returned status is {}", returnStatus); + return new RmdirOutputData(returnStatus); + } + + private void checkInputData(InputData data) throws IllegalArgumentException { + + if (data == null) { + throw new IllegalArgumentException("Invalid input data: NULL"); + } + if (!(data instanceof RmdirInputData)) { + throw new IllegalArgumentException("Invalid input data type"); + } + } + + private StoRI resolveStoRI(TSURL surl, GridUserInterface user) throws RmdirException { + + String formatStr = "Unable to build a stori for surl {} for user {}: {}"; + try { + return namespace.resolveStoRIbySURL(surl, user); + } catch (UnapprochableSurlException e) { + log.error(formatStr, surl, userToString(user), e.getMessage()); + throw new RmdirException(TStatusCode.SRM_AUTHORIZATION_FAILURE, e.getMessage()); + } catch (NamespaceException e) { + log.error(formatStr, surl, userToString(user), e.getMessage()); + throw new RmdirException(TStatusCode.SRM_INTERNAL_ERROR, e.getMessage()); + } catch (InvalidSURLException e) { + log.error(formatStr, surl, userToString(user), e.getMessage()); + throw new RmdirException(TStatusCode.SRM_INVALID_PATH, e.getMessage()); + } catch (IllegalArgumentException e) { + log.error(formatStr, surl, userToString(user), e.getMessage()); + throw new RmdirException(TStatusCode.SRM_INTERNAL_ERROR, e.getMessage()); + } + } + + private boolean isAnonymous(GridUserInterface user) { + + return 
(user == null); + } + + private String userToString(GridUserInterface user) { + + return isAnonymous(user) ? "anonymous" : user.getDn(); + } + + private void checkUserAuthorization(StoRI stori, GridUserInterface user) throws RmdirException { + + AuthzDecision decision; + if (isAnonymous(user)) { + decision = + AuthzDirector.getPathAuthz().authorizeAnonymous(SRMFileRequest.RMD, stori.getStFN()); + } else { + decision = AuthzDirector.getPathAuthz().authorize(user, SRMFileRequest.RMD, stori); + } + if (!decision.equals(AuthzDecision.PERMIT)) { + log.debug("srmRmdir: User is not authorized to delete the directory"); + throw new RmdirException(TStatusCode.SRM_AUTHORIZATION_FAILURE, + "User is not authorized to remove the directory"); + } + return; + } + + private GridUserInterface getUser(InputData data) { + + if (data instanceof IdentityInputData) { + return ((IdentityInputData) data).getUser(); + } + return null; + } + + private TSURL getSURL(RmdirInputData data) throws RmdirException { + + TSURL surl = ((RmdirInputData) data).getSurl(); + if (surl == null) { + throw new RmdirException(TStatusCode.SRM_FAILURE, "SURL specified is NULL"); + } + if (surl.isEmpty()) { + throw new RmdirException(TStatusCode.SRM_FAILURE, "SURL specified is empty"); + } + return surl; + } + + private boolean isRecursive(RmdirInputData data) { + + return data.getRecursive().booleanValue(); + } + + private void decreaseUsedSpace(LocalFile localFile, long sizeToRemove) throws NamespaceException { + + NamespaceDirector.getNamespace() + .resolveVFSbyLocalFile(localFile) + .decreaseUsedSpace(sizeToRemove); + } + + private TReturnStatus removeFolder(LocalFile dir, boolean recursive, TSize size) + throws RmdirException { + + /* + * Check if dir exists and is a directory, if recursion is enabled when directory is not empty, + * etc... 
+ */ + + if (!dir.exists()) { + return new TReturnStatus(TStatusCode.SRM_INVALID_PATH, "Directory does not exists"); + } + if (!dir.isDirectory()) { + return new TReturnStatus(TStatusCode.SRM_INVALID_PATH, "Not a directory"); + } + if (!recursive && (dir.listFiles().length > 0)) { + return new TReturnStatus(TStatusCode.SRM_NON_EMPTY_DIRECTORY, "Directory is not empty"); + } + + if (recursive) { + LocalFile[] list = dir.listFiles(); + log.debug("srmRmdir: removing {} content", dir); + for (LocalFile element : list) { + log.debug("srmRmdir: removing {}", element); + if (element.isDirectory()) { + removeFolder(element, recursive, size); + } else { + removeFile(element, size); + } + } + } + log.debug("srmRmdir: removing {}", dir); + removeEmptyDirectory(dir, size); + return new TReturnStatus(TStatusCode.SRM_SUCCESS, "Directory removed with success!"); + } + + private void removeEmptyDirectory(LocalFile directory, TSize size) throws RmdirException { + + removeFile(directory, size); + } + + private void removeFile(LocalFile file, TSize size) throws RmdirException { + + long fileSize = file.length(); + if (!file.delete()) { + log.error("srmRmdir: Unable to delete {}", file); + throw new RmdirException(TStatusCode.SRM_FAILURE, + "Unable to delete " + file.getAbsolutePath()); + } + size.add(fileSize); + } + + private void printRequestOutcome(TReturnStatus status, RmdirInputData inputData) { + + if (inputData != null) { + if (inputData.getSurl() != null) { + CommandHelper.printRequestOutcome(SRM_COMMAND, log, status, inputData, + Arrays.asList(inputData.getSurl().toString())); + } else { + CommandHelper.printRequestOutcome(SRM_COMMAND, log, status, inputData); + } + + } else { + CommandHelper.printRequestOutcome(SRM_COMMAND, log, status); + } + } + +} diff --git a/src/main/java/it/grid/storm/synchcall/command/space/GetSpaceMetaDataCommand.java b/src/main/java/it/grid/storm/synchcall/command/space/GetSpaceMetaDataCommand.java index a6013fb7c..ea9bbefa8 100644 --- 
a/src/main/java/it/grid/storm/synchcall/command/space/GetSpaceMetaDataCommand.java +++ b/src/main/java/it/grid/storm/synchcall/command/space/GetSpaceMetaDataCommand.java @@ -45,12 +45,11 @@ import it.grid.storm.synchcall.data.space.IdentityGetSpaceMetaDataInputData; /** - * This class is part of the StoRM project. Copyright: Copyright (c) 2008 - * Company: INFN-CNAF and ICTP/EGRID project + * This class is part of the StoRM project. Copyright: Copyright (c) 2008 Company: INFN-CNAF and + * ICTP/EGRID project * - * This class represents the GetSpaceMetaDataManager Class. This class hava a - * reseveSpace method that perform all operation nedded to satisfy a SRM space - * release request. + * This class represents the GetSpaceMetaDataManager Class. This class has a reserveSpace method + * that performs all operations needed to satisfy a SRM space release request. * * @author lucamag * @date May 29, 2008 @@ -59,174 +58,153 @@ public class GetSpaceMetaDataCommand extends SpaceCommand implements Command { - public static final Logger log = LoggerFactory - .getLogger(GetSpaceMetaDataCommand.class); - - private ReservedSpaceCatalog catalog = null; - - private static final String SRM_COMMAND = "srmGetSpaceMetaData"; - - /** - * Constructor. 
Bind the Executor with ReservedSpaceCatalog - */ - - public GetSpaceMetaDataCommand() { - - catalog = new ReservedSpaceCatalog(); - } - - /** - * - * @param data - * GetSpaceMetaDataInputData - * @return GetSpaceMetaDataOutputData - */ - public OutputData execute(InputData indata) { - - log.debug(""); - log.debug(" Updating SA with GPFS quotas results"); - GPFSQuotaManager.INSTANCE.triggerComputeQuotas(); - - IdentityGetSpaceMetaDataInputData data; - if (indata instanceof IdentityInputData) { - data = (IdentityGetSpaceMetaDataInputData) indata; - } else { - GetSpaceMetaDataOutputData outputData = new GetSpaceMetaDataOutputData(); - outputData.setStatus(CommandHelper.buildStatus( - TStatusCode.SRM_NOT_SUPPORTED, "Anonymous user can not perform" - + SRM_COMMAND)); - printRequestOutcome(outputData.getStatus(), - (GetSpaceMetaDataInputData) indata); - return outputData; - } - int errorCount = 0; - ArrayOfTMetaDataSpace arrayData = new ArrayOfTMetaDataSpace(); - TReturnStatus globalStatus = null; - - TMetaDataSpace metadata = null; - - for (TSpaceToken token : data.getSpaceTokenArray().getTSpaceTokenArray()) { - StorageSpaceData spaceData = null; - try { - spaceData = catalog.getStorageSpace(token); - } catch (TransferObjectDecodingException e) { - log.error("Error getting storage space data for token {}. {}", - token, e.getMessage(),e); - metadata = createFailureMetadata(token, TStatusCode.SRM_INTERNAL_ERROR, - "Error building space data from row DB data", data.getUser()); - errorCount++; - arrayData.addTMetaDataSpace(metadata); - continue; - - } catch (DataAccessException e) { - log.error("Error getting storage space data for token {}. 
{}", - token, e.getMessage(),e); - metadata = createFailureMetadata(token, TStatusCode.SRM_INTERNAL_ERROR, - "Error retrieving row space token data from DB", data.getUser()); - errorCount++; - arrayData.addTMetaDataSpace(metadata); - continue; - } - if (spaceData != null) { - if (!spaceData.isInitialized()) { - log.warn("Uninitialized storage data found for token {}", token); - metadata = createFailureMetadata(token, TStatusCode.SRM_FAILURE, - "Storage Space not initialized yet", data.getUser()); - errorCount++; - } else { - try { - metadata = new TMetaDataSpace(spaceData); - } catch (InvalidTMetaDataSpaceAttributeException e) { - log.error("Metadata error. {}", e.getMessage(), e); - metadata = createFailureMetadata(token, - TStatusCode.SRM_INTERNAL_ERROR, - "Error building Storage Space Metadata from row data", - data.getUser()); - errorCount++; - } catch (InvalidTSizeAttributesException e) { - log.error("Metadata error. {}", e.getMessage(), e); - metadata = createFailureMetadata(token, - TStatusCode.SRM_INTERNAL_ERROR, - "Error building Storage Space Metadata from row data", - data.getUser()); - errorCount++; - } - } - } else { - log.warn("Unable to retrieve space data for token {}.",token); - metadata = createFailureMetadata(token, - TStatusCode.SRM_INVALID_REQUEST, "Space Token not found", - data.getUser()); - errorCount++; - } - arrayData.addTMetaDataSpace(metadata); - } - - boolean requestSuccess = (errorCount == 0); - boolean requestFailure = (errorCount == data.getSpaceTokenArray().size()); - - if (requestSuccess) { - globalStatus = new TReturnStatus(TStatusCode.SRM_SUCCESS, ""); - - log.info("srmGetSpaceMetadata: user <{}> Request for [spaceTokens: {}] " - + "done succesfully with: [status: {}]", data.getUser(), - data.getSpaceTokenArray(), globalStatus); - - } else { - if (requestFailure) { - globalStatus = new TReturnStatus(TStatusCode.SRM_FAILURE, - "No valid space tokens"); - - log.info( - "srmGetSpaceMetadata: user <{}> Request for [spaceTokens: {}] " 
- + "failed with: [status: {}]", data.getUser(), - data.getSpaceTokenArray(), globalStatus); - - } else { - - globalStatus = new TReturnStatus(TStatusCode.SRM_PARTIAL_SUCCESS, - "Check space tokens statuses for details"); - - log.info( - "srmGetSpaceMetadata: user <{}> Request for [spaceTokens: {}] " - + "partially done with: [status: {}]", data.getUser(), - data.getSpaceTokenArray(), globalStatus); - - } - } - - GetSpaceMetaDataOutputData response = null; - try { - response = new GetSpaceMetaDataOutputData(globalStatus, arrayData); - } catch (InvalidGetSpaceMetaDataOutputAttributeException e) { - log.error(e.getMessage(),e); - } - return response; - } - - private TMetaDataSpace createFailureMetadata(TSpaceToken token, - TStatusCode statusCode, String message, GridUserInterface user) { - - TMetaDataSpace metadata = TMetaDataSpace.makeEmpty(); - metadata.setSpaceToken(token); - - try { - metadata.setStatus(new TReturnStatus(statusCode, message)); - } catch (IllegalArgumentException e) { - log.error(e.getMessage(),e); - } - - return metadata; - } - - private void printRequestOutcome(TReturnStatus status, - GetSpaceMetaDataInputData inputData) { - - if (inputData != null) { - CommandHelper.printRequestOutcome(SRM_COMMAND, log, status, inputData); - } else { - CommandHelper.printRequestOutcome(SRM_COMMAND, log, status); - } - } + public static final Logger log = LoggerFactory.getLogger(GetSpaceMetaDataCommand.class); + + private ReservedSpaceCatalog catalog = ReservedSpaceCatalog.getInstance(); + + private static final String SRM_COMMAND = "srmGetSpaceMetaData"; + + /** + * + * @param data GetSpaceMetaDataInputData + * @return GetSpaceMetaDataOutputData + */ + public OutputData execute(InputData indata) { + + log.debug(""); + log.debug(" Updating SA with GPFS quotas results"); + GPFSQuotaManager.INSTANCE.triggerComputeQuotas(); + + IdentityGetSpaceMetaDataInputData data; + if (indata instanceof IdentityInputData) { + data = (IdentityGetSpaceMetaDataInputData) indata; + 
} else { + GetSpaceMetaDataOutputData outputData = new GetSpaceMetaDataOutputData(); + outputData.setStatus(CommandHelper.buildStatus(TStatusCode.SRM_NOT_SUPPORTED, + "Anonymous user can not perform" + SRM_COMMAND)); + printRequestOutcome(outputData.getStatus(), (GetSpaceMetaDataInputData) indata); + return outputData; + } + int errorCount = 0; + ArrayOfTMetaDataSpace arrayData = new ArrayOfTMetaDataSpace(); + TReturnStatus globalStatus = null; + + TMetaDataSpace metadata = null; + + for (TSpaceToken token : data.getSpaceTokenArray().getTSpaceTokenArray()) { + StorageSpaceData spaceData = null; + try { + spaceData = catalog.getStorageSpace(token); + } catch (TransferObjectDecodingException e) { + log.error("Error getting storage space data for token {}. {}", token, e.getMessage(), e); + metadata = createFailureMetadata(token, TStatusCode.SRM_INTERNAL_ERROR, + "Error building space data from row DB data", data.getUser()); + errorCount++; + arrayData.addTMetaDataSpace(metadata); + continue; + + } catch (DataAccessException e) { + log.error("Error getting storage space data for token {}. {}", token, e.getMessage(), e); + metadata = createFailureMetadata(token, TStatusCode.SRM_INTERNAL_ERROR, + "Error retrieving row space token data from DB", data.getUser()); + errorCount++; + arrayData.addTMetaDataSpace(metadata); + continue; + } + if (spaceData != null) { + if (!spaceData.isInitialized()) { + log.warn("Uninitialized storage data found for token {}", token); + metadata = createFailureMetadata(token, TStatusCode.SRM_FAILURE, + "Storage Space not initialized yet", data.getUser()); + errorCount++; + } else { + try { + metadata = new TMetaDataSpace(spaceData); + } catch (InvalidTMetaDataSpaceAttributeException e) { + log.error("Metadata error. 
{}", e.getMessage(), e); + metadata = createFailureMetadata(token, TStatusCode.SRM_INTERNAL_ERROR, + "Error building Storage Space Metadata from row data", data.getUser()); + errorCount++; + } catch (InvalidTSizeAttributesException e) { + log.error("Metadata error. {}", e.getMessage(), e); + metadata = createFailureMetadata(token, TStatusCode.SRM_INTERNAL_ERROR, + "Error building Storage Space Metadata from row data", data.getUser()); + errorCount++; + } + } + } else { + log.warn("Unable to retrieve space data for token {}.", token); + metadata = createFailureMetadata(token, TStatusCode.SRM_INVALID_REQUEST, + "Space Token not found", data.getUser()); + errorCount++; + } + arrayData.addTMetaDataSpace(metadata); + } + + boolean requestSuccess = (errorCount == 0); + boolean requestFailure = (errorCount == data.getSpaceTokenArray().size()); + + if (requestSuccess) { + globalStatus = new TReturnStatus(TStatusCode.SRM_SUCCESS, ""); + + log.info( + "srmGetSpaceMetadata: user <{}> Request for [spaceTokens: {}] " + + "done succesfully with: [status: {}]", + data.getUser(), data.getSpaceTokenArray(), globalStatus); + + } else { + if (requestFailure) { + globalStatus = new TReturnStatus(TStatusCode.SRM_FAILURE, "No valid space tokens"); + + log.info( + "srmGetSpaceMetadata: user <{}> Request for [spaceTokens: {}] " + + "failed with: [status: {}]", + data.getUser(), data.getSpaceTokenArray(), globalStatus); + + } else { + + globalStatus = new TReturnStatus(TStatusCode.SRM_PARTIAL_SUCCESS, + "Check space tokens statuses for details"); + + log.info( + "srmGetSpaceMetadata: user <{}> Request for [spaceTokens: {}] " + + "partially done with: [status: {}]", + data.getUser(), data.getSpaceTokenArray(), globalStatus); + + } + } + + GetSpaceMetaDataOutputData response = null; + try { + response = new GetSpaceMetaDataOutputData(globalStatus, arrayData); + } catch (InvalidGetSpaceMetaDataOutputAttributeException e) { + log.error(e.getMessage(), e); + } + return response; + } + + private 
TMetaDataSpace createFailureMetadata(TSpaceToken token, TStatusCode statusCode, + String message, GridUserInterface user) { + + TMetaDataSpace metadata = TMetaDataSpace.makeEmpty(); + metadata.setSpaceToken(token); + + try { + metadata.setStatus(new TReturnStatus(statusCode, message)); + } catch (IllegalArgumentException e) { + log.error(e.getMessage(), e); + } + + return metadata; + } + + private void printRequestOutcome(TReturnStatus status, GetSpaceMetaDataInputData inputData) { + + if (inputData != null) { + CommandHelper.printRequestOutcome(SRM_COMMAND, log, status, inputData); + } else { + CommandHelper.printRequestOutcome(SRM_COMMAND, log, status); + } + } } diff --git a/src/main/java/it/grid/storm/synchcall/command/space/GetSpaceTokensCommand.java b/src/main/java/it/grid/storm/synchcall/command/space/GetSpaceTokensCommand.java index ee40eb5d5..8f0cab2e9 100644 --- a/src/main/java/it/grid/storm/synchcall/command/space/GetSpaceTokensCommand.java +++ b/src/main/java/it/grid/storm/synchcall/command/space/GetSpaceTokensCommand.java @@ -36,9 +36,8 @@ import it.grid.storm.synchcall.data.space.GetSpaceTokensOutputData; /** - * This class is part of the StoRM project. Copyright: Copyright (c) 2008 - * Company: INFN-CNAF and ICTP/EGRID project * Execute the GetSpaceTokens - * request. + * This class is part of the StoRM project. Copyright: Copyright (c) 2008 Company: INFN-CNAF and + * ICTP/EGRID project * Execute the GetSpaceTokens request. 
* * @author lucamag * @author Alberto Forti @@ -49,16 +48,10 @@ public class GetSpaceTokensCommand extends SpaceCommand implements Command { - public static final Logger log = LoggerFactory - .getLogger(GetSpaceTokensCommand.class); + public static final Logger log = LoggerFactory.getLogger(GetSpaceTokensCommand.class); private static final String SRM_COMMAND = "srmGetSpaceTokens"; - private ReservedSpaceCatalog catalog = null; - - public GetSpaceTokensCommand() { - - catalog = new ReservedSpaceCatalog(); - }; + private ReservedSpaceCatalog catalog = ReservedSpaceCatalog.getInstance(); public OutputData execute(InputData data) { @@ -68,11 +61,9 @@ public OutputData execute(InputData data) { inputData = (IdentityGetSpaceTokensInputData) data; } else { outputData = new GetSpaceTokensOutputData(); - outputData.setStatus(CommandHelper.buildStatus( - TStatusCode.SRM_NOT_SUPPORTED, "Anonymous user can not perform" - + SRM_COMMAND)); - printRequestOutcome(outputData.getStatus(), - (GetSpaceTokensInputData) data); + outputData.setStatus(CommandHelper.buildStatus(TStatusCode.SRM_NOT_SUPPORTED, + "Anonymous user can not perform" + SRM_COMMAND)); + printRequestOutcome(outputData.getStatus(), (GetSpaceTokensInputData) data); return outputData; } @@ -84,13 +75,12 @@ public OutputData execute(InputData data) { if (user == null) { log.debug("GetSpaceTokens: the user field is NULL"); status = new TReturnStatus(TStatusCode.SRM_AUTHENTICATION_FAILURE, - "Unable to get user credential!"); + "Unable to get user credential!"); - log.error("srmGetSpaceTokens: <{}> " - + "Request for [spaceTokenDescription:{}] failed with: [status: {}]", - user, - inputData.getSpaceTokenAlias(), - status); + log.error( + "srmGetSpaceTokens: <{}> " + + "Request for [spaceTokenDescription:{}] failed with: [status: {}]", + user, inputData.getSpaceTokenAlias(), status); outputData = new GetSpaceTokensOutputData(status, null); return outputData; @@ -98,39 +88,33 @@ public OutputData execute(InputData data) { 
String spaceAlias = inputData.getSpaceTokenAlias(); log.debug("spaceAlias= {}", spaceAlias); - - ArrayOfTSpaceToken arrayOfSpaceTokens = catalog.getSpaceTokens(user, - spaceAlias); + + ArrayOfTSpaceToken arrayOfSpaceTokens = catalog.getSpaceTokens(user, spaceAlias); if (arrayOfSpaceTokens.size() == 0) { arrayOfSpaceTokens = catalog.getSpaceTokensByAlias(spaceAlias); } - if (arrayOfSpaceTokens.size() == 0) { - if (spaceAlias != null) { - status = new TReturnStatus(TStatusCode.SRM_INVALID_REQUEST, - "'userSpaceTokenDescription' does not refer to an existing space"); - } else { - status = new TReturnStatus(TStatusCode.SRM_FAILURE, - "No space tokens owned by this user"); - } - arrayOfSpaceTokens = null; - } else { - status = new TReturnStatus(TStatusCode.SRM_SUCCESS, ""); - } + if (arrayOfSpaceTokens.size() == 0) { + if (spaceAlias != null) { + status = new TReturnStatus(TStatusCode.SRM_INVALID_REQUEST, + "'userSpaceTokenDescription' does not refer to an existing space"); + } else { + status = new TReturnStatus(TStatusCode.SRM_FAILURE, "No space tokens owned by this user"); + } + arrayOfSpaceTokens = null; + } else { + status = new TReturnStatus(TStatusCode.SRM_SUCCESS, ""); + } if (status.isSRM_SUCCESS()) { - log.info("srmGetSpaceTokens: <{}> Request for [spaceTokenDescription: {}] " - + "succesfully done with: [status: {}]", - user, - inputData.getSpaceTokenAlias(), - status); + log.info( + "srmGetSpaceTokens: <{}> Request for [spaceTokenDescription: {}] " + + "succesfully done with: [status: {}]", + user, inputData.getSpaceTokenAlias(), status); } else { log.error("srmGetSpaceTokens: <{}> Request for [spaceTokenDescription: {}] " - + "failed with: [status: {}]", - user, - inputData.getSpaceTokenAlias(), - status); + + "failed with: [status: {}]", user, inputData.getSpaceTokenAlias(), status); } outputData = new GetSpaceTokensOutputData(status, arrayOfSpaceTokens); @@ -139,8 +123,7 @@ public OutputData execute(InputData data) { } - private void 
printRequestOutcome(TReturnStatus status, - GetSpaceTokensInputData data) { + private void printRequestOutcome(TReturnStatus status, GetSpaceTokensInputData data) { if (data != null) { CommandHelper.printRequestOutcome(SRM_COMMAND, log, status, data); diff --git a/src/main/java/it/grid/storm/synchcall/command/space/ReleaseSpaceCommand.java b/src/main/java/it/grid/storm/synchcall/command/space/ReleaseSpaceCommand.java index 9bef19955..a4283d409 100644 --- a/src/main/java/it/grid/storm/synchcall/command/space/ReleaseSpaceCommand.java +++ b/src/main/java/it/grid/storm/synchcall/command/space/ReleaseSpaceCommand.java @@ -39,9 +39,8 @@ import org.slf4j.LoggerFactory; /** - * This class represents the ReleaseSpaceManager Class. This class hava a - * reseveSpace method that perform all operation nedded to satisfy a SRM space - * release request. + * This class represents the ReleaseSpaceManager Class. This class has a reserveSpace method that + * performs all operations needed to satisfy a SRM space release request. 
* * @author Magnoni Luca * @author Cnaf -INFN Bologna @@ -51,21 +50,12 @@ public class ReleaseSpaceCommand extends SpaceCommand implements Command { - private final ReservedSpaceCatalog catalog; + private final ReservedSpaceCatalog catalog = ReservedSpaceCatalog.getInstance(); - /** - * Logger - */ - private static final Logger log = LoggerFactory - .getLogger(ReleaseSpaceCommand.class); + private static final Logger log = LoggerFactory.getLogger(ReleaseSpaceCommand.class); private static final String SRM_COMMAND = "srmReleaseSpace"; - public ReleaseSpaceCommand() { - - catalog = new ReservedSpaceCatalog(); - }; - public OutputData execute(InputData indata) { ReleaseSpaceOutputData outputData = new ReleaseSpaceOutputData(); @@ -73,20 +63,16 @@ public OutputData execute(InputData indata) { if (indata instanceof IdentityInputData) { inputData = (IdentityReleaseSpaceInputData) indata; } else { - outputData.setStatus(CommandHelper.buildStatus( - TStatusCode.SRM_NOT_SUPPORTED, "Anonymous user can not perform" - + SRM_COMMAND)); - printRequestOutcome(outputData.getStatus(), - (ReleaseSpaceInputData) indata); + outputData.setStatus(CommandHelper.buildStatus(TStatusCode.SRM_NOT_SUPPORTED, + "Anonymous user can not perform" + SRM_COMMAND)); + printRequestOutcome(outputData.getStatus(), (ReleaseSpaceInputData) indata); return outputData; } TReturnStatus returnStatus = null; - if ((inputData == null) - || ((inputData != null) && (inputData.getSpaceToken() == null))) { + if ((inputData == null) || ((inputData != null) && (inputData.getSpaceToken() == null))) { log.error("Empty space token."); - returnStatus = new TReturnStatus(TStatusCode.SRM_INVALID_REQUEST, - "SpaceToken is empty."); + returnStatus = new TReturnStatus(TStatusCode.SRM_INVALID_REQUEST, "SpaceToken is empty."); outputData.setStatus(returnStatus); return outputData; } @@ -95,11 +81,11 @@ public OutputData execute(InputData indata) { if (user == null) { log.debug("Null user credentials."); returnStatus = new 
TReturnStatus(TStatusCode.SRM_AUTHENTICATION_FAILURE, - "Unable to get user credential"); + "Unable to get user credential"); outputData.setStatus(returnStatus); - log.error("srmReleaseSpace: <{}> Request for [spacetoken: {}] failed " - + "with: [status: {}]", user, inputData.getSpaceToken(), returnStatus); + log.error("srmReleaseSpace: <{}> Request for [spacetoken: {}] failed " + "with: [status: {}]", + user, inputData.getSpaceToken(), returnStatus); return outputData; } @@ -113,16 +99,16 @@ public OutputData execute(InputData indata) { try { data = catalog.getStorageSpace(inputData.getSpaceToken()); } catch (Throwable e) { - log.error("Error fetching data for space token {}. {}", - inputData.getSpaceToken(), e.getMessage(), e); + log.error("Error fetching data for space token {}. {}", inputData.getSpaceToken(), + e.getMessage(), e); explanation = "Error building space data from row DB data."; statusCode = TStatusCode.SRM_INTERNAL_ERROR; returnStatus = new TReturnStatus(statusCode, explanation); outputData.setStatus(returnStatus); - log.error("srmReleaseSpace: <{}> Request for [spacetoken: {}] failed " - + "with: [status: {}]", user, inputData.getSpaceToken(), returnStatus); + log.error("srmReleaseSpace: <{}> Request for [spacetoken: {}] failed " + "with: [status: {}]", + user, inputData.getSpaceToken(), returnStatus); return outputData; } @@ -133,8 +119,8 @@ public OutputData execute(InputData indata) { returnStatus = new TReturnStatus(statusCode, explanation); outputData.setStatus(returnStatus); - log.error("srmReleaseSpace: <{}> Request for [spacetoken: {}] failed " - + "with: [status: {}]", user, inputData.getSpaceToken(), returnStatus); + log.error("srmReleaseSpace: <{}> Request for [spacetoken: {}] failed " + "with: [status: {}]", + user, inputData.getSpaceToken(), returnStatus); return outputData; } @@ -170,13 +156,12 @@ public OutputData execute(InputData indata) { if (returnStatus.isSRM_SUCCESS()) { log.error("srmReleaseSpace: <{}> Request for 
[spacetoken: {}] succesfully done " - + "with: [status: {}]", user, inputData.getSpaceToken(), returnStatus); - + + "with: [status: {}]", user, inputData.getSpaceToken(), returnStatus); + } else { log.error("srmReleaseSpace: <" + user + "> Request for [spacetoken:" - + inputData.getSpaceToken() + "] for failed with: [status:" - + returnStatus + "]"); + + inputData.getSpaceToken() + "] for failed with: [status:" + returnStatus + "]"); } @@ -185,14 +170,12 @@ public OutputData execute(InputData indata) { /** * - * @param user - * GridUserInterface - * @param data - * StorageSpaceData + * @param user GridUserInterface + * @param data StorageSpaceData * @return TReturnStatus */ private TReturnStatus manageAuthorizedReleaseSpace(StorageSpaceData data, - GridUserInterface user) { + GridUserInterface user) { String spaceFileName; PFN pfn = data.getSpaceFileName(); @@ -206,19 +189,17 @@ private TReturnStatus manageAuthorizedReleaseSpace(StorageSpaceData data, return new TReturnStatus(TStatusCode.SRM_SUCCESS, "Space Released."); } else { return new TReturnStatus(TStatusCode.SRM_INTERNAL_ERROR, - "Space removed, but spaceToken was not found in the DB"); + "Space removed, but spaceToken was not found in the DB"); } } else { - return new TReturnStatus(TStatusCode.SRM_FAILURE, - "Space can not be removed by StoRM!"); + return new TReturnStatus(TStatusCode.SRM_FAILURE, "Space can not be removed by StoRM!"); } } else { - return new TReturnStatus(TStatusCode.SRM_FAILURE, "SRM Internal failure."); + return new TReturnStatus(TStatusCode.SRM_FAILURE, "SRM Internal failure."); } } - private void printRequestOutcome(TReturnStatus status, - ReleaseSpaceInputData indata) { + private void printRequestOutcome(TReturnStatus status, ReleaseSpaceInputData indata) { if (indata != null) { CommandHelper.printRequestOutcome(SRM_COMMAND, log, status, indata); diff --git a/src/main/java/it/grid/storm/synchcall/command/space/ReserveSpaceCommand.java 
b/src/main/java/it/grid/storm/synchcall/command/space/ReserveSpaceCommand.java index 8aa35775a..692a42c3f 100644 --- a/src/main/java/it/grid/storm/synchcall/command/space/ReserveSpaceCommand.java +++ b/src/main/java/it/grid/storm/synchcall/command/space/ReserveSpaceCommand.java @@ -19,7 +19,6 @@ import it.grid.storm.acl.AclManager; import it.grid.storm.acl.AclManagerFS; -import it.grid.storm.catalogs.InvalidSpaceDataAttributesException; import it.grid.storm.catalogs.ReservedSpaceCatalog; import it.grid.storm.common.types.PFN; import it.grid.storm.common.types.SizeUnit; @@ -37,6 +36,7 @@ import it.grid.storm.namespace.VirtualFSInterface; import it.grid.storm.namespace.naming.NamespaceUtil; import it.grid.storm.persistence.exceptions.DataAccessException; +import it.grid.storm.persistence.exceptions.InvalidSpaceDataAttributesException; import it.grid.storm.persistence.model.TransferObjectDecodingException; import it.grid.storm.space.StorageSpaceData; import it.grid.storm.srm.types.InvalidTSizeAttributesException; @@ -69,8 +69,8 @@ import org.slf4j.LoggerFactory; /** - * This class is part of the StoRM project. Copyright: Copyright (c) 2008 - * Company: INFN-CNAF and ICTP/EGRID project + * This class is part of the StoRM project. 
Copyright: Copyright (c) 2008 Company: INFN-CNAF and + * ICTP/EGRID project * * @author lucamag * @author Riccardo Zappi @@ -81,8 +81,7 @@ public class ReserveSpaceCommand extends SpaceCommand implements Command { private ReservedSpaceCatalog catalog; - private static final Logger log = LoggerFactory - .getLogger(ReserveSpaceCommand.class); + private static final Logger log = LoggerFactory.getLogger(ReserveSpaceCommand.class); private NamespaceInterface namespace; @@ -92,14 +91,15 @@ public class ReserveSpaceCommand extends SpaceCommand implements Command { String explanation = null; private void logRequestSuccess(GridUserInterface user, TSizeInBytes desSize, - TSizeInBytes guarSize, TLifeTimeInSeconds lifetime, - TRetentionPolicyInfo rpinfo, TReturnStatus status) { - - log.info("srmReservespace: <{}> Request for [desiredSizeOfTotalSpace: {}," - + " desiredSizeOfGuaranteedSpace: {}] with " - + "[desiredLifetimeOfReservedSpace: {}, retentionPolicyInfo: {}]" - + "succesfully done with: [status: {}]", user, desSize, guarSize, - lifetime, rpinfo, status); + TSizeInBytes guarSize, TLifeTimeInSeconds lifetime, TRetentionPolicyInfo rpinfo, + TReturnStatus status) { + + log.info( + "srmReservespace: <{}> Request for [desiredSizeOfTotalSpace: {}," + + " desiredSizeOfGuaranteedSpace: {}] with " + + "[desiredLifetimeOfReservedSpace: {}, retentionPolicyInfo: {}]" + + "succesfully done with: [status: {}]", + user, desSize, guarSize, lifetime, rpinfo, status); } private void logRequestFailure(TStatusCode code, String explanation) { @@ -109,29 +109,27 @@ private void logRequestFailure(TStatusCode code, String explanation) { } private void logRequestFailure(GridUserInterface user, TSizeInBytes desSize, - TSizeInBytes guarSize, TLifeTimeInSeconds lifetime, - TRetentionPolicyInfo rpinfo, TStatusCode code, String explanation) { + TSizeInBytes guarSize, TLifeTimeInSeconds lifetime, TRetentionPolicyInfo rpinfo, + TStatusCode code, String explanation) { TReturnStatus status = new 
TReturnStatus(code, explanation); log.error("srmReservespace: <{}> Request for [desiredSizeOfTotalSpace: {}," - + " desiredSizeOfGuaranteedSpace: {}] with " - + "[desiredLifetimeOfReservedSpace: {}, retentionPolicyInfo: {}]" - + "failed with: [status: {}]", user, desSize, guarSize, lifetime, rpinfo, - status); + + " desiredSizeOfGuaranteedSpace: {}] with " + + "[desiredLifetimeOfReservedSpace: {}, retentionPolicyInfo: {}]" + + "failed with: [status: {}]", user, desSize, guarSize, lifetime, rpinfo, status); } public ReserveSpaceCommand() { namespace = NamespaceDirector.getNamespace(); - catalog = new ReservedSpaceCatalog(); + catalog = ReservedSpaceCatalog.getInstance(); } /** * Method that provide space reservation for srmReserveSpace request. * - * @param data - * Contain information about data procived in SRM request. + * @param data Contains information about data provided in SRM request. * @return SpaceResOutputData that contain all SRM return parameter. * @todo Implement this it.grid.storm.synchcall.space.SpaceManager method */ @@ -142,11 +140,9 @@ public OutputData execute(InputData indata) { data = (IdentityReserveSpaceInputData) indata; } else { GetSpaceMetaDataOutputData outputData = new GetSpaceMetaDataOutputData(); - outputData.setStatus(CommandHelper.buildStatus( - TStatusCode.SRM_NOT_SUPPORTED, "Anonymous user can not perform" - + SRM_COMMAND)); - printRequestOutcome(outputData.getStatus(), - (ReserveSpaceInputData) indata); + outputData.setStatus(CommandHelper.buildStatus(TStatusCode.SRM_NOT_SUPPORTED, + "Anonymous user can not perform" + SRM_COMMAND)); + printRequestOutcome(outputData.getStatus(), (ReserveSpaceInputData) indata); return outputData; } log.debug(":reserveSpace start."); @@ -161,9 +157,8 @@ public OutputData execute(InputData indata) { } catch (Exception e) { log.error(e.getMessage(), e); - logRequestFailure(data.getUser(), data.getDesiredSize(), - data.getGuaranteedSize(), data.getSpaceLifetime(), - data.getRetentionPolicyInfo(), 
statusCode, explanation); + logRequestFailure(data.getUser(), data.getDesiredSize(), data.getGuaranteedSize(), + data.getSpaceLifetime(), data.getRetentionPolicyInfo(), statusCode, explanation); return manageError(statusCode, explanation); } @@ -175,9 +170,8 @@ public OutputData execute(InputData indata) { log.error(e.getMessage(), e); - logRequestFailure(data.getUser(), data.getDesiredSize(), - data.getGuaranteedSize(), data.getSpaceLifetime(), - data.getRetentionPolicyInfo(), statusCode, explanation); + logRequestFailure(data.getUser(), data.getDesiredSize(), data.getGuaranteedSize(), + data.getSpaceLifetime(), data.getRetentionPolicyInfo(), statusCode, explanation); return manageError(statusCode, explanation); } @@ -190,43 +184,37 @@ public OutputData execute(InputData indata) { log.error(e.getMessage(), e); - logRequestFailure(data.getUser(), data.getDesiredSize(), - data.getGuaranteedSize(), data.getSpaceLifetime(), - data.getRetentionPolicyInfo(), statusCode, explanation); + logRequestFailure(data.getUser(), data.getDesiredSize(), data.getGuaranteedSize(), + data.getSpaceLifetime(), data.getRetentionPolicyInfo(), statusCode, explanation); return manageError(statusCode, explanation); } SpaceSize spaceSize = null; try { - spaceSize = computeSpaceSize(data.getDesiredSize(), - data.getGuaranteedSize(), vfs); + spaceSize = computeSpaceSize(data.getDesiredSize(), data.getGuaranteedSize(), vfs); } catch (Exception e) { log.error(e.getMessage(), e); - logRequestFailure(data.getUser(), data.getDesiredSize(), - data.getGuaranteedSize(), data.getSpaceLifetime(), - data.getRetentionPolicyInfo(), statusCode, explanation); + logRequestFailure(data.getUser(), data.getDesiredSize(), data.getGuaranteedSize(), + data.getSpaceLifetime(), data.getRetentionPolicyInfo(), statusCode, explanation); return manageError(statusCode, explanation); } StoRI spaceStori = null; try { - spaceStori = getSpaceStoRI(vfs, relativeSpaceFN, - spaceSize.getDesiderataSpaceSize()); + spaceStori = 
getSpaceStoRI(vfs, relativeSpaceFN, spaceSize.getDesiderataSpaceSize()); } catch (Exception e) { log.error(e.getMessage(), e); - logRequestFailure(data.getUser(), data.getDesiredSize(), - data.getGuaranteedSize(), data.getSpaceLifetime(), - data.getRetentionPolicyInfo(), statusCode, explanation); + logRequestFailure(data.getUser(), data.getDesiredSize(), data.getGuaranteedSize(), + data.getSpaceLifetime(), data.getRetentionPolicyInfo(), statusCode, explanation); return manageError(statusCode, explanation); } - log - .debug("Reserve Space File Size: {}", spaceSize.getDesiderataSpaceSize()); + log.debug("Reserve Space File Size: {}", spaceSize.getDesiderataSpaceSize()); try { spaceStori.getSpace().fakeAllot(); @@ -235,9 +223,8 @@ public OutputData execute(InputData indata) { statusCode = TStatusCode.SRM_INTERNAL_ERROR; explanation = "Unable to create Space File into filesystem. \n"; - logRequestFailure(data.getUser(), data.getDesiredSize(), - data.getGuaranteedSize(), data.getSpaceLifetime(), - data.getRetentionPolicyInfo(), statusCode, explanation); + logRequestFailure(data.getUser(), data.getDesiredSize(), data.getGuaranteedSize(), + data.getSpaceLifetime(), data.getRetentionPolicyInfo(), statusCode, explanation); return manageError(statusCode, explanation); } @@ -248,9 +235,8 @@ public OutputData execute(InputData indata) { } catch (Exception e) { log.error(e.getMessage(), e); - logRequestFailure(data.getUser(), data.getDesiredSize(), - data.getGuaranteedSize(), data.getSpaceLifetime(), - data.getRetentionPolicyInfo(), statusCode, explanation); + logRequestFailure(data.getUser(), data.getDesiredSize(), data.getGuaranteedSize(), + data.getSpaceLifetime(), data.getRetentionPolicyInfo(), statusCode, explanation); revertAllocation(spaceStori.getSpace()); return manageError(statusCode, explanation); @@ -258,15 +244,14 @@ public OutputData execute(InputData indata) { TSpaceToken spaceToken = null; try { - spaceToken = registerIntoDB(data.getUser(), 
data.getSpaceTokenAlias(), - spaceSize.getTotalSize(), spaceSize.getDesiderataSpaceSize(), - data.getSpaceLifetime(), spaceStori.getPFN()); + spaceToken = + registerIntoDB(data.getUser(), data.getSpaceTokenAlias(), spaceSize.getTotalSize(), + spaceSize.getDesiderataSpaceSize(), data.getSpaceLifetime(), spaceStori.getPFN()); } catch (Exception e) { log.error(e.getMessage(), e); - logRequestFailure(data.getUser(), data.getDesiredSize(), - data.getGuaranteedSize(), data.getSpaceLifetime(), - data.getRetentionPolicyInfo(), statusCode, explanation); + logRequestFailure(data.getUser(), data.getDesiredSize(), data.getGuaranteedSize(), + data.getSpaceLifetime(), data.getRetentionPolicyInfo(), statusCode, explanation); revertAllocation(spaceStori.getSpace()); return manageError(statusCode, explanation); @@ -276,16 +261,14 @@ public OutputData execute(InputData indata) { try { output = buildOutput(spaceSize, spaceToken, data.getSpaceLifetime()); - logRequestSuccess(data.getUser(), data.getDesiredSize(), - data.getGuaranteedSize(), data.getSpaceLifetime(), - data.getRetentionPolicyInfo(), output.getStatus()); + logRequestSuccess(data.getUser(), data.getDesiredSize(), data.getGuaranteedSize(), + data.getSpaceLifetime(), data.getRetentionPolicyInfo(), output.getStatus()); } catch (Exception e) { statusCode = TStatusCode.SRM_INTERNAL_ERROR; explanation = "Unable to build a valid output object "; - logRequestFailure(data.getUser(), data.getDesiredSize(), - data.getGuaranteedSize(), data.getSpaceLifetime(), - data.getRetentionPolicyInfo(), statusCode, explanation); + logRequestFailure(data.getUser(), data.getDesiredSize(), data.getGuaranteedSize(), + data.getSpaceLifetime(), data.getRetentionPolicyInfo(), statusCode, explanation); revertAllocation(spaceStori.getSpace()); return manageError(statusCode, explanation); } @@ -302,7 +285,7 @@ private void revertAllocation(Space space) { } private StoRI getSpaceStoRI(VirtualFSInterface vfs, String relativeSpaceFN, - TSizeInBytes 
desiderataSpaceSize) throws Exception { + TSizeInBytes desiderataSpaceSize) throws Exception { StoRI spaceFile = null; try { @@ -338,31 +321,28 @@ private boolean checkParameters(IdentityReserveSpaceInputData data) { log.debug("Null retentionPolicyInfo."); statusCode = TStatusCode.SRM_INVALID_REQUEST; explanation = "RetentionPolicy not specified."; - logRequestFailure(data.getUser(), data.getDesiredSize(), - data.getGuaranteedSize(), data.getSpaceLifetime(), - data.getRetentionPolicyInfo(), statusCode, explanation); + logRequestFailure(data.getUser(), data.getDesiredSize(), data.getGuaranteedSize(), + data.getSpaceLifetime(), data.getRetentionPolicyInfo(), statusCode, explanation); return false; } TAccessLatency latency = data.getRetentionPolicyInfo().getAccessLatency(); - TRetentionPolicy retentionPolicy = data.getRetentionPolicyInfo() - .getRetentionPolicy(); + TRetentionPolicy retentionPolicy = data.getRetentionPolicyInfo().getRetentionPolicy(); - if (!((latency == null || latency.equals(TAccessLatency.EMPTY) || latency - .equals(TAccessLatency.ONLINE)) && (retentionPolicy == null - || retentionPolicy.equals(TRetentionPolicy.EMPTY) || retentionPolicy - .equals(TRetentionPolicy.REPLICA)))) { + if (!((latency == null || latency.equals(TAccessLatency.EMPTY) + || latency.equals(TAccessLatency.ONLINE)) + && (retentionPolicy == null || retentionPolicy.equals(TRetentionPolicy.EMPTY) + || retentionPolicy.equals(TRetentionPolicy.REPLICA)))) { - log.debug("Invalid retentionPolicyInfo: {}, {}", data - .getRetentionPolicyInfo().getAccessLatency(), data - .getRetentionPolicyInfo().getRetentionPolicy()); + log.debug("Invalid retentionPolicyInfo: {}, {}", + data.getRetentionPolicyInfo().getAccessLatency(), + data.getRetentionPolicyInfo().getRetentionPolicy()); statusCode = TStatusCode.SRM_NOT_SUPPORTED; explanation = "RetentionPolicy requested cannot be satisfied."; - logRequestFailure(data.getUser(), data.getDesiredSize(), - data.getGuaranteedSize(), data.getSpaceLifetime(), 
- data.getRetentionPolicyInfo(), statusCode, explanation); + logRequestFailure(data.getUser(), data.getDesiredSize(), data.getGuaranteedSize(), + data.getSpaceLifetime(), data.getRetentionPolicyInfo(), statusCode, explanation); return false; } @@ -400,19 +380,15 @@ private VirtualFSInterface getSpaceVFS(String spaceFN) throws Exception { return vfs; } - private void setDefaults(IdentityReserveSpaceInputData data, - VirtualFSInterface vfs) { + private void setDefaults(IdentityReserveSpaceInputData data, VirtualFSInterface vfs) { if (data.getRetentionPolicyInfo().getAccessLatency() == null - || data.getRetentionPolicyInfo().getAccessLatency() - .equals(TAccessLatency.EMPTY)) { + || data.getRetentionPolicyInfo().getAccessLatency().equals(TAccessLatency.EMPTY)) { data.getRetentionPolicyInfo().setAccessLatency(TAccessLatency.ONLINE); } if (data.getRetentionPolicyInfo().getRetentionPolicy() == null - || data.getRetentionPolicyInfo().getRetentionPolicy() - .equals(TRetentionPolicy.EMPTY)) { - data.getRetentionPolicyInfo() - .setRetentionPolicy(TRetentionPolicy.REPLICA); + || data.getRetentionPolicyInfo().getRetentionPolicy().equals(TRetentionPolicy.EMPTY)) { + data.getRetentionPolicyInfo().setRetentionPolicy(TRetentionPolicy.REPLICA); } if (data.getSpaceLifetime().isEmpty()) { log.debug("LifeTime is EMPTY. 
Using default value."); @@ -420,13 +396,12 @@ private void setDefaults(IdentityReserveSpaceInputData data, } } - private SpaceSize computeSpaceSize(TSizeInBytes totalSize, - TSizeInBytes guarSize, VirtualFSInterface vfs) throws Exception { + private SpaceSize computeSpaceSize(TSizeInBytes totalSize, TSizeInBytes guarSize, + VirtualFSInterface vfs) throws Exception { TSizeInBytes desiderataSpaceSize = TSizeInBytes.makeEmpty(); - if ((!(totalSize.isEmpty())) - && (!((guarSize.isEmpty()) || guarSize.value() == 0))) { + if ((!(totalSize.isEmpty())) && (!((guarSize.isEmpty()) || guarSize.value() == 0))) { if (totalSize.value() < guarSize.value()) { log.debug("Error: totalSize < guaranteedSize"); statusCode = TStatusCode.SRM_INVALID_REQUEST; @@ -461,8 +436,8 @@ private SpaceSize computeSpaceSize(TSizeInBytes totalSize, } /* - * At this point either totalSize and guarSize contains significative value. - * desiderataSpaceSize is setted to totalSize. + * At this point either totalSize and guarSize contains significative value. desiderataSpaceSize + * is setted to totalSize. */ desiderataSpaceSize = totalSize; // This is valid because StoRM only reserve GUARANTEED space. 
@@ -470,23 +445,20 @@ private SpaceSize computeSpaceSize(TSizeInBytes totalSize, TSizeInBytes freeSpace = null; try { - freeSpace = TSizeInBytes.make(vfs.getFilesystem().getFreeSpace(), - SizeUnit.BYTES); + freeSpace = TSizeInBytes.make(vfs.getFilesystem().getFreeSpace(), SizeUnit.BYTES); } catch (InvalidTSizeAttributesException e) { - log - .debug("Error while retrieving free Space in underlying Filesystem", e); + log.debug("Error while retrieving free Space in underlying Filesystem", e); statusCode = TStatusCode.SRM_INTERNAL_ERROR; - explanation = "Error while retrieving free Space in underlying Filesystem \n" - + e; + explanation = "Error while retrieving free Space in underlying Filesystem \n" + e; throw new Exception(explanation); } catch (NamespaceException ex) { - log - .debug( + log.debug( "Error while retrieving free Space in underlying Filesystem. Unable to retrieve FS Driver", ex); statusCode = TStatusCode.SRM_INTERNAL_ERROR; - explanation = "Error while retrieving free Space in underlying Filesystem. Unable to retrieve FS Driver \n" - + ex; + explanation = + "Error while retrieving free Space in underlying Filesystem. 
Unable to retrieve FS Driver \n" + + ex; throw new Exception(explanation); } @@ -498,8 +470,7 @@ private SpaceSize computeSpaceSize(TSizeInBytes totalSize, if (freeSpace.value() < desiderataSpaceSize.value()) { if (freeSpace.value() < guarSize.value()) { // Not enough freespace - log - .debug(":reserveSpace Not Enough Free Space on storage!"); + log.debug(":reserveSpace Not Enough Free Space on storage!"); statusCode = TStatusCode.SRM_NO_FREE_SPACE; explanation = "SRM has not more free space."; throw new Exception(explanation); @@ -512,21 +483,18 @@ private SpaceSize computeSpaceSize(TSizeInBytes totalSize, return this.new SpaceSize(desiderataSpaceSize, totalSize, lower_space); } - private String getRelativeSpaceFilePath(VirtualFSInterface vfs, String spaceFN) - throws Exception { + private String getRelativeSpaceFilePath(VirtualFSInterface vfs, String spaceFN) throws Exception { String relativeSpaceFN = null; - relativeSpaceFN = NamespaceUtil.extractRelativePath(vfs.getRootPath(), - spaceFN); + relativeSpaceFN = NamespaceUtil.extractRelativePath(vfs.getRootPath(), spaceFN); log.debug("relativeSpaceFN: {}", relativeSpaceFN); return relativeSpaceFN; } - private void setSpaceFilePermissions(StoRI spaceStori, GridUserInterface user) - throws Exception { + private void setSpaceFilePermissions(StoRI spaceStori, GridUserInterface user) throws Exception { FilesystemPermission fp = FilesystemPermission.ReadWrite; @@ -542,8 +510,7 @@ private void setSpaceFilePermissions(StoRI spaceStori, GridUserInterface user) throw new Exception(explanation); } if (localFile == null || localUser == null) { - log.error("ACL setup error. localFile={} , localUser={}", localFile, - localUser); + log.error("ACL setup error. 
localFile={} , localUser={}", localFile, localUser); statusCode = TStatusCode.SRM_INTERNAL_ERROR; explanation = "Unable to setting up the ACL "; throw new Exception(explanation); @@ -569,23 +536,21 @@ private void setSpaceFilePermissions(StoRI spaceStori, GridUserInterface user) } } - private TSpaceToken registerIntoDB(GridUserInterface user, - String spaceTokenAlias, TSizeInBytes totalSize, - TSizeInBytes desiderataSpaceSize, TLifeTimeInSeconds lifeTime, PFN pfn) - throws Exception { + private TSpaceToken registerIntoDB(GridUserInterface user, String spaceTokenAlias, + TSizeInBytes totalSize, TSizeInBytes desiderataSpaceSize, TLifeTimeInSeconds lifeTime, + PFN pfn) throws Exception { StorageSpaceData spaceData = null; try { - spaceData = new StorageSpaceData(user, TSpaceType.PERMANENT, - spaceTokenAlias, totalSize, desiderataSpaceSize, lifeTime, null, - new Date(), pfn); + spaceData = new StorageSpaceData(user, TSpaceType.PERMANENT, spaceTokenAlias, totalSize, + desiderataSpaceSize, lifeTime, null, new Date(), pfn); } catch (InvalidSpaceDataAttributesException e) { log.debug("Unable to create Storage Space Data", e); statusCode = TStatusCode.SRM_INTERNAL_ERROR; explanation = "Unable to create storage space data."; - logRequestFailure(user, totalSize, desiderataSpaceSize, lifeTime, null, - statusCode, explanation); + logRequestFailure(user, totalSize, desiderataSpaceSize, lifeTime, null, statusCode, + explanation); throw new Exception(explanation); } @@ -601,8 +566,8 @@ private TSpaceToken registerIntoDB(GridUserInterface user, log.debug("Unable to register Storage Space Data into DB", e); statusCode = TStatusCode.SRM_INTERNAL_ERROR; explanation = "Unable to register Storage Space Data into DB."; - logRequestFailure(user, totalSize, desiderataSpaceSize, lifeTime, null, - statusCode, explanation); + logRequestFailure(user, totalSize, desiderataSpaceSize, lifeTime, null, statusCode, + explanation); throw new Exception(explanation); } @@ -614,31 +579,30 @@ private 
TSpaceToken registerIntoDB(GridUserInterface user, statusCode = TStatusCode.SRM_INTERNAL_ERROR; explanation = "Unable to create space token."; - logRequestFailure(user, totalSize, desiderataSpaceSize, lifeTime, null, - statusCode, explanation); + logRequestFailure(user, totalSize, desiderataSpaceSize, lifeTime, null, statusCode, + explanation); throw new Exception(explanation); } return spaceToken; } - private ReserveSpaceOutputData buildOutput(SpaceSize spaceSize, - TSpaceToken spaceToken, TLifeTimeInSeconds lifeTime) throws Exception { + private ReserveSpaceOutputData buildOutput(SpaceSize spaceSize, TSpaceToken spaceToken, + TLifeTimeInSeconds lifeTime) throws Exception { TReturnStatus status = null; - if (!spaceSize.isLowerSpace()) { - status = new TReturnStatus(TStatusCode.SRM_SUCCESS, - "Space Reservation done"); + if (!spaceSize.isLowerSpace()) { + status = new TReturnStatus(TStatusCode.SRM_SUCCESS, "Space Reservation done"); - } else { - status = new TReturnStatus(TStatusCode.SRM_LOWER_SPACE_GRANTED, - "Space Reservation done, lower space granted."); - } + } else { + status = new TReturnStatus(TStatusCode.SRM_LOWER_SPACE_GRANTED, + "Space Reservation done, lower space granted."); + } ReserveSpaceOutputData outputData = null; try { outputData = new ReserveSpaceOutputData(spaceSize.getTotalSize(), - spaceSize.getDesiderataSpaceSize(), lifeTime, spaceToken, status); + spaceSize.getDesiderataSpaceSize(), lifeTime, spaceToken, status); } catch (InvalidReserveSpaceOutputDataAttributesException e) { log.error(e.getMessage(), e); statusCode = TStatusCode.SRM_INTERNAL_ERROR; @@ -654,8 +618,7 @@ private class SpaceSize { private final TSizeInBytes totalSize; private final boolean lowerSpace; - public SpaceSize(TSizeInBytes desiderataSpaceSize, TSizeInBytes totalSize, - boolean lowerSpace) { + public SpaceSize(TSizeInBytes desiderataSpaceSize, TSizeInBytes totalSize, boolean lowerSpace) { this.desiderataSpaceSize = desiderataSpaceSize; this.totalSize = totalSize; @@ 
-681,9 +644,7 @@ protected boolean isLowerSpace() { /** * Method that reset an already done reservation to the original status. * - * @param token - * TSpaceToken that contains information about data procived in SRM - * request. + * @param token TSpaceToken that contains information about data procived in SRM request. * @return TReturnStatus that contains of all SRM return parameters. */ public TReturnStatus resetReservation(TSpaceToken token) { @@ -727,8 +688,7 @@ public TReturnStatus resetReservation(TSpaceToken token) { } String relativeSpaceFN = null; - relativeSpaceFN = NamespaceUtil.extractRelativePath(vfs.getRootPath(), - spaceFN); + relativeSpaceFN = NamespaceUtil.extractRelativePath(vfs.getRootPath(), spaceFN); log.debug("relativeSpaceFN: {}", relativeSpaceFN); @@ -741,8 +701,7 @@ public TReturnStatus resetReservation(TSpaceToken token) { StoRI spaceFile = null; try { - spaceFile = vfs.createSpace(relativeSpaceFN, - desiderataSpaceSize.value()); + spaceFile = vfs.createSpace(relativeSpaceFN, desiderataSpaceSize.value()); } catch (NamespaceException e) { log.debug(e.getMessage(), e); statusCode = TStatusCode.SRM_INTERNAL_ERROR; @@ -768,8 +727,7 @@ public TReturnStatus resetReservation(TSpaceToken token) { LocalFile localFile = spaceFile.getLocalFile(); LocalUser localUser = user.getLocalUser(); if (localFile == null || localUser == null) { - log.error("ACL setup error. localFile={} localUser={}", localFile, - localUser); + log.error("ACL setup error. localFile={} localUser={}", localFile, localUser); statusCode = TStatusCode.SRM_INTERNAL_ERROR; explanation = "Unable to setting up the ACL "; return manageErrorStatus(statusCode, explanation); @@ -795,8 +753,7 @@ public TReturnStatus resetReservation(TSpaceToken token) { LocalFile localFile = spaceFile.getLocalFile(); LocalUser localUser = user.getLocalUser(); if (localFile == null || localUser == null) { - log.error("ACL setup error. 
localFile={} localUser={}", localFile, - localUser); + log.error("ACL setup error. localFile={} localUser={}", localFile, localUser); statusCode = TStatusCode.SRM_INTERNAL_ERROR; explanation = "Unable to setting up the ACL "; return manageErrorStatus(statusCode, explanation); @@ -827,16 +784,14 @@ public TReturnStatus resetReservation(TSpaceToken token) { } catch (DataAccessException e) { log.error(e.getMessage(), e); statusCode = TStatusCode.SRM_INTERNAL_ERROR; - explanation = "Error persisting space token data into the DB\n" - + e.getMessage(); + explanation = "Error persisting space token data into the DB\n" + e.getMessage(); return manageErrorStatus(statusCode, explanation); } return manageErrorStatus(TStatusCode.SRM_SUCCESS, "Successfull creation."); } - public TReturnStatus updateReservation(TSpaceToken token, - TSizeInBytes sizeToAdd, TSURL toSurl) { + public TReturnStatus updateReservation(TSpaceToken token, TSizeInBytes sizeToAdd, TSURL toSurl) { String explanation = null; TStatusCode statusCode = TStatusCode.EMPTY; @@ -878,8 +833,7 @@ public TReturnStatus updateReservation(TSpaceToken token, String relativeSpaceFN = null; - relativeSpaceFN = NamespaceUtil.extractRelativePath(vfs.getRootPath(), - spaceFN); + relativeSpaceFN = NamespaceUtil.extractRelativePath(vfs.getRootPath(), spaceFN); TSizeInBytes desiderataSpaceSize = sdata.getTotalSpaceSize(); TSizeInBytes availableSize = sdata.getAvailableSpaceSize(); @@ -888,8 +842,8 @@ public TReturnStatus updateReservation(TSpaceToken token, log.debug("Size of removed file: {}" + sizeToAdd.value()); try { - desiderataSpaceSize = TSizeInBytes.make( - availableSize.value() + sizeToAdd.value(), SizeUnit.BYTES); + desiderataSpaceSize = + TSizeInBytes.make(availableSize.value() + sizeToAdd.value(), SizeUnit.BYTES); } catch (InvalidTSizeAttributesException e) { log.error(e.getMessage()); } @@ -932,8 +886,7 @@ public TReturnStatus updateReservation(TSpaceToken token, localFile = spaceFile.getLocalFile(); LocalUser 
localUser = user.getLocalUser(); if (localFile == null || localUser == null) { - log.error("ACL setup error. localFile={} localUser={}", localFile, - localUser); + log.error("ACL setup error. localFile={} localUser={}", localFile, localUser); revertOldSpaceFileDeletion(localFile); statusCode = TStatusCode.SRM_INTERNAL_ERROR; explanation = "Unable to setting up the ACL "; @@ -962,16 +915,14 @@ public TReturnStatus updateReservation(TSpaceToken token, localFile = spaceFile.getLocalFile(); LocalUser localUser = user.getLocalUser(); if (localFile == null || localUser == null) { - log.error("ACL setup error. localFile={} localUser={}", localFile, - localUser); + log.error("ACL setup error. localFile={} localUser={}", localFile, localUser); revertOldSpaceFileDeletion(localFile); statusCode = TStatusCode.SRM_INTERNAL_ERROR; explanation = "Unable to setting up the ACL "; return manageErrorStatus(statusCode, explanation); } else { try { - manager.grantGroupPermission(spaceFile.getLocalFile(), localUser, - fp); + manager.grantGroupPermission(spaceFile.getLocalFile(), localUser, fp); } catch (IllegalArgumentException e) { log.error(e.getMessage(), e); revertOldSpaceFileDeletion(localFile); @@ -990,14 +941,13 @@ public TReturnStatus updateReservation(TSpaceToken token, } try { - availableSize = TSizeInBytes.make(sdata.getAvailableSpaceSize().value() - + sizeToAdd.value(), SizeUnit.BYTES); + availableSize = TSizeInBytes.make(sdata.getAvailableSpaceSize().value() + sizeToAdd.value(), + SizeUnit.BYTES); } catch (InvalidTSizeAttributesException e) { log.error(e.getMessage(), e); revertOldSpaceFileDeletion(localFile); statusCode = TStatusCode.SRM_INTERNAL_ERROR; - explanation = "Error computing new available space size\n" - + e.getMessage(); + explanation = "Error computing new available space size\n" + e.getMessage(); return manageErrorStatus(statusCode, explanation); } @@ -1009,8 +959,7 @@ public TReturnStatus updateReservation(TSpaceToken token, log.error(e.getMessage(), e); 
revertOldSpaceFileDeletion(localFile); statusCode = TStatusCode.SRM_INTERNAL_ERROR; - explanation = "Error persisting space token data into the DB\n" - + e.getMessage(); + explanation = "Error persisting space token data into the DB\n" + e.getMessage(); return manageErrorStatus(statusCode, explanation); } return manageErrorStatus(TStatusCode.SRM_SUCCESS, "Successfull creation."); @@ -1020,8 +969,7 @@ private void revertOldSpaceFileDeletion(LocalFile localFile) { } - private ReserveSpaceOutputData manageError(TStatusCode statusCode, - String explanation) { + private ReserveSpaceOutputData manageError(TStatusCode statusCode, String explanation) { TReturnStatus status = null; try { @@ -1033,8 +981,7 @@ private ReserveSpaceOutputData manageError(TStatusCode statusCode, return new ReserveSpaceOutputData(status); } - private TReturnStatus manageErrorStatus(TStatusCode statusCode, - String explanation) { + private TReturnStatus manageErrorStatus(TStatusCode statusCode, String explanation) { TReturnStatus status = null; try { @@ -1045,8 +992,7 @@ private TReturnStatus manageErrorStatus(TStatusCode statusCode, return status; } - private void printRequestOutcome(TReturnStatus status, - ReserveSpaceInputData data) { + private void printRequestOutcome(TReturnStatus status, ReserveSpaceInputData data) { if (data != null) { CommandHelper.printRequestOutcome(SRM_COMMAND, log, status, data); diff --git a/src/main/java/it/grid/storm/synchcall/data/datatransfer/AnonymousPrepareToPutInputData.java b/src/main/java/it/grid/storm/synchcall/data/datatransfer/AnonymousPrepareToPutInputData.java index 34168f7d9..6847eb4c1 100644 --- a/src/main/java/it/grid/storm/synchcall/data/datatransfer/AnonymousPrepareToPutInputData.java +++ b/src/main/java/it/grid/storm/synchcall/data/datatransfer/AnonymousPrepareToPutInputData.java @@ -17,10 +17,10 @@ package it.grid.storm.synchcall.data.datatransfer; -import it.grid.storm.catalogs.OverwriteModeConverter; import it.grid.storm.common.types.TURLPrefix; 
import it.grid.storm.common.types.TimeUnit; import it.grid.storm.config.Configuration; +import it.grid.storm.persistence.converter.OverwriteModeConverter; import it.grid.storm.srm.types.TLifeTimeInSeconds; import it.grid.storm.srm.types.TOverwriteMode; import it.grid.storm.srm.types.TSURL; diff --git a/src/main/java/it/grid/storm/tape/recalltable/TapeRecallCatalog.java b/src/main/java/it/grid/storm/tape/recalltable/TapeRecallCatalog.java index 596026f5c..a323820ea 100644 --- a/src/main/java/it/grid/storm/tape/recalltable/TapeRecallCatalog.java +++ b/src/main/java/it/grid/storm/tape/recalltable/TapeRecallCatalog.java @@ -24,13 +24,13 @@ import com.google.common.collect.Lists; import it.grid.storm.asynch.Suspendedable; -import it.grid.storm.catalogs.BoLPersistentChunkData; -import it.grid.storm.catalogs.PersistentChunkData; -import it.grid.storm.catalogs.PtGData; -import it.grid.storm.catalogs.RequestData; -import it.grid.storm.persistence.PersistenceDirector; import it.grid.storm.persistence.dao.TapeRecallDAO; import it.grid.storm.persistence.exceptions.DataAccessException; +import it.grid.storm.persistence.impl.mysql.TapeRecallDAOMySql; +import it.grid.storm.persistence.model.BoLPersistentChunkData; +import it.grid.storm.persistence.model.PersistentChunkData; +import it.grid.storm.persistence.model.PtGData; +import it.grid.storm.persistence.model.RequestData; import it.grid.storm.persistence.model.TapeRecallTO; import it.grid.storm.tape.recalltable.model.TapeRecallStatus; @@ -42,6 +42,7 @@ import java.util.Date; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.UUID; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentLinkedQueue; @@ -51,10 +52,19 @@ public class TapeRecallCatalog { private static final Logger log = LoggerFactory.getLogger(TapeRecallCatalog.class); - private final TapeRecallDAO tapeRecallDAO; + private static TapeRecallCatalog instance; private static Map> recallBuckets = new 
ConcurrentHashMap<>(); + public static synchronized TapeRecallCatalog getInstance() { + if (instance == null) { + instance = new TapeRecallCatalog(); + } + return instance; + } + + private TapeRecallDAO tapeRecallDAO; + /** * Default constructor * @@ -62,7 +72,7 @@ public class TapeRecallCatalog { */ public TapeRecallCatalog() { - tapeRecallDAO = PersistenceDirector.getDAOFactory().getTapeRecallDAO(); + tapeRecallDAO = TapeRecallDAOMySql.getInstance(); } /** @@ -98,25 +108,6 @@ public int getNumberTaskInProgress() throws DataAccessException { return result; } - /** - * Determines how many task rows have an in-progress state given a certain VO - * - * @param voName @return @throws DataAccessException - */ - public int getNumberTaskInProgress(String voName) throws DataAccessException { - - int result = -1; - try { - result = tapeRecallDAO.getNumberInProgress(voName); - } catch (DataAccessException e) { - log.error( - "Unable to retrieve the number of tasks currently in progress. DataAccessException: {}", - e.getMessage(), e); - throw e; - } - return result; - } - /** * Determines how many task rows have a queued state * @@ -135,24 +126,6 @@ public int getNumberTaskQueued() throws DataAccessException { return result; } - /** - * Determines how many task rows have a queued state given a certain VO - * - * @return @throws DataAccessException - */ - public int getNumberTaskQueued(String voName) throws DataAccessException { - - int result = -1; - try { - result = tapeRecallDAO.getNumberQueued(voName); - } catch (DataAccessException e) { - log.error("Unable to retrieve the number of tasks queued. 
DataAccessException: {}", - e.getMessage(), e); - throw e; - } - return result; - } - /** * * Determines how many task rows have a queued state and their deferred start time is elapsed @@ -173,30 +146,10 @@ public int getReadyForTakeOver() throws DataAccessException { return result; } - /** - * Determines how many task rows given a certain VO have a queued state and their deferred start - * time is elapsed - * - * @return @throws DataAccessException - */ - public int getReadyForTakeOver(String voName) throws DataAccessException { - - int result = -1; - try { - result = tapeRecallDAO.getReadyForTakeOver(voName); - } catch (DataAccessException e) { - log.error( - "Unable to retrieve the number of tasks ready for the take-over. DataAccessException: {}", - e.getMessage(), e); - throw e; - } - return result; - } - /** * @param taskId @param requestToken @return @throws DataAccessException */ - public TapeRecallTO getTask(UUID taskId, String requestToken) throws DataAccessException { + public Optional getTask(UUID taskId, String requestToken) throws DataAccessException { return tapeRecallDAO.getTask(taskId, requestToken); } @@ -279,48 +232,6 @@ public List getAllInProgressTasks(int numberOfTaks) { return taskList; } - /** - * @return - */ - public TapeRecallTO takeoverTask() { - - TapeRecallTO task = null; - try { - task = tapeRecallDAO.takeoverTask(); - } catch (DataAccessException e) { - log.error("Unable to takeove a task.", e); - } - return task; - } - - /** - * @param voName @return - */ - public TapeRecallTO takeoverTask(String voName) { - - TapeRecallTO task = null; - try { - task = tapeRecallDAO.takeoverTask(voName); - } catch (DataAccessException e) { - log.error("Unable to takeover a task for vo {}", voName, e); - } - return task; - } - - /** - * @param numberOfTaks @param voName @return - */ - public List takeoverTasks(int numberOfTaks, String voName) { - - List taskList = Lists.newArrayList(); - try { - 
taskList.addAll(tapeRecallDAO.takeoverTasksWithDoubles(numberOfTaks, voName)); - } catch (DataAccessException e) { - log.error("Unable to takeover {} tasks for vo {}", numberOfTaks, voName, e); - } - return taskList; - } - /** * Method used by PtGChunk and BoLChunk to request the recall of a file * diff --git a/src/main/java/it/grid/storm/tape/recalltable/model/PutTapeRecallStatusLogic.java b/src/main/java/it/grid/storm/tape/recalltable/model/PutTapeRecallStatusLogic.java index 54523b253..0b1d0ad32 100644 --- a/src/main/java/it/grid/storm/tape/recalltable/model/PutTapeRecallStatusLogic.java +++ b/src/main/java/it/grid/storm/tape/recalltable/model/PutTapeRecallStatusLogic.java @@ -23,6 +23,7 @@ import it.grid.storm.tape.recalltable.TapeRecallException; import java.util.Date; +import java.util.Optional; import java.util.UUID; import javax.ws.rs.core.Response; @@ -36,76 +37,88 @@ */ public class PutTapeRecallStatusLogic { - private static final Logger log = LoggerFactory - .getLogger(PutTapeRecallStatusLogic.class); - - /** - * @param requestToken - * @param stori - * @return - * @throws TapeRecallException - */ - public static Response serveRequest(String requestToken, StoRI stori) - throws TapeRecallException { - - LocalFile localFile = stori.getLocalFile(); - boolean fileOnDisk; - - try { - fileOnDisk = localFile.isOnDisk(); - } catch (FSException e) { - log.error("Unable to test file {} presence on disk. 
FSException {}" , localFile.getAbsolutePath() , e.getMessage() , e); - throw new TapeRecallException("Error checking file existence"); - } - - if (!fileOnDisk) { - return Response.ok(false, TEXT_PLAIN_TYPE).status(200).build(); - } - - if (!stori.getVirtualFileSystem().getStorageClassType().isTapeEnabled()) { - // tape not enable for StoRI filesystem, nothing to do - return Response.ok(true, TEXT_PLAIN_TYPE).status(200).build(); - } - - String pfn = localFile.getAbsolutePath(); - UUID taskId = TapeRecallTO.buildTaskIdFromFileName(pfn); - TapeRecallCatalog rtCat = new TapeRecallCatalog(); - boolean exists = false; - try { - exists = rtCat.existsTask(taskId, requestToken); - } catch (DataAccessException e) { - log.error("Error checking existence of a recall task for taskId={} requestToken={}. DataAccessException: {}" , taskId , requestToken , e.getMessage() , e); - throw new TapeRecallException("Error reading from tape recall table"); - } - if (!exists) { - // no recall tasks for this file, nothing to do - return Response.ok(true, TEXT_PLAIN_TYPE).status(200).build(); - } - - TapeRecallTO task; - try { - task = rtCat.getTask(taskId, requestToken); - } catch (DataAccessException e) { - log.error("Unable to update task recall status because unable to retrieve groupTaskId for token {}. DataAccessException: {}", requestToken , e.getMessage(),e); - throw new TapeRecallException("Error reading from tape recall table"); - } - - if (TapeRecallStatus.getRecallTaskStatus(task.getStatusId()).equals(SUCCESS)) { - // status already updated, nothing to do - return Response.ok(true, TEXT_PLAIN_TYPE).status(200).build(); - } - - UUID groupTaskId = task.getGroupTaskId(); - boolean updated; - try { - updated = rtCat.changeGroupTaskStatus(groupTaskId, SUCCESS, new Date()); - } catch (DataAccessException e) { - log.error("Unable to update task recall status for token {} with groupTaskId={}. 
DataAccessException : {}", requestToken , groupTaskId , e.getMessage() , e); - throw new TapeRecallException("Error updating tape recall table"); - } - if (updated) { - log.info("Task status set to SUCCESS. groupTaskId={} requestToken={} pfn={}" , groupTaskId , requestToken , pfn); - } - return Response.ok(true, TEXT_PLAIN_TYPE).status(200).build(); - } + private static final Logger log = LoggerFactory.getLogger(PutTapeRecallStatusLogic.class); + + /** + * @param requestToken + * @param stori + * @return + * @throws TapeRecallException + */ + public static Response serveRequest(String requestToken, StoRI stori) throws TapeRecallException { + + LocalFile localFile = stori.getLocalFile(); + boolean fileOnDisk; + + try { + fileOnDisk = localFile.isOnDisk(); + } catch (FSException e) { + log.error("Unable to test file {} presence on disk. FSException {}", + localFile.getAbsolutePath(), e.getMessage(), e); + throw new TapeRecallException("Error checking file existence"); + } + + if (!fileOnDisk) { + return Response.ok(false, TEXT_PLAIN_TYPE).status(200).build(); + } + + if (!stori.getVirtualFileSystem().getStorageClassType().isTapeEnabled()) { + // tape not enable for StoRI filesystem, nothing to do + return Response.ok(true, TEXT_PLAIN_TYPE).status(200).build(); + } + + String pfn = localFile.getAbsolutePath(); + UUID taskId = TapeRecallTO.buildTaskIdFromFileName(pfn); + TapeRecallCatalog rtCat = new TapeRecallCatalog(); + boolean exists = false; + try { + exists = rtCat.existsTask(taskId, requestToken); + } catch (DataAccessException e) { + log.error( + "Error checking existence of a recall task for taskId={} requestToken={}. 
DataAccessException: {}", + taskId, requestToken, e.getMessage(), e); + throw new TapeRecallException("Error reading from tape recall table"); + } + if (!exists) { + // no recall tasks for this file, nothing to do + return Response.ok(true, TEXT_PLAIN_TYPE).status(200).build(); + } + + TapeRecallTO task; + try { + Optional tTask = rtCat.getTask(taskId, requestToken); + if (tTask.isPresent()) { + task = tTask.get(); + } else { + // no recall tasks for this file, nothing to do + return Response.ok(true, TEXT_PLAIN_TYPE).status(200).build(); + } + } catch (DataAccessException e) { + log.error( + "Unable to update task recall status because unable to retrieve groupTaskId for token {}. DataAccessException: {}", + requestToken, e.getMessage(), e); + throw new TapeRecallException("Error reading from tape recall table"); + } + + if (TapeRecallStatus.getRecallTaskStatus(task.getStatusId()).equals(SUCCESS)) { + // status already updated, nothing to do + return Response.ok(true, TEXT_PLAIN_TYPE).status(200).build(); + } + + UUID groupTaskId = task.getGroupTaskId(); + boolean updated; + try { + updated = rtCat.changeGroupTaskStatus(groupTaskId, SUCCESS, new Date()); + } catch (DataAccessException e) { + log.error( + "Unable to update task recall status for token {} with groupTaskId={}. DataAccessException : {}", + requestToken, groupTaskId, e.getMessage(), e); + throw new TapeRecallException("Error updating tape recall table"); + } + if (updated) { + log.info("Task status set to SUCCESS. 
groupTaskId={} requestToken={} pfn={}", groupTaskId, + requestToken, pfn); + } + return Response.ok(true, TEXT_PLAIN_TYPE).status(200).build(); + } } diff --git a/src/main/java/it/grid/storm/tape/recalltable/resources/TaskResource.java b/src/main/java/it/grid/storm/tape/recalltable/resources/TaskResource.java index 7a2196f63..33454b92e 100644 --- a/src/main/java/it/grid/storm/tape/recalltable/resources/TaskResource.java +++ b/src/main/java/it/grid/storm/tape/recalltable/resources/TaskResource.java @@ -254,8 +254,13 @@ public void putNewTaskStatusOrRetryValue(@PathParam("groupTaskId") UUID groupTas try { - recallCatalog.changeGroupTaskStatus(groupTaskId, - TapeRecallStatus.getRecallTaskStatus(intValue), new Date()); + TapeRecallStatus updatedStatus = TapeRecallStatus.getRecallTaskStatus(intValue); + recallCatalog.changeGroupTaskStatus(groupTaskId, updatedStatus, new Date()); + // Update all PtG or BoL related + if (updatedStatus.isFinalStatus()) { + + } + } catch (DataAccessException e) { diff --git a/src/test/java/it/grid/storm/tape/recalltable/resources/TaskResourceTest.java b/src/test/java/it/grid/storm/tape/recalltable/resources/TaskResourceTest.java index e944e5682..283d22982 100644 --- a/src/test/java/it/grid/storm/tape/recalltable/resources/TaskResourceTest.java +++ b/src/test/java/it/grid/storm/tape/recalltable/resources/TaskResourceTest.java @@ -1,23 +1,24 @@ package it.grid.storm.tape.recalltable.resources; import static it.grid.storm.config.Configuration.CONFIG_FILE_PATH; +import static it.grid.storm.persistence.model.TapeRecallTO.RecallTaskType.BOL; +import static it.grid.storm.persistence.model.TapeRecallTO.RecallTaskType.PTG; import static it.grid.storm.tape.recalltable.resources.TaskInsertRequest.MAX_RETRY_ATTEMPTS; import static javax.ws.rs.core.Response.Status.BAD_REQUEST; import static javax.ws.rs.core.Response.Status.CREATED; import static javax.ws.rs.core.Response.Status.INTERNAL_SERVER_ERROR; import static 
javax.ws.rs.core.Response.Status.NOT_FOUND; import static javax.ws.rs.core.Response.Status.OK; -import static junit.framework.Assert.assertNotNull; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.instanceOf; -import static org.junit.Assert.assertThat; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import java.io.IOException; import java.net.URI; -import java.util.ArrayList; import java.util.Date; import java.util.List; +import java.util.Random; import java.util.UUID; import javax.ws.rs.WebApplicationException; @@ -69,13 +70,36 @@ public class TaskResourceTest { System.setProperty(CONFIG_FILE_PATH, "storm.properties"); } + private TapeRecallTO createRandom(Date date, String voName) { + + TapeRecallTO result = new TapeRecallTO(); + Random r = new Random(); + result.setFileName("/root/" + voName + "/test/" + r.nextInt(1001)); + result.setRequestToken(TRequestToken.getRandom()); + if (r.nextInt(2) == 0) { + result.setRequestType(BOL); + } else { + result.setRequestType(PTG); + } + result.setUserID("FakeId"); + result.setRetryAttempt(0); + result.setPinLifetime(r.nextInt(1001)); + result.setVoName(voName); + result.setInsertionInstant(date); + int deferred = r.nextInt(2); + Date deferredRecallTime = new Date(date.getTime() + (deferred * (long) Math.random())); + result.setDeferredRecallInstant(deferredRecallTime); + result.setGroupTaskId(UUID.randomUUID()); + return result; + } + private TapeRecallCatalog getTapeRecallCatalogInsertSuccess(UUID groupTaskId) { TapeRecallCatalog catalog = Mockito.mock(TapeRecallCatalog.class); try { Mockito.when(catalog.insertNewTask(Mockito.any(TapeRecallTO.class))).thenReturn(groupTaskId); Mockito.when(catalog.getGroupTasks(groupTaskId)) - .thenReturn(Lists.newArrayList(TapeRecallTO.createRandom(new Date(), VFS_VONAME))); + .thenReturn(Lists.newArrayList(createRandom(new Date(), 
VFS_VONAME))); } catch (DataAccessException e) { e.printStackTrace(); } @@ -164,7 +188,7 @@ private void testGETTaskInfo(Response res) String requestTokenValue = location.getQuery().split("=")[1]; // prepare mocks for task info request - TapeRecallTO task = TapeRecallTO.createRandom(new Date(), VFS_VONAME); + TapeRecallTO task = createRandom(new Date(), VFS_VONAME); TRequestToken requestToken = Mockito.mock(TRequestToken.class); Mockito.when(requestToken.getValue()).thenReturn(requestTokenValue); task.setRequestToken(new TRequestToken(requestTokenValue, new Date())); @@ -173,7 +197,7 @@ private void testGETTaskInfo(Response res) // ask for task info res = recallEndpoint.getGroupTaskInfo(groupTaskId, requestTokenValue); - assertThat(res.getStatus(), equalTo(OK.getStatusCode())); + assertEquals(res.getStatus(), OK.getStatusCode()); ObjectMapper mapper = new ObjectMapper(); TapeRecallTO t = mapper.readValue(res.getEntity().toString(), TapeRecallTO.class); assertNotNull(t); @@ -194,7 +218,7 @@ public void testPOSTSuccess() .build(); Response res = recallEndpoint.postNewTask(request); assertNotNull(res.getHeaderString("Location")); - assertThat(res.getStatus(), equalTo(CREATED.getStatusCode())); + assertEquals(res.getStatus(), CREATED.getStatusCode()); testGETTaskInfo(res); } @@ -214,7 +238,7 @@ public void testPOSTSuccessWithNullVoName() .build(); Response res = recallEndpoint.postNewTask(request); assertNotNull(res.getHeaderString("Location")); - assertThat(res.getStatus(), equalTo(CREATED.getStatusCode())); + assertEquals(res.getStatus(), CREATED.getStatusCode()); testGETTaskInfo(res); } @@ -236,8 +260,8 @@ public void testPOSTNamespaceErrorOnResolvingStfnPath() recallEndpoint.postNewTask(request); fail(); } catch (WebApplicationException e) { - assertThat(e.getCause(), instanceOf(NamespaceException.class)); - assertThat(e.getResponse().getStatus(), equalTo(INTERNAL_SERVER_ERROR.getStatusCode())); + 
assertTrue(e.getCause().getClass().equals(NamespaceException.class)); + assertEquals(e.getResponse().getStatus(), INTERNAL_SERVER_ERROR.getStatusCode()); } } @@ -257,7 +281,7 @@ public void testPOSTBadVoNameRequested() recallEndpoint.postNewTask(request); fail(); } catch (WebApplicationException e) { - assertThat(e.getResponse().getStatus(), equalTo(BAD_REQUEST.getStatusCode())); + assertEquals(e.getResponse().getStatus(), BAD_REQUEST.getStatusCode()); } } @@ -272,8 +296,8 @@ public void testPOSTUnableToMapStfnPath() recallEndpoint.postNewTask(request); fail(); } catch (WebApplicationException e) { - assertThat(e.getCause(), instanceOf(ResourceNotFoundException.class)); - assertThat(e.getResponse().getStatus(), equalTo(NOT_FOUND.getStatusCode())); + assertTrue(e.getCause().getClass().equals(ResourceNotFoundException.class)); + assertEquals(e.getResponse().getStatus(), NOT_FOUND.getStatusCode()); } } @@ -294,7 +318,7 @@ public void testPOSTDbException() recallEndpoint.postNewTask(request); fail(); } catch (WebApplicationException e) { - assertThat(e.getResponse().getStatus(), equalTo(INTERNAL_SERVER_ERROR.getStatusCode())); + assertEquals(e.getResponse().getStatus(), INTERNAL_SERVER_ERROR.getStatusCode()); } } @@ -309,8 +333,8 @@ public void testPOSTValidationRequestNullFilePath() recallEndpoint.postNewTask(request); fail(); } catch (WebApplicationException e) { - assertThat(e.getResponse().getStatus(), equalTo(BAD_REQUEST.getStatusCode())); - assertThat(e.getResponse().getEntity().toString(), equalTo("Request must contain a STFN")); + assertEquals(e.getResponse().getStatus(), BAD_REQUEST.getStatusCode()); + assertEquals(e.getResponse().getEntity().toString(), "Request must contain a STFN"); } } @@ -325,8 +349,8 @@ public void testPOSTValidationRequestNullUserId() recallEndpoint.postNewTask(request); fail(); } catch (WebApplicationException e) { - assertThat(e.getResponse().getStatus(), equalTo(BAD_REQUEST.getStatusCode())); - 
assertThat(e.getResponse().getEntity().toString(), equalTo("Request must contain a userId")); + assertEquals(e.getResponse().getStatus(), BAD_REQUEST.getStatusCode()); + assertEquals(e.getResponse().getEntity().toString(), "Request must contain a userId"); } } @@ -342,9 +366,9 @@ public void testPOSTValidationRequestInvalidNegativeRetryAttempts() recallEndpoint.postNewTask(request); fail(); } catch (WebApplicationException e) { - assertThat(e.getResponse().getStatus(), equalTo(BAD_REQUEST.getStatusCode())); - assertThat(e.getResponse().getEntity().toString(), - equalTo("Retry attempts must be more or equal than zero.")); + assertEquals(e.getResponse().getStatus(), BAD_REQUEST.getStatusCode()); + assertEquals(e.getResponse().getEntity().toString(), + "Retry attempts must be more or equal than zero."); } } @@ -363,15 +387,15 @@ public void testPOSTValidationRequestInvalidTooManyRetryAttempts() recallEndpoint.postNewTask(request); fail(); } catch (WebApplicationException e) { - assertThat(e.getResponse().getStatus(), equalTo(BAD_REQUEST.getStatusCode())); - assertThat(e.getResponse().getEntity().toString(), - equalTo("Retry attempts must be less or equal than " + MAX_RETRY_ATTEMPTS + ".")); + assertEquals(e.getResponse().getStatus(), BAD_REQUEST.getStatusCode()); + assertEquals(e.getResponse().getEntity().toString(), + "Retry attempts must be less or equal than " + MAX_RETRY_ATTEMPTS + "."); } } private TapeRecallCatalog getTapeRecallCatalogInProgressNotEmpty() { - List emptyList = new ArrayList(); + List emptyList = Lists.newArrayList(); TapeRecallCatalog catalog = Mockito.mock(TapeRecallCatalog.class); Mockito.when(catalog.getAllInProgressTasks(Mockito.anyInt())).thenReturn(emptyList); return catalog; @@ -385,7 +409,7 @@ public void testGETTasksInProgressEmpty() TaskResource recallEndpoint = getTaskResource(getResourceService(STORI), getTapeRecallCatalogInProgressNotEmpty()); Response res = recallEndpoint.getTasks(10); - assertThat(res.getStatus(), 
equalTo(OK.getStatusCode())); + assertEquals(OK.getStatusCode(), res.getStatus()); }