forked from apache/spark
-
Notifications
You must be signed in to change notification settings - Fork 1
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
[SPARK-3870] EOL character enforcement
We have shell scripts and Windows batch files, so we should enforce proper EOL character. Author: Kousuke Saruta <[email protected]> Closes apache#2726 from sarutak/eol-enforcement and squashes the following commits: 9748c3f [Kousuke Saruta] Fixed make.bat 252de89 [Kousuke Saruta] Removed extra characters from make.bat 5b81c00 [Kousuke Saruta] Merge branch 'master' of git://git.apache.org/spark into eol-enforcement 8633ed2 [Kousuke Saruta] merge branch 'master' of git://git.apache.org/spark into eol-enforcement 5d630d8 [Kousuke Saruta] Merged ba10797 [Kousuke Saruta] Merge branch 'master' of git://git.apache.org/spark into eol-enforcement 7407515 [Kousuke Saruta] Merge branch 'master' of git://git.apache.org/spark into eol-enforcement 772fd4e [Kousuke Saruta] Normized EOL character in make.bat and compute-classpath.cmd ac7f873 [Kousuke Saruta] Added an entry for .gitattributes to .rat-excludes 1570e77 [Kousuke Saruta] Added .gitattributes
- Loading branch information
Showing
5 changed files
with
369 additions
and
366 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,2 @@ | ||
*.bat text eol=crlf | ||
*.cmd text eol=crlf |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,5 +1,6 @@ | ||
target | ||
.gitignore | ||
.gitattributes | ||
.project | ||
.classpath | ||
.mima-excludes | ||
|
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,117 +1,117 @@ | ||
@echo off | ||
|
||
rem | ||
rem Licensed to the Apache Software Foundation (ASF) under one or more | ||
rem contributor license agreements. See the NOTICE file distributed with | ||
rem this work for additional information regarding copyright ownership. | ||
rem The ASF licenses this file to You under the Apache License, Version 2.0 | ||
rem (the "License"); you may not use this file except in compliance with | ||
rem the License. You may obtain a copy of the License at | ||
rem | ||
rem http://www.apache.org/licenses/LICENSE-2.0 | ||
rem | ||
rem Unless required by applicable law or agreed to in writing, software | ||
rem distributed under the License is distributed on an "AS IS" BASIS, | ||
rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
rem See the License for the specific language governing permissions and | ||
rem limitations under the License. | ||
rem | ||
|
||
rem This script computes Spark's classpath and prints it to stdout; it's used by both the "run" | ||
rem script and the ExecutorRunner in standalone cluster mode. | ||
|
||
rem If we're called from spark-class2.cmd, it already set enabledelayedexpansion and setting | ||
rem it here would stop us from affecting its copy of the CLASSPATH variable; otherwise we | ||
rem need to set it here because we use !datanucleus_jars! below. | ||
if "%DONT_PRINT_CLASSPATH%"=="1" goto skip_delayed_expansion | ||
setlocal enabledelayedexpansion | ||
:skip_delayed_expansion | ||
|
||
set SCALA_VERSION=2.10 | ||
|
||
rem Figure out where the Spark framework is installed | ||
set FWDIR=%~dp0..\ | ||
|
||
rem Load environment variables from conf\spark-env.cmd, if it exists | ||
if exist "%FWDIR%conf\spark-env.cmd" call "%FWDIR%conf\spark-env.cmd" | ||
|
||
rem Build up classpath | ||
set CLASSPATH=%SPARK_CLASSPATH%;%SPARK_SUBMIT_CLASSPATH% | ||
|
||
if not "x%SPARK_CONF_DIR%"=="x" ( | ||
set CLASSPATH=%CLASSPATH%;%SPARK_CONF_DIR% | ||
) else ( | ||
set CLASSPATH=%CLASSPATH%;%FWDIR%conf | ||
) | ||
|
||
if exist "%FWDIR%RELEASE" ( | ||
for %%d in ("%FWDIR%lib\spark-assembly*.jar") do ( | ||
set ASSEMBLY_JAR=%%d | ||
) | ||
) else ( | ||
for %%d in ("%FWDIR%assembly\target\scala-%SCALA_VERSION%\spark-assembly*hadoop*.jar") do ( | ||
set ASSEMBLY_JAR=%%d | ||
) | ||
) | ||
|
||
set CLASSPATH=%CLASSPATH%;%ASSEMBLY_JAR% | ||
|
||
rem When Hive support is needed, Datanucleus jars must be included on the classpath. | ||
rem Datanucleus jars do not work if only included in the uber jar as plugin.xml metadata is lost. | ||
rem Both sbt and maven will populate "lib_managed/jars/" with the datanucleus jars when Spark is | ||
rem built with Hive, so look for them there. | ||
if exist "%FWDIR%RELEASE" ( | ||
set datanucleus_dir=%FWDIR%lib | ||
) else ( | ||
set datanucleus_dir=%FWDIR%lib_managed\jars | ||
) | ||
set "datanucleus_jars=" | ||
for %%d in ("%datanucleus_dir%\datanucleus-*.jar") do ( | ||
set datanucleus_jars=!datanucleus_jars!;%%d | ||
) | ||
set CLASSPATH=%CLASSPATH%;%datanucleus_jars% | ||
|
||
set SPARK_CLASSES=%FWDIR%core\target\scala-%SCALA_VERSION%\classes | ||
set SPARK_CLASSES=%SPARK_CLASSES%;%FWDIR%repl\target\scala-%SCALA_VERSION%\classes | ||
set SPARK_CLASSES=%SPARK_CLASSES%;%FWDIR%mllib\target\scala-%SCALA_VERSION%\classes | ||
set SPARK_CLASSES=%SPARK_CLASSES%;%FWDIR%bagel\target\scala-%SCALA_VERSION%\classes | ||
set SPARK_CLASSES=%SPARK_CLASSES%;%FWDIR%graphx\target\scala-%SCALA_VERSION%\classes | ||
set SPARK_CLASSES=%SPARK_CLASSES%;%FWDIR%streaming\target\scala-%SCALA_VERSION%\classes | ||
set SPARK_CLASSES=%SPARK_CLASSES%;%FWDIR%tools\target\scala-%SCALA_VERSION%\classes | ||
set SPARK_CLASSES=%SPARK_CLASSES%;%FWDIR%sql\catalyst\target\scala-%SCALA_VERSION%\classes | ||
set SPARK_CLASSES=%SPARK_CLASSES%;%FWDIR%sql\core\target\scala-%SCALA_VERSION%\classes | ||
set SPARK_CLASSES=%SPARK_CLASSES%;%FWDIR%sql\hive\target\scala-%SCALA_VERSION%\classes | ||
|
||
set SPARK_TEST_CLASSES=%FWDIR%core\target\scala-%SCALA_VERSION%\test-classes | ||
set SPARK_TEST_CLASSES=%SPARK_TEST_CLASSES%;%FWDIR%repl\target\scala-%SCALA_VERSION%\test-classes | ||
set SPARK_TEST_CLASSES=%SPARK_TEST_CLASSES%;%FWDIR%mllib\target\scala-%SCALA_VERSION%\test-classes | ||
set SPARK_TEST_CLASSES=%SPARK_TEST_CLASSES%;%FWDIR%bagel\target\scala-%SCALA_VERSION%\test-classes | ||
set SPARK_TEST_CLASSES=%SPARK_TEST_CLASSES%;%FWDIR%graphx\target\scala-%SCALA_VERSION%\test-classes | ||
set SPARK_TEST_CLASSES=%SPARK_TEST_CLASSES%;%FWDIR%streaming\target\scala-%SCALA_VERSION%\test-classes | ||
set SPARK_TEST_CLASSES=%SPARK_TEST_CLASSES%;%FWDIR%sql\catalyst\target\scala-%SCALA_VERSION%\test-classes | ||
set SPARK_TEST_CLASSES=%SPARK_TEST_CLASSES%;%FWDIR%sql\core\target\scala-%SCALA_VERSION%\test-classes | ||
set SPARK_TEST_CLASSES=%SPARK_TEST_CLASSES%;%FWDIR%sql\hive\target\scala-%SCALA_VERSION%\test-classes | ||
|
||
if "x%SPARK_TESTING%"=="x1" ( | ||
rem Add test classes to path - note, add SPARK_CLASSES and SPARK_TEST_CLASSES before CLASSPATH | ||
rem so that local compilation takes precedence over assembled jar | ||
set CLASSPATH=%SPARK_CLASSES%;%SPARK_TEST_CLASSES%;%CLASSPATH% | ||
) | ||
|
||
rem Add hadoop conf dir - else FileSystem.*, etc fail | ||
rem Note, this assumes that there is either a HADOOP_CONF_DIR or YARN_CONF_DIR which hosts | ||
rem the configuration files. | ||
if "x%HADOOP_CONF_DIR%"=="x" goto no_hadoop_conf_dir | ||
set CLASSPATH=%CLASSPATH%;%HADOOP_CONF_DIR% | ||
:no_hadoop_conf_dir | ||
|
||
if "x%YARN_CONF_DIR%"=="x" goto no_yarn_conf_dir | ||
set CLASSPATH=%CLASSPATH%;%YARN_CONF_DIR% | ||
:no_yarn_conf_dir | ||
|
||
rem A bit of a hack to allow calling this script within run2.cmd without seeing output | ||
if "%DONT_PRINT_CLASSPATH%"=="1" goto exit | ||
|
||
echo %CLASSPATH% | ||
|
||
:exit | ||
@echo off | ||
|
||
rem | ||
rem Licensed to the Apache Software Foundation (ASF) under one or more | ||
rem contributor license agreements. See the NOTICE file distributed with | ||
rem this work for additional information regarding copyright ownership. | ||
rem The ASF licenses this file to You under the Apache License, Version 2.0 | ||
rem (the "License"); you may not use this file except in compliance with | ||
rem the License. You may obtain a copy of the License at | ||
rem | ||
rem http://www.apache.org/licenses/LICENSE-2.0 | ||
rem | ||
rem Unless required by applicable law or agreed to in writing, software | ||
rem distributed under the License is distributed on an "AS IS" BASIS, | ||
rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
rem See the License for the specific language governing permissions and | ||
rem limitations under the License. | ||
rem | ||
|
||
rem This script computes Spark's classpath and prints it to stdout; it's used by both the "run" | ||
rem script and the ExecutorRunner in standalone cluster mode. | ||
|
||
rem If we're called from spark-class2.cmd, it already set enabledelayedexpansion and setting | ||
rem it here would stop us from affecting its copy of the CLASSPATH variable; otherwise we | ||
rem need to set it here because we use !datanucleus_jars! below. | ||
if "%DONT_PRINT_CLASSPATH%"=="1" goto skip_delayed_expansion | ||
setlocal enabledelayedexpansion | ||
:skip_delayed_expansion | ||
|
||
set SCALA_VERSION=2.10 | ||
|
||
rem Figure out where the Spark framework is installed | ||
set FWDIR=%~dp0..\ | ||
|
||
rem Load environment variables from conf\spark-env.cmd, if it exists | ||
if exist "%FWDIR%conf\spark-env.cmd" call "%FWDIR%conf\spark-env.cmd" | ||
|
||
rem Build up classpath | ||
set CLASSPATH=%SPARK_CLASSPATH%;%SPARK_SUBMIT_CLASSPATH% | ||
|
||
if not "x%SPARK_CONF_DIR%"=="x" ( | ||
set CLASSPATH=%CLASSPATH%;%SPARK_CONF_DIR% | ||
) else ( | ||
set CLASSPATH=%CLASSPATH%;%FWDIR%conf | ||
) | ||
|
||
if exist "%FWDIR%RELEASE" ( | ||
for %%d in ("%FWDIR%lib\spark-assembly*.jar") do ( | ||
set ASSEMBLY_JAR=%%d | ||
) | ||
) else ( | ||
for %%d in ("%FWDIR%assembly\target\scala-%SCALA_VERSION%\spark-assembly*hadoop*.jar") do ( | ||
set ASSEMBLY_JAR=%%d | ||
) | ||
) | ||
|
||
set CLASSPATH=%CLASSPATH%;%ASSEMBLY_JAR% | ||
|
||
rem When Hive support is needed, Datanucleus jars must be included on the classpath. | ||
rem Datanucleus jars do not work if only included in the uber jar as plugin.xml metadata is lost. | ||
rem Both sbt and maven will populate "lib_managed/jars/" with the datanucleus jars when Spark is | ||
rem built with Hive, so look for them there. | ||
if exist "%FWDIR%RELEASE" ( | ||
set datanucleus_dir=%FWDIR%lib | ||
) else ( | ||
set datanucleus_dir=%FWDIR%lib_managed\jars | ||
) | ||
set "datanucleus_jars=" | ||
for %%d in ("%datanucleus_dir%\datanucleus-*.jar") do ( | ||
set datanucleus_jars=!datanucleus_jars!;%%d | ||
) | ||
set CLASSPATH=%CLASSPATH%;%datanucleus_jars% | ||
|
||
set SPARK_CLASSES=%FWDIR%core\target\scala-%SCALA_VERSION%\classes | ||
set SPARK_CLASSES=%SPARK_CLASSES%;%FWDIR%repl\target\scala-%SCALA_VERSION%\classes | ||
set SPARK_CLASSES=%SPARK_CLASSES%;%FWDIR%mllib\target\scala-%SCALA_VERSION%\classes | ||
set SPARK_CLASSES=%SPARK_CLASSES%;%FWDIR%bagel\target\scala-%SCALA_VERSION%\classes | ||
set SPARK_CLASSES=%SPARK_CLASSES%;%FWDIR%graphx\target\scala-%SCALA_VERSION%\classes | ||
set SPARK_CLASSES=%SPARK_CLASSES%;%FWDIR%streaming\target\scala-%SCALA_VERSION%\classes | ||
set SPARK_CLASSES=%SPARK_CLASSES%;%FWDIR%tools\target\scala-%SCALA_VERSION%\classes | ||
set SPARK_CLASSES=%SPARK_CLASSES%;%FWDIR%sql\catalyst\target\scala-%SCALA_VERSION%\classes | ||
set SPARK_CLASSES=%SPARK_CLASSES%;%FWDIR%sql\core\target\scala-%SCALA_VERSION%\classes | ||
set SPARK_CLASSES=%SPARK_CLASSES%;%FWDIR%sql\hive\target\scala-%SCALA_VERSION%\classes | ||
|
||
set SPARK_TEST_CLASSES=%FWDIR%core\target\scala-%SCALA_VERSION%\test-classes | ||
set SPARK_TEST_CLASSES=%SPARK_TEST_CLASSES%;%FWDIR%repl\target\scala-%SCALA_VERSION%\test-classes | ||
set SPARK_TEST_CLASSES=%SPARK_TEST_CLASSES%;%FWDIR%mllib\target\scala-%SCALA_VERSION%\test-classes | ||
set SPARK_TEST_CLASSES=%SPARK_TEST_CLASSES%;%FWDIR%bagel\target\scala-%SCALA_VERSION%\test-classes | ||
set SPARK_TEST_CLASSES=%SPARK_TEST_CLASSES%;%FWDIR%graphx\target\scala-%SCALA_VERSION%\test-classes | ||
set SPARK_TEST_CLASSES=%SPARK_TEST_CLASSES%;%FWDIR%streaming\target\scala-%SCALA_VERSION%\test-classes | ||
set SPARK_TEST_CLASSES=%SPARK_TEST_CLASSES%;%FWDIR%sql\catalyst\target\scala-%SCALA_VERSION%\test-classes | ||
set SPARK_TEST_CLASSES=%SPARK_TEST_CLASSES%;%FWDIR%sql\core\target\scala-%SCALA_VERSION%\test-classes | ||
set SPARK_TEST_CLASSES=%SPARK_TEST_CLASSES%;%FWDIR%sql\hive\target\scala-%SCALA_VERSION%\test-classes | ||
|
||
if "x%SPARK_TESTING%"=="x1" ( | ||
rem Add test classes to path - note, add SPARK_CLASSES and SPARK_TEST_CLASSES before CLASSPATH | ||
rem so that local compilation takes precedence over assembled jar | ||
set CLASSPATH=%SPARK_CLASSES%;%SPARK_TEST_CLASSES%;%CLASSPATH% | ||
) | ||
|
||
rem Add hadoop conf dir - else FileSystem.*, etc fail | ||
rem Note, this assumes that there is either a HADOOP_CONF_DIR or YARN_CONF_DIR which hosts | ||
rem the configuration files. | ||
if "x%HADOOP_CONF_DIR%"=="x" goto no_hadoop_conf_dir | ||
set CLASSPATH=%CLASSPATH%;%HADOOP_CONF_DIR% | ||
:no_hadoop_conf_dir | ||
|
||
if "x%YARN_CONF_DIR%"=="x" goto no_yarn_conf_dir | ||
set CLASSPATH=%CLASSPATH%;%YARN_CONF_DIR% | ||
:no_yarn_conf_dir | ||
|
||
rem A bit of a hack to allow calling this script within run2.cmd without seeing output | ||
if "%DONT_PRINT_CLASSPATH%"=="1" goto exit | ||
|
||
echo %CLASSPATH% | ||
|
||
:exit |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,6 +1,6 @@ | ||
@ECHO OFF | ||
|
||
rem This is the entry point for running Sphinx documentation. To avoid polluting the | ||
rem environment, it just launches a new cmd to do the real work. | ||
|
||
cmd /V /E /C %~dp0make2.bat %* | ||
@ECHO OFF | ||
|
||
rem This is the entry point for running Sphinx documentation. To avoid polluting the | ||
rem environment, it just launches a new cmd to do the real work. | ||
|
||
cmd /V /E /C %~dp0make2.bat %* |
Oops, something went wrong.