Revision 972338ae771c99fc63acb5f75fdfa2f6d2c0ffab authored by Dongjoon Hyun on 27 June 2022, 08:29:57 UTC, committed by Dongjoon Hyun on 27 June 2022, 08:31:01 UTC
This PR aims to fix a regression in Apache Spark 3.3.0 that rejects pod name prefixes longer than 63 characters.

Although a Pod's `hostname` follows [DNS Label Names](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-label-names), whose maximum length is 63, the Pod name itself follows [DNS Subdomain Names](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-subdomain-names), whose maximum length is 253.
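
For illustration only (a minimal sketch, not the actual Spark change), the Scala snippet below contrasts the two limits; the object name, method names, and regexes are assumptions drawn from the Kubernetes documentation linked above:

    object NameLimits {
      // Pod names are DNS Subdomain Names: at most 253 lowercase alphanumeric
      // characters, '-' or '.', starting and ending with an alphanumeric character.
      val MaxPodNameLength = 253
      // Hostnames are DNS Label Names: at most 63 characters, and no '.' allowed.
      val MaxHostnameLength = 63

      def isValidPodName(name: String): Boolean =
        name.length <= MaxPodNameLength &&
          name.matches("[a-z0-9]([-a-z0-9.]*[a-z0-9])?")

      def isValidHostname(name: String): Boolean =
        name.length <= MaxHostnameLength &&
          name.matches("[a-z0-9]([-a-z0-9]*[a-z0-9])?")

      // A 100-character prefix is a legal pod name even though it is not a legal
      // hostname, which is why rejecting it at 63 characters was a regression:
      //   isValidPodName("a" * 100)  // true
      //   isValidHostname("a" * 100) // false
    }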

Yes, this fixes a regression.

Passes the CIs with the updated unit tests.

Closes #36999 from dongjoon-hyun/SPARK-39614.

Authored-by: Dongjoon Hyun <dongjoon@apache.org>
Signed-off-by: Dongjoon Hyun <dongjoon@apache.org>
(cherry picked from commit c15508f0d6a49738db5edf7eb139cc1d438af9a9)
Signed-off-by: Dongjoon Hyun <dongjoon@apache.org>
1 parent 427148f
appveyor.yml
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

version: "{build}-{branch}"

shallow_clone: true

platform: x64
configuration: Debug

branches:
  only:
    - master

only_commits:
  files:
    - appveyor.yml
    - dev/appveyor-install-dependencies.ps1
    - R/
    - sql/core/src/main/scala/org/apache/spark/sql/api/r/
    - core/src/main/scala/org/apache/spark/api/r/
    - mllib/src/main/scala/org/apache/spark/ml/r/
    - core/src/test/scala/org/apache/spark/deploy/SparkSubmitSuite.scala
    - bin/*.cmd

cache:
  - C:\Users\appveyor\.m2

install:
  # Install SBT and dependencies
  - ps: .\dev\appveyor-install-dependencies.ps1
  # Install packages required for R unit tests. xml2 is required to use the jUnit reporter in testthat.
  - cmd: Rscript -e "install.packages(c('knitr', 'rmarkdown', 'testthat', 'e1071', 'survival', 'arrow', 'xml2'), repos='https://cloud.r-project.org/')"
  - cmd: Rscript -e "pkg_list <- as.data.frame(installed.packages()[,c(1, 3:4)]); pkg_list[is.na(pkg_list$Priority), 1:2, drop = FALSE]"

build_script:
  # '-Djna.nosys=true' is required to avoid kernel32.dll load failure.
  # See SPARK-28759.
  # Ideally we should check the tests related to Hive in SparkR as well (SPARK-31745).
  - cmd: set SBT_MAVEN_PROFILES=-Psparkr
  - cmd: set SBT_OPTS=-Djna.nosys=true -Dfile.encoding=UTF-8 -Xms4096m -Xmx4096m -XX:ReservedCodeCacheSize=128m
  - cmd: sbt package
  - cmd: set SBT_MAVEN_PROFILES=
  - cmd: set SBT_OPTS=

environment:
  NOT_CRAN: true
  # See SPARK-27848. Currently installing some dependent packages causes
  # "(converted from warning) unable to identify current timezone 'C':" for an unknown reason.
  # This environment variable works around the issue so that SparkR can be tested against a higher version.
  R_REMOTES_NO_ERRORS_FROM_WARNINGS: true

test_script:
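  # Run the SparkR test suite (R\pkg\tests\run-all.R) through spark-submit2.cmd.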
  - cmd: .\bin\spark-submit2.cmd --driver-java-options "-Dlog4j.configuration=file:///%CD:\=/%/R/log4j.properties" --conf spark.hadoop.fs.defaultFS="file:///" R\pkg\tests\run-all.R

notifications:
  - provider: Email
    on_build_success: false
    on_build_failure: false
    on_build_status_changed: false