I have a REST application integrated with Kubernetes for testing REST requests. Now, when I execute a POST request from my client side, the status of the automatically created Job stays PENDING indefinitely. The same happens to the Pod, which is also created automatically.
When I looked deeper into the events on the dashboard, I saw that the volume gets attached but cannot be mounted, with this error:
Unable to mount volumes for pod "ingestion-88dhg_default(4a8dd589-e3d3-4424-bc11-27d51822d85b)": timeout expired waiting for volumes to attach or mount for pod "default"/"ingestion-88dhg". list of unmounted volumes=[cdiworkspace-volume]. list of unattached volumes=[cdiworkspace-volume default-token-qz2nb]
I defined a PersistentVolume and a PersistentVolumeClaim manually, using the manifests below, but they never got attached to any pod (a sketch of what I understand a manual reference would look like follows the PVC manifest). Should I be doing this myself?
PV
{
  "kind": "PersistentVolume",
  "apiVersion": "v1",
  "metadata": {
    "name": "cdiworkspace",
    "selfLink": "/api/v1/persistentvolumes/cdiworkspace",
    "uid": "92252f76-fe51-4225-9b63-4d6228d9e5ea",
    "resourceVersion": "100026",
    "creationTimestamp": "2019-07-10T09:49:04Z",
    "annotations": {
      "pv.kubernetes.io/bound-by-controller": "yes"
    },
    "finalizers": [
      "kubernetes.io/pv-protection"
    ]
  },
  "spec": {
    "capacity": {
      "storage": "10Gi"
    },
    "fc": {
      "targetWWNs": [
        "50060e801049cfd1"
      ],
      "lun": 0
    },
    "accessModes": [
      "ReadWriteOnce"
    ],
    "claimRef": {
      "kind": "PersistentVolumeClaim",
      "namespace": "default",
      "name": "cdiworkspace",
      "uid": "0ce96c77-9e0d-4b1f-88bb-ad8b84072000",
      "apiVersion": "v1",
      "resourceVersion": "98688"
    },
    "persistentVolumeReclaimPolicy": "Retain",
    "storageClassName": "standard",
    "volumeMode": "Block"
  },
  "status": {
    "phase": "Bound"
  }
}
PVC
{
  "kind": "PersistentVolumeClaim",
  "apiVersion": "v1",
  "metadata": {
    "name": "cdiworkspace",
    "namespace": "default",
    "selfLink": "/api/v1/namespaces/default/persistentvolumeclaims/cdiworkspace",
    "uid": "0ce96c77-9e0d-4b1f-88bb-ad8b84072000",
    "resourceVersion": "100028",
    "creationTimestamp": "2019-07-10T09:32:16Z",
    "annotations": {
      "pv.kubernetes.io/bind-completed": "yes",
      "pv.kubernetes.io/bound-by-controller": "yes",
      "volume.beta.kubernetes.io/storage-provisioner": "k8s.io/minikube-hostpath"
    },
    "finalizers": [
      "kubernetes.io/pvc-protection"
    ]
  },
  "spec": {
    "accessModes": [
      "ReadWriteOnce"
    ],
    "resources": {
      "requests": {
        "storage": "10Gi"
      }
    },
    "volumeName": "cdiworkspace",
    "storageClassName": "standard",
    "volumeMode": "Block"
  },
  "status": {
    "phase": "Bound",
    "accessModes": [
      "ReadWriteOnce"
    ],
    "capacity": {
      "storage": "10Gi"
    }
  }
}
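For reference, this is how I understand a pod would have to reference the claim by hand (a minimal sketch I put together from the Kubernetes documentation; the pod name, the image, and the mountPath /out_data are my assumptions, while the volume and claim names are the ones from the error and the manifests above):
apiVersion: v1
kind: Pod
metadata:
  name: ingestion-test              # hypothetical pod name
spec:
  containers:
    - name: ingestion
      image: ingestion:latest       # hypothetical image name
      volumeMounts:
        - name: cdiworkspace-volume
          mountPath: /out_data      # assumption: the directory my ingestion.py writes to
  volumes:
    - name: cdiworkspace-volume     # the unmounted volume named in the error above
      persistentVolumeClaim:
        claimName: cdiworkspace     # the PVC defined above
I am also unsure how this interacts with volumeMode: Block in my manifests: as far as I can tell from the documentation, a raw block volume is consumed through volumeDevices/devicePath rather than volumeMounts.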
Output of journalctl -xe _SYSTEMD_UNIT=kubelet.service
Jul 01 09:47:26 rehan-B85M-HD3 kubelet[22759]: E0701 09:47:26.979098 22759 pod_workers.go:190] Error syncing pod 6577b694-f18d-4d7b-9a75-82dc17c908ca ("myplanet-d976447c6-dsfx9_default(6577b694-f18d-4d7
Jul 01 09:47:40 rehan-B85M-HD3 kubelet[22759]: E0701 09:47:40.979722 22759 pod_workers.go:190] Error syncing pod 6577b694-f18d-4d7b-9a75-82dc17c908ca ("myplanet-d976447c6-dsfx9_default(6577b694-f18d-4d7
Jul 01 09:47:55 rehan-B85M-HD3 kubelet[22759]: E0701 09:47:55.978806 22759 pod_workers.go:190] Error syncing pod 6577b694-f18d-4d7b-9a75-82dc17c908ca ("myplanet-d976447c6-dsfx9_default(6577b694-f18d-4d7
Jul 01 09:48:08 rehan-B85M-HD3 kubelet[22759]: E0701 09:48:08.979375 22759 pod_workers.go:190] Error syncing pod 6577b694-f18d-4d7b-9a75-82dc17c908ca ("myplanet-d976447c6-dsfx9_default(6577b694-f18d-4d7
Jul 01 09:48:23 rehan-B85M-HD3 kubelet[22759]: E0701 09:48:23.979463 22759 pod_workers.go:190] Error syncing pod 6577b694-f18d-4d7b-9a75-82dc17c908ca ("myplanet-d976447c6-dsfx9_default(6577b694-f18d-4d7
Jul 01 09:48:37 rehan-B85M-HD3 kubelet[22759]: E0701 09:48:37.979005 22759 pod_workers.go:190] Error syncing pod 6577b694-f18d-4d7b-9a75-82dc17c908ca ("myplanet-d976447c6-dsfx9_default(6577b694-f18d-4d7
Jul 01 09:48:48 rehan-B85M-HD3 kubelet[22759]: E0701 09:48:48.977686 22759 pod_workers.go:190] Error syncing pod 6577b694-f18d-4d7b-9a75-82dc17c908ca ("myplanet-d976447c6-dsfx9_default(6577b694-f18d-4d7
Jul 01 09:49:02 rehan-B85M-HD3 kubelet[22759]: E0701 09:49:02.979125 22759 pod_workers.go:190] Error syncing pod 6577b694-f18d-4d7b-9a75-82dc17c908ca ("myplanet-d976447c6-dsfx9_default(6577b694-f18d-4d7
Jul 01 09:49:17 rehan-B85M-HD3 kubelet[22759]: E0701 09:49:17.979408 22759 pod_workers.go:190] Error syncing pod 6577b694-f18d-4d7b-9a75-82dc17c908ca ("myplanet-d976447c6-dsfx9_default(6577b694-f18d-4d7
Jul 01 09:49:28 rehan-B85M-HD3 kubelet[22759]: E0701 09:49:28.977499 22759 pod_workers.go:190] Error syncing pod 6577b694-f18d-4d7b-9a75-82dc17c908ca ("myplanet-d976447c6-dsfx9_default(6577b694-f18d-4d7
Jul 01 09:49:41 rehan-B85M-HD3 kubelet[22759]: E0701 09:49:41.977771 22759 pod_workers.go:190] Error syncing pod 6577b694-f18d-4d7b-9a75-82dc17c908ca ("myplanet-d976447c6-dsfx9_default(6577b694-f18d-4d7
Jul 01 09:49:53 rehan-B85M-HD3 kubelet[22759]: E0701 09:49:53.978605 22759 pod_workers.go:190] Error syncing pod 6577b694-f18d-4d7b-9a75-82dc17c908ca ("myplanet-d976447c6-dsfx9_default(6577b694-f18d-4d7
Jul 01 09:50:05 rehan-B85M-HD3 kubelet[22759]: E0701 09:50:05.980251 22759 pod_workers.go:190] Error syncing pod 6577b694-f18d-4d7b-9a75-82dc17c908ca ("myplanet-d976447c6-dsfx9_default(6577b694-f18d-4d7
Jul 01 09:50:16 rehan-B85M-HD3 kubelet[22759]: E0701 09:50:16.979292 22759 pod_workers.go:190] Error syncing pod 6577b694-f18d-4d7b-9a75-82dc17c908ca ("myplanet-d976447c6-dsfx9_default(6577b694-f18d-4d7
Jul 01 09:50:31 rehan-B85M-HD3 kubelet[22759]: E0701 09:50:31.978346 22759 pod_workers.go:190] Error syncing pod 6577b694-f18d-4d7b-9a75-82dc17c908ca ("myplanet-d976447c6-dsfx9_default(6577b694-f18d-4d7
Jul 01 09:50:42 rehan-B85M-HD3 kubelet[22759]: E0701 09:50:42.979302 22759 pod_workers.go:190] Error syncing pod 6577b694-f18d-4d7b-9a75-82dc17c908ca ("myplanet-d976447c6-dsfx9_default(6577b694-f18d-4d7
Jul 01 09:50:55 rehan-B85M-HD3 kubelet[22759]: E0701 09:50:55.978043 22759 pod_workers.go:190] Error syncing pod 6577b694-f18d-4d7b-9a75-82dc17c908ca ("myplanet-d976447c6-dsfx9_default(6577b694-f18d-4d7
Jul 01 09:51:08 rehan-B85M-HD3 kubelet[22759]: E0701 09:51:08.977540 22759 pod_workers.go:190] Error syncing pod 6577b694-f18d-4d7b-9a75-82dc17c908ca ("myplanet-d976447c6-dsfx9_default(6577b694-f18d-4d7
Jul 01 09:51:24 rehan-B85M-HD3 kubelet[22759]: E0701 09:51:24.190929 22759 remote_image.go:113] PullImage "friendly/myplanet:0.0.1-SNAPSHOT" from image service failed: rpc error: code = Unknown desc = E
Jul 01 09:51:24 rehan-B85M-HD3 kubelet[22759]: E0701 09:51:24.190971 22759 kuberuntime_image.go:51] Pull image "friendly/myplanet:0.0.1-SNAPSHOT" failed: rpc error: code = Unknown desc = Error response
Jul 01 09:51:24 rehan-B85M-HD3 kubelet[22759]: E0701 09:51:24.191024 22759 kuberuntime_manager.go:775] container start failed: ErrImagePull: rpc error: code = Unknown desc = Error response from daemon:
Deployment manifest
{
  "kind": "Deployment",
  "apiVersion": "extensions/v1beta1",
  "metadata": {
    "name": "back",
    "namespace": "default",
    "selfLink": "/apis/extensions/v1beta1/namespaces/default/deployments/back",
    "uid": "9f21717c-2c04-459f-b47a-95fd8e11728d",
    "resourceVersion": "298987",
    "generation": 1,
    "creationTimestamp": "2019-07-16T13:16:15Z",
    "labels": {
      "run": "back"
    },
    "annotations": {
      "deployment.kubernetes.io/revision": "1"
    }
  },
  "spec": {
    "replicas": 1,
    "selector": {
      "matchLabels": {
        "run": "back"
      }
    },
    "template": {
      "metadata": {
        "creationTimestamp": null,
        "labels": {
          "run": "back"
        }
      },
      "spec": {
        "containers": [
          {
            "name": "back",
            "image": "back:latest",
            "ports": [
              {
                "containerPort": 8080,
                "protocol": "TCP"
              }
            ],
            "resources": {},
            "terminationMessagePath": "/dev/termination-log",
            "terminationMessagePolicy": "File",
            "imagePullPolicy": "Never"
          }
        ],
        "restartPolicy": "Always",
        "terminationGracePeriodSeconds": 30,
        "dnsPolicy": "ClusterFirst",
        "securityContext": {},
        "schedulerName": "default-scheduler"
      }
    },
    "strategy": {
      "type": "RollingUpdate",
      "rollingUpdate": {
        "maxUnavailable": "25%",
        "maxSurge": "25%"
      }
    },
    "revisionHistoryLimit": 10,
    "progressDeadlineSeconds": 600
  },
  "status": {
    "observedGeneration": 1,
    "replicas": 1,
    "updatedReplicas": 1,
    "unavailableReplicas": 1,
    "conditions": [
      {
        "type": "Progressing",
        "status": "True",
        "lastUpdateTime": "2019-07-16T13:16:34Z",
        "lastTransitionTime": "2019-07-16T13:16:15Z",
        "reason": "NewReplicaSetAvailable",
        "message": "ReplicaSet \"back-7fd9995747\" has successfully progressed."
      },
      {
        "type": "Available",
        "status": "False",
        "lastUpdateTime": "2019-07-19T08:32:49Z",
        "lastTransitionTime": "2019-07-19T08:32:49Z",
        "reason": "MinimumReplicasUnavailable",
        "message": "Deployment does not have minimum availability."
      }
    ]
  }
}
Dockerfile
FROM python:3.7-stretch
COPY . /code
WORKDIR /code
CMD exec /bin/bash -c "trap : TERM INT; sleep infinity & wait"
RUN pip install -r requirements.txt
ENTRYPOINT ["python", "ingestion.py"]
Python file 1
import os
import shutil
import logging

logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(name)s - %(message)s')
logger = logging.getLogger("ingestion")

import requests

import datahub

scihub_username = os.environ["scihub_username"]
scihub_password = os.environ["scihub_password"]
result_url = "http://" + os.environ["CDINRW_BASE_URL"] + "/jobs/" + os.environ["CDINRW_JOB_ID"] + "/results"

logger.info("Searching the Copernicus Open Access Hub")
scenes = datahub.search(username=scihub_username,
                        password=scihub_password,
                        producttype=os.getenv("producttype"),
                        platformname=os.getenv("platformname"),
                        days_back=os.getenv("days_back", 2),
                        footprint=os.getenv("footprint"),
                        max_cloud_cover_percentage=os.getenv("max_cloud_cover_percentage"),
                        start_date=os.getenv("start_date"),
                        end_date=os.getenv("end_date"))
logger.info("Found {} relevant scenes".format(len(scenes)))

job_results = []
for scene in scenes:
    # do not download a scene that has already been ingested
    if os.path.exists(os.path.join("/out_data", scene["title"] + ".SAFE")):
        logger.info("The scene {} already exists in /out_data and will not be downloaded again.".format(scene["title"]))
        filename = scene["title"] + ".SAFE"
    else:
        logger.info("Starting the download of scene {}".format(scene["title"]))
        filename = datahub.download(scene, "/tmp", scihub_username, scihub_password, unpack=True)
        logger.info("The download was successful.")
        shutil.move(filename, "/out_data")

    result_message = {"description": "test",
                      "type": "Raster",
                      "format": "SAFE",
                      "filename": os.path.basename(filename)}
    job_results.append(result_message)

res = requests.put(result_url, json=job_results, timeout=60)
res.raise_for_status()
Python file 2
import logging
import os
import urllib.parse
import zipfile

import requests

# constructing URLs for querying the data hub
_BASE_URL = "https://scihub.copernicus.eu/dhus/"
SITE = {}
SITE["SEARCH"] = _BASE_URL + "search?format=xml&sortedby=beginposition&order=desc&rows=100&start={offset}&q="
_PRODUCT_URL = _BASE_URL + "odata/v1/Products('{uuid}')/"
SITE["CHECKSUM"] = _PRODUCT_URL + "Checksum/Value/$value"
SITE["SAFEZIP"] = _PRODUCT_URL + "$value"

logger = logging.getLogger(__name__)
def _build_search_url(producttype=None, platformname=None, days_back=2, footprint=None, max_cloud_cover_percentage=None, start_date=None, end_date=None):
    search_terms = []
    if producttype:
        search_terms.append("producttype:{}".format(producttype))
    if platformname:
        search_terms.append("platformname:{}".format(platformname))
    if start_date and end_date:
        search_terms.append(
            "beginPosition:[{}+TO+{}]".format(start_date, end_date))
    elif days_back:
        search_terms.append(
            "beginPosition:[NOW-{}DAYS+TO+NOW]".format(days_back))
    if footprint:
        search_terms.append("footprint:%22Intersects({})%22".format(
            footprint.replace(" ", "+")))
    if max_cloud_cover_percentage:
        search_terms.append("cloudcoverpercentage:[0+TO+{}]".format(max_cloud_cover_percentage))
    url = SITE["SEARCH"] + "+AND+".join(search_terms)
    return url
def _unpack(zip_file, directory, remove_after=False):
    with zipfile.ZipFile(zip_file) as zf:
        # This assumes that the zip file only contains the .SAFE directory at root level
        safe_path = zf.namelist()[0]
        zf.extractall(path=directory)
    if remove_after:
        os.remove(zip_file)
    return os.path.normpath(os.path.join(directory, safe_path))
def search(username, password, producttype=None, platformname=None, days_back=2, footprint=None, max_cloud_cover_percentage=None, start_date=None, end_date=None):
    """ Search the Copernicus SciHub

    Parameters
    ----------
    username : str
        user name for the Copernicus SciHub
    password : str
        password for the Copernicus SciHub
    producttype : str, optional
        product type to filter for in the query (see https://scihub.copernicus.eu/userguide/FullTextSearch#Search_Keywords for allowed values)
    platformname : str, optional
        platform name to filter for in the query (see https://scihub.copernicus.eu/userguide/FullTextSearch#Search_Keywords for allowed values)
    days_back : int, optional
        number of days before today that will be searched. Defaults to the last 2 days. If start_date and end_date are set, days_back is ignored.
    footprint : str, optional
        well-known-text representation of the footprint
    max_cloud_cover_percentage : str, optional
        percentage of cloud cover per scene. Can only be used in combination with Sentinel-2 imagery.
        (see https://scihub.copernicus.eu/userguide/FullTextSearch#Search_Keywords for allowed values)
    start_date : str, optional
        start point of the search extent; has to be used in combination with end_date
    end_date : str, optional
        end point of the search extent; has to be used in combination with start_date

    Returns
    -------
    list
        a list of scenes that match the search parameters
    """
    import xml.etree.cElementTree as ET
    scenes = []
    search_url = _build_search_url(producttype, platformname, days_back, footprint, max_cloud_cover_percentage, start_date, end_date)
    logger.info("Search URL: {}".format(search_url))
    offset = 0
    rowsBreak = 5000
    name_space = {"atom": "http://www.w3.org/2005/Atom",
                  "opensearch": "http://a9.com/-/spec/opensearch/1.1/"}
    while offset < rowsBreak:  # Next pagination page:
        response = requests.get(search_url.format(offset=offset), auth=(username, password))
        root = ET.fromstring(response.content)
        if offset == 0:
            rowsBreak = int(
                root.find("opensearch:totalResults", name_space).text)
        for e in root.iterfind("atom:entry", name_space):
            uuid = e.find("atom:id", name_space).text
            title = e.find("atom:title", name_space).text
            begin_position = e.find(
                "atom:date[@name='beginposition']", name_space).text
            end_position = e.find(
                "atom:date[@name='endposition']", name_space).text
            footprint = e.find("atom:str[@name='footprint']", name_space).text
            scenes.append({
                "id": uuid,
                "title": title,
                "begin_position": begin_position,
                "end_position": end_position,
                "footprint": footprint})
        # Ultimate DHuS pagination page size limit (rows per page).
        offset += 100
    return scenes
def download(scene, directory, username, password, unpack=True):
    """ Download a Sentinel scene based on its uuid

    Parameters
    ----------
    scene : dict
        the scene to be downloaded
    directory : str
        the path where the file will be downloaded to
    username : str
        username for the Copernicus SciHub
    password : str
        password for the Copernicus SciHub
    unpack : boolean, optional
        flag that defines whether the downloaded product should be unpacked after download. Defaults to True.

    Raises
    ------
    ValueError
        if the size of the downloaded file does not match the Content-Length header
    ValueError
        if the checksum of the downloaded file does not match the checksum provided by the Copernicus SciHub

    Returns
    -------
    str
        path to the downloaded file
    """
    import hashlib
    md5hash = hashlib.md5()
    md5sum = requests.get(SITE["CHECKSUM"].format(
        uuid=scene["id"]), auth=(username, password)).text
    download_path = os.path.join(directory, scene["title"] + ".zip")
    # overwrite if path already exists
    if os.path.exists(download_path):
        os.remove(download_path)
    url = SITE["SAFEZIP"].format(uuid=scene["id"])
    rsp = requests.get(url, auth=(username, password), stream=True)
    cl = rsp.headers.get("Content-Length")
    size = int(cl) if cl else -1
    # Actually fetch now:
    with open(download_path, "wb") as f:  # Do not read as a whole into memory:
        written = 0
        for block in rsp.iter_content(8192):
            f.write(block)
            written += len(block)
            md5hash.update(block)
    written = os.path.getsize(download_path)
    if size > -1 and written != size:
        raise ValueError("{}: size mismatch, {} bytes written but expected {} bytes to write!".format(
            download_path, written, size))
    elif md5sum:
        calculated = md5hash.hexdigest()
        expected = md5sum.lower()
        if calculated != expected:
            raise ValueError("{}: MD5 mismatch, calculated {} but expected {}!".format(
                download_path, calculated, expected))
    if unpack:
        return _unpack(download_path, directory, remove_after=False)
    else:
        return download_path
How do I mount a volume onto a pod correctly and automatically? I don't want to create pods manually for every REST service and assign volumes to them.
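For context, the automatically created job apparently already references a volume named cdiworkspace-volume (it is the unmounted volume in the error at the top), so I assume the generated spec contains something roughly equivalent to the following, and only the binding to my claim fails or is missing (a hypothetical reconstruction; the image name and mountPath are guesses):
apiVersion: batch/v1
kind: Job
metadata:
  generateName: ingestion-          # the failing pod is named ingestion-88dhg
spec:
  template:
    spec:
      restartPolicy: Never
      containers:
        - name: ingestion
          image: ingestion:latest   # hypothetical image name
          volumeMounts:
            - name: cdiworkspace-volume
              mountPath: /out_data  # assumption: the path used by ingestion.py
      volumes:
        - name: cdiworkspace-volume
          persistentVolumeClaim:
            claimName: cdiworkspace # my PVC from above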