3 # Copyright 2016 RIFT.IO Inc
5 # Licensed under the Apache License, Version 2.0 (the "License");
6 # you may not use this file except in compliance with the License.
7 # You may obtain a copy of the License at
9 # http://www.apache.org/licenses/LICENSE-2.0
11 # Unless required by applicable law or agreed to in writing, software
12 # distributed under the License is distributed on an "AS IS" BASIS,
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 # See the License for the specific language governing permissions and
15 # limitations under the License.
import asyncio
import collections
import itertools
import os
import threading
import time

import gi
gi.require_version('RwImageMgmtYang', '1.0')

import rift.mano.cloud

from gi.repository import (
        RwImageMgmtYang,
        )
34 class UploadJobError(Exception):
38 class ImageUploadTaskError(Exception):
42 class ImageUploadError(ImageUploadTaskError
):
46 class ImageListError(ImageUploadTaskError
):
class ImageUploadJobController(object):
    """ This class starts and manages ImageUploadJobs """
    MAX_COMPLETED_JOBS = 20

    def __init__(self, log, loop, project, max_completed_jobs=MAX_COMPLETED_JOBS):
        """
        Arguments:
            log - logger
            loop - asyncio event loop
            project - owning project for the upload jobs
            max_completed_jobs - how many completed jobs to keep for status queries
        """
        self._log = log
        self._loop = loop
        self._project = project
        self._job_id_gen = itertools.count(1)
        self._max_completed_jobs = max_completed_jobs

        # All tracked jobs (active and completed), keyed by job id.
        self._jobs = {}

        # Bounded history of completed jobs.  New completions are pushed on the
        # left, so the oldest completed job is at index -1.
        self._completed_jobs = collections.deque(
                maxlen=self._max_completed_jobs
                )

    @property
    def pb_msg(self):
        """ the UploadJobs protobuf message """
        upload_jobs_msg = RwImageMgmtYang.UploadJobs()
        for job in self._jobs.values():
            upload_jobs_msg.job.append(job.pb_msg)

        return upload_jobs_msg

    @property
    def jobs(self):
        """ the tracked list of ImageUploadJobs """
        return self._jobs.values()

    @property
    def completed_jobs(self):
        """ completed jobs in the tracked list of ImageUploadJobs """
        return [job for job in self._jobs.values() if job in self._completed_jobs]

    @property
    def active_jobs(self):
        """ in-progress jobs in the tracked list of ImageUploadJobs """
        return [job for job in self._jobs.values() if job not in self._completed_jobs]

    def _add_job(self, job):
        """ Track a job by its id """
        self._jobs[job.id] = job

    def _start_job(self, job, on_completed=None):
        """ Start a job and arrange for completion bookkeeping

        Arguments:
            job - the ImageUploadJob to start
            on_completed - optional callback added to the job future
        """
        def on_job_completed(_):
            self._log.debug("%s completed. Adding to completed jobs list.", job)

            # If adding a new completed job is going to overflow the
            # completed job list, find the first job that completed and
            # remove it from the tracked jobs.
            if len(self._completed_jobs) == self._completed_jobs.maxlen:
                first_completed_job = self._completed_jobs[-1]
                del self._jobs[first_completed_job.id]

            self._completed_jobs.appendleft(job)

        job_future = job.start()
        job_future.add_done_callback(on_job_completed)

        if on_completed is not None:
            job_future.add_done_callback(on_completed)

    def get_job(self, job_id):
        """ Get the UploadJob from the job id

        Arguments:
            job_id - the job id that was previously added to the controller

        Returns:
            The associated ImageUploadJob

        Raises:
            LookupError - Could not find the job id
        """
        if job_id not in self._jobs:
            raise LookupError("Could not find job_id %s" % job_id)

        return self._jobs[job_id]

    def create_job(self, image_tasks, on_completed=None):
        """ Create and start a ImageUploadJob from a list of ImageUploadTasks

        Arguments:
            image_tasks - a list of ImageUploadTasks
            on_completed - a callback which is added to the job future

        Returns:
            The newly created and started ImageUploadJob
        """
        self._log.debug("Creating new job from %s image tasks", len(image_tasks))
        new_job = ImageUploadJob(
                self._log,
                self._loop,
                image_tasks,
                job_id=next(self._job_id_gen),
                )

        self._add_job(new_job)
        self._start_job(new_job, on_completed=on_completed)

        return new_job
class ImageUploadJob(object):
    """ This class manages a set of ImageUploadTasks

    In order to push an image (or set of images) to many cloud accounts, and get a single
    status on that operation, we need a single status that represents all of those tasks.

    The ImageUploadJob provides a single endpoint to control all the tasks and report
    when all images are successfully upload or when any one fails.
    """
    STATES = ("QUEUED", "IN_PROGRESS", "CANCELLING", "CANCELLED", "COMPLETED", "FAILED")
    TIMEOUT_JOB = 6 * 60 * 60  # 6 hours
    JOB_GEN = itertools.count(1)

    def __init__(self, log, loop, upload_tasks, job_id=None, timeout_job=TIMEOUT_JOB):
        """
        Arguments:
            log - logger
            loop - asyncio event loop
            upload_tasks - the AccountImageUploadTasks managed by this job
            job_id - explicit job id; when None a new id is generated
            timeout_job - overall job timeout in seconds
        """
        self._log = log
        self._loop = loop
        self._upload_tasks = upload_tasks
        self._job_id = next(ImageUploadJob.JOB_GEN) if job_id is None else job_id
        self._timeout_job = timeout_job

        self._state = "QUEUED"
        self._state_stack = [self._state]

        self._start_time = time.time()
        # 0 means "not stopped yet"; set when the job future finishes.
        self._stop_time = 0

        self._task_future_map = {}
        self._job_future = None

    def __repr__(self):
        return "{}(job_id={}, state={})".format(
            self.__class__.__name__, self._job_id, self._state)

    @property
    def id(self):
        """ The job id """
        return self._job_id

    @property
    def state(self):
        """ The state of the ImageUploadJob """
        return self._state

    @state.setter
    def state(self, new_state):
        """ Set the state of the ImageUploadJob """
        states = ImageUploadJob.STATES
        assert new_state in states
        # States may only move forward in the STATES ordering.
        assert states.index(new_state) >= states.index(self._state)
        self._state_stack.append(new_state)

        self._state = new_state

    @property
    def state_stack(self):
        """ The list of states that this job progressed through """
        return self._state_stack

    @property
    def pb_msg(self):
        """ The UploadJob protobuf message """
        task = RwImageMgmtYang.UploadJob.from_dict({
            "id": self._job_id,
            "status": self._state,
            "start_time": self._start_time,
            "upload_tasks": [task.pb_msg for task in self._upload_tasks],
        })

        if self._stop_time:
            task.stop_time = self._stop_time

        return task

    def _start_upload_tasks(self):
        """ Start every child upload task """
        self._log.debug("Starting %s upload tasks", len(self._upload_tasks))

        for upload_task in self._upload_tasks:
            upload_task.start()

    @asyncio.coroutine
    def _wait_for_upload_tasks(self):
        """ Wait (up to the job timeout) for all child tasks to finish """
        self._log.debug("Waiting for upload tasks to complete")

        wait_coroutines = [t.wait() for t in self._upload_tasks]
        yield from asyncio.wait(
                wait_coroutines,
                timeout=self._timeout_job,
                loop=self._loop,
                )

        self._log.debug("All upload tasks completed")

    def _set_final_job_state(self):
        """ Derive the job's terminal state from the child task states """
        failed_tasks = []
        for task in self._upload_tasks:
            if task.state != "COMPLETED":
                failed_tasks.append(task)

        if failed_tasks:
            self._log.error("%s had %s FAILED tasks.", self, len(failed_tasks))
            self.state = "FAILED"
        else:
            self._log.debug("%s tasks completed successfully", len(self._upload_tasks))
            self.state = "COMPLETED"

    @asyncio.coroutine
    def _cancel_job(self):
        """ Stop all child tasks and mark the job cancelled """
        for task in self._upload_tasks:
            task.stop()

        # TODO: Wait for all tasks to actually reach terminal
        # state before marking the job cancelled.

        self.state = "CANCELLED"

    @asyncio.coroutine
    def _do_job(self):
        """ Run the job: start all tasks, wait, then record the result """
        self.state = "IN_PROGRESS"
        self._start_upload_tasks()
        try:
            yield from self._wait_for_upload_tasks()
        except asyncio.CancelledError:
            self._log.debug("%s was cancelled. Cancelling all tasks.",
                            self)
            self._loop.create_task(self._cancel_job())
            raise
        finally:
            self._stop_time = time.time()
            self._job_future = None

        self._set_final_job_state()

    @asyncio.coroutine
    def wait(self):
        """ Wait for the job to reach a terminal state """
        if self._job_future is None:
            raise UploadJobError("Job not started")

        # Shield so a wait_for timeout does not cancel the underlying job.
        yield from asyncio.wait_for(
                asyncio.shield(self._job_future),
                self._timeout_job,
                loop=self._loop,
                )

    def start(self):
        """ Start the job and all child tasks """
        if self._state != "QUEUED":
            raise UploadJobError("Job already started")

        self._job_future = self._loop.create_task(self._do_job())
        return self._job_future

    def stop(self):
        """ Stop the job and all child tasks """
        if self._job_future is not None:
            self.state = "CANCELLING"
            self._job_future.cancel()
class ByteRateCalculator(object):
    """ This class produces a byte rate from inputted measurements"""
    def __init__(self, rate_time_constant):
        """
        Arguments:
            rate_time_constant - smoothing constant for the exponential
                moving average (larger = smoother, slower to react)
        """
        self._rate = 0
        self._time_constant = rate_time_constant

    def add_measurement(self, num_bytes, time_delta):
        """ Fold a new measurement into the smoothed byte rate

        Arguments:
            num_bytes - bytes observed during the measurement window
            time_delta - length of the measurement window in seconds

        Returns:
            The updated smoothed rate in bytes per second
        """
        rate = num_bytes / time_delta
        if self._rate == 0:
            # First sample: seed the average directly.
            self._rate = rate
        else:
            # Exponential moving average with the configured time constant.
            self._rate += ((rate - self._rate) / self._time_constant)

        return self._rate
class UploadProgressWriteProxy(object):
    """ This class implements a write proxy with produces various progress stats

    In order to keep the complexity of the UploadTask down, this class acts as a
    proxy for a file write.  By providing the original handle to be written to
    and having the client class call write() on this object, we can produce the
    various statistics to be consumed.
    """
    RATE_TIME_CONSTANT = 5

    def __init__(self, log, loop, bytes_total, write_hdl):
        """
        Arguments:
            log - logger
            loop - asyncio event loop
            bytes_total - expected total size in bytes (may be 0 if unknown)
            write_hdl - the underlying file-like object to write to
        """
        self._log = log
        self._loop = loop
        self._bytes_total = bytes_total
        self._write_hdl = write_hdl

        self._bytes_written = 0
        self._byte_rate = 0

        self._rate_calc = ByteRateCalculator(UploadProgressWriteProxy.RATE_TIME_CONSTANT)
        self._rate_task = None

    def write(self, data):
        """ Forward data to the underlying handle and count the bytes """
        self._write_hdl.write(data)
        self._bytes_written += len(data)

    def close(self):
        """ Close the underlying handle and stop rate monitoring """
        self._write_hdl.close()

        if self._rate_task is not None:
            self._log.debug("stopping rate monitoring task")
            self._rate_task.cancel()

    def start_rate_monitoring(self):
        """ Start the rate monitoring task """
        @asyncio.coroutine
        def periodic_rate_task():
            # Sample bytes written once per second and fold the delta into
            # the smoothed rate until the task is cancelled by close().
            while True:
                start_time = time.time()
                start_bytes = self._bytes_written
                yield from asyncio.sleep(1, loop=self._loop)
                time_period = time.time() - start_time
                num_bytes = self._bytes_written - start_bytes

                self._byte_rate = self._rate_calc.add_measurement(num_bytes, time_period)

        self._log.debug("starting rate monitoring task")
        self._rate_task = self._loop.create_task(periodic_rate_task())

    @property
    def progress_percent(self):
        """ Percentage written so far (0 when the total size is unknown) """
        if self._bytes_total == 0:
            return 0

        return int(self._bytes_written / self._bytes_total * 100)

    @property
    def bytes_written(self):
        """ Number of bytes written through the proxy so far """
        return self._bytes_written

    @property
    def bytes_total(self):
        """ Expected total number of bytes """
        return self._bytes_total

    @property
    def bytes_rate(self):
        """ Smoothed write rate in bytes per second """
        return self._byte_rate
class GlanceImagePipeGen(object):
    """ This class produces a read file handle from a generator that produces bytes

    The CAL API takes a file handle as an input.  The Glance API creates a generator
    that produces byte strings.  This class acts as the mediator by creating a pipe
    and pumping the bytestring from the generator into the write side of the pipe.

    A pipe has the useful feature here that it will block at the buffer size until
    the reader has consumed.  This allows us to only pull from glance and push at the
    pace of the reader preventing us from having to store the images locally on disk.
    """
    def __init__(self, log, loop, data_gen):
        """
        Arguments:
            log - logger
            loop - asyncio event loop
            data_gen - generator/iterable yielding byte strings
        """
        self._log = log
        self._loop = loop
        self._data_gen = data_gen

        read_fd, write_fd = os.pipe()

        self._read_hdl = os.fdopen(read_fd, 'rb')
        self._write_hdl = os.fdopen(write_fd, 'wb')
        # Remember the original write handle so it can always be closed even
        # if write_hdl is later replaced (e.g. wrapped by a progress proxy).
        self._close_hdl = self._write_hdl

    @property
    def write_hdl(self):
        """ The write side of the pipe """
        return self._write_hdl

    @write_hdl.setter
    def write_hdl(self, new_write_hdl):
        """ Replace the write handle (e.g. with an UploadProgressWriteProxy) """
        self._write_hdl = new_write_hdl

    @property
    def read_hdl(self):
        """ The read side of the pipe (hand this to the consumer) """
        return self._read_hdl

    def _gen_writer(self):
        """ Pump the data generator into the write side of the pipe """
        self._log.debug("starting image data write to pipe")
        try:
            for data in self._data_gen:
                try:
                    self._write_hdl.write(data)
                except (BrokenPipeError, ValueError) as e:
                    # Reader went away or the handle was closed via stop().
                    self._log.warning("write pipe closed: %s", str(e))
                    return
                except Exception as e:
                    self._log.exception("error when writing data to pipe: %s", str(e))
                    return
        finally:
            # Always close the write side so the reader sees EOF.
            self._log.debug("closing write side of pipe")
            try:
                self._write_hdl.close()
            except OSError:
                pass

    def start(self):
        """ Start pumping data into the pipe on a background thread """
        t = threading.Thread(target=self._gen_writer)
        t.daemon = True
        t.start()

    def stop(self):
        """ Stop the writer by closing the write side of the pipe """
        self._log.debug("stop requested, closing write side of pipe")
        self._write_hdl.close()
class AccountImageUploadTask(object):
    """ This class manages an create_image task from an image info and file handle

    Manage the upload of a image to a configured cloud account.
    """
    STATES = ("QUEUED", "CHECK_IMAGE_EXISTS", "UPLOADING", "CANCELLING", "CANCELLED", "COMPLETED", "FAILED")

    TIMEOUT_CHECK_EXISTS = 10
    TIMEOUT_IMAGE_UPLOAD = 6 * 60 * 60  # 6 hours

    def __init__(self, log, loop, account, image_info, image_hdl,
                 timeout_exists=TIMEOUT_CHECK_EXISTS, timeout_upload=TIMEOUT_IMAGE_UPLOAD,
                 progress_info=None, write_canceller=None
                 ):
        """
        Arguments:
            log - logger
            loop - asyncio event loop
            account - the destination cloud account
            image_info - image description message (must have a "name" field)
            image_hdl - file handle providing the image data
            timeout_exists - timeout (s) for the existing-image check
            timeout_upload - timeout (s) for the image upload
            progress_info - optional UploadProgressWriteProxy for progress stats
            write_canceller - optional object with a stop() method used to
                cancel in-flight writes when the task is stopped

        Raises:
            ValueError - image_info has no "name" field
        """
        self._log = log
        self._loop = loop
        self._account = account
        # Deep copy so local mutation (fileno, id) does not affect the caller.
        self._image_info = image_info.deep_copy()
        self._image_hdl = image_hdl

        self._timeout_exists = timeout_exists
        self._timeout_upload = timeout_upload

        self._progress_info = progress_info
        self._write_canceller = write_canceller

        self._state = "QUEUED"
        self._state_stack = [self._state]

        self._detail = "Task is waiting to be started"
        self._start_time = time.time()
        # 0 means "not stopped yet"; set when the upload future finishes.
        self._stop_time = 0

        self._upload_future = None

        if not image_info.has_field("name"):
            raise ValueError("image info must have name field")

    def __repr__(self):
        return "{}(account={}, image={}, state={})".format(
            self.__class__.__name__, self.cloud_account, self.image_name, self._state)

    @property
    def state(self):
        """ The state of the upload task """
        return self._state

    @state.setter
    def state(self, new_state):
        states = AccountImageUploadTask.STATES
        assert new_state in states
        # States may only move forward in the STATES ordering.
        assert states.index(new_state) >= states.index(self._state)
        self._state_stack.append(new_state)

        self._state = new_state

    @property
    def state_stack(self):
        """ The list of states that this task progressed through """
        return self._state_stack

    @property
    def image_id(self):
        """ The image id being uploaded """
        return self._image_info.id

    @property
    def image_name(self):
        """ The image name being uploaded """
        return self._image_info.name

    @property
    def image_checksum(self):
        """ The image checksum being uploaded """
        if self._image_info.has_field("checksum"):
            return self._image_info.checksum

        return None

    @property
    def cloud_account(self):
        """ The cloud account name which the image is being uploaded to """
        return self._account.name

    @property
    def pb_msg(self):
        """ The UploadTask protobuf message """
        task = RwImageMgmtYang.UploadTask.from_dict({
            "cloud_account": self.cloud_account,
            "image_id": self.image_id,
            "image_name": self.image_name,
            "status": self.state,
            "detail": self._detail,
            "start_time": self._start_time,
        })

        if self.image_checksum is not None:
            task.image_checksum = self.image_checksum

        if self._stop_time:
            task.stop_time = self._stop_time

        if self._progress_info:
            task.bytes_written = self._progress_info.bytes_written
            task.bytes_total = self._progress_info.bytes_total
            task.progress_percent = self._progress_info.progress_percent
            task.bytes_per_second = self._progress_info.bytes_rate

        if self.state == "COMPLETED":
            task.progress_percent = 100

        return task

    def _get_account_images(self):
        """ Fetch the account's image list (raises ImageListError on failure) """
        self._log.debug("getting image list for account {}".format(self._account.name))
        try:
            account_images = self._account.get_image_list()
        except rift.mano.cloud.CloudAccountCalError as e:
            msg = "could not get image list for account {}".format(self._account.name)
            self._log.error(msg)
            raise ImageListError(msg) from e

        return account_images

    def _has_existing_image(self):
        """ Return True if an image with the same name (and checksum) exists """
        account = self._account

        account_images = self._get_account_images()

        matching_images = [i for i in account_images if i.name == self.image_name]

        if self.image_checksum is not None:
            matching_images = [i for i in matching_images if i.checksum == self.image_checksum]

        if matching_images:
            self._log.debug("found matching image with checksum in account %s",
                            account.name)
            return True

        self._log.debug("did not find matching image with checksum in account %s",
                        account.name)
        return False

    def _upload_image(self):
        """ Upload the image via the account CAL (raises ImageUploadError) """
        image = self._image_info
        account = self._account

        image.fileno = self._image_hdl.fileno()

        self._log.debug("uploading to account {}: {}".format(account.name, image))
        try:
            image.id = account.create_image(image)
        except rift.mano.cloud.CloudAccountCalError as e:
            msg = "error when uploading image {} to cloud account: {}".format(image.name, str(e))
            self._log.error(msg)
            raise ImageUploadError(msg) from e

        self._log.debug('uploaded image (id: {}) to account{}: {}'.format(
            image.id, account.name, image.name))

        return image.id

    @asyncio.coroutine
    def _do_upload(self):
        """ Drive the upload through its states and record the outcome """
        try:
            self.state = "CHECK_IMAGE_EXISTS"
            has_image = yield from asyncio.wait_for(
                self._loop.run_in_executor(None, self._has_existing_image),
                timeout=self._timeout_exists,
                loop=self._loop,
                )

            if has_image:
                self.state = "COMPLETED"
                self._detail = "Image already exists on destination"
                return

            self.state = "UPLOADING"
            self._detail = "Uploading image"

            # Note that if the upload times out, the upload thread may still
            # stick around.  We'll need another method of cancelling the task
            # through the VALA interface.
            image_id = yield from asyncio.wait_for(
                self._loop.run_in_executor(None, self._upload_image),
                timeout=self._timeout_upload,
                loop=self._loop,
                )

        except asyncio.CancelledError as e:
            self.state = "CANCELLED"
            self._detail = "Image upload cancelled"

        except ImageUploadTaskError as e:
            self.state = "FAILED"
            self._detail = str(e)

        except asyncio.TimeoutError as e:
            self.state = "FAILED"
            self._detail = "Timed out during upload task: %s" % str(e)

        else:
            # If the user does not provide a checksum and performs a URL source
            # upload with an incorrect URL, then Glance does not indicate a failure
            # and the CAL cannot detect an incorrect upload.  In this case, use
            # the bytes_written to detect a bad upload and mark the task as failed.
            if self._progress_info and self._progress_info.bytes_written == 0:
                self.state = "FAILED"
                self._detail = "No bytes written. Possible bad image source."
            else:
                self.state = "COMPLETED"
                self._detail = "Image successfully uploaded. Image id: %s" % image_id

        finally:
            self._stop_time = time.time()
            self._upload_future = None

    @asyncio.coroutine
    def wait(self):
        """ Wait for the upload task to complete """
        if self._upload_future is None:
            raise ImageUploadError("Task not started")

        # Shield so a wait_for timeout does not cancel the underlying upload.
        yield from asyncio.wait_for(
            asyncio.shield(self._upload_future),
            self._timeout_upload, loop=self._loop
            )

    def start(self):
        """ Start the upload task """
        if self._state != "QUEUED":
            raise ImageUploadError("Task already started")

        self._log.info("Starting %s", self)

        self._upload_future = self._loop.create_task(self._do_upload())

        return self._upload_future

    def stop(self):
        """ Stop the upload task in progress """
        if self._upload_future is None:
            self._log.warning("Cannot cancel %s. Not in progress.", self)
            return

        self.state = "CANCELLING"
        self._detail = "Cancellation has been requested"

        self._log.info("Cancelling %s", self)
        self._upload_future.cancel()
        if self._write_canceller is not None:
            self._write_canceller.stop()