OpenStack Liberty Source Code Analysis: The Instance Boot Process (Part 3)
Posted by lizhongwen1987
Continuing from the previous post, "OpenStack Liberty Source Code Analysis: The Instance Boot Process, Part 2", a quick recap: after nova-conductor receives the candidate host list from nova-scheduler, it sends an asynchronous RPC request to the nova-compute service on each target host. Below we continue with how nova-compute handles that request; the conductor-side cast is sketched first for context.
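For reference, the conductor side targets each chosen host through the compute RPC API and casts to it asynchronously. Roughly, from my reading of the Liberty tree (a trimmed sketch, so treat the RPC version number and the exact argument list as approximate):
#nova/compute/rpcapi.py/ComputeAPI.build_and_run_instance (sketch)
def build_and_run_instance(self, ctxt, instance, host, image,
                           request_spec, filter_properties,
                           admin_password=None, injected_files=None,
                           requested_networks=None, security_groups=None,
                           block_device_mapping=None, node=None,
                           limits=None):
    #target the nova-compute service on the chosen host
    cctxt = self.client.prepare(server=host, version='4.0')
    #cast = asynchronous: conductor does not wait for the build
    cctxt.cast(ctxt, 'build_and_run_instance', instance=instance,
               image=image, request_spec=request_spec,
               filter_properties=filter_properties,
               admin_password=admin_password,
               injected_files=injected_files,
               requested_networks=requested_networks,
               security_groups=security_groups,
               block_device_mapping=block_device_mapping,
               node=node, limits=limits)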
nova-compute
Based on the RPC routing, the method that handles the boot request in nova-compute is nova/compute/manager.py/ComputeManager.build_and_run_instance. It does no real work itself: it uses eventlet to spawn a worker thread for the actual boot, freeing the RPC worker thread, and that worker then calls _do_build_and_run_instance to carry on. A sketch of the dispatch is shown first, followed by _do_build_and_run_instance itself:
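Roughly, the dispatching method looks like this in the Liberty tree (a trimmed sketch with decorators and error handling omitted, so details may differ slightly):
#nova/compute/manager.py/ComputeManager.build_and_run_instance (sketch)
def build_and_run_instance(self, context, instance, image, request_spec,
                           filter_properties, admin_password=None,
                           injected_files=None, requested_networks=None,
                           security_groups=None,
                           block_device_mapping=None, node=None,
                           limits=None):

    @utils.synchronized(instance.uuid)
    def _locked_do_build_and_run_instance(*args, **kwargs):
        #serialize on the instance uuid and cap concurrent builds
        with self._build_semaphore:
            self._do_build_and_run_instance(*args, **kwargs)

    #spawn a worker so the RPC thread returns to the pool: the build
    #below can take a long time and must not tie up RPC workers
    utils.spawn_n(_locked_do_build_and_run_instance,
                  context, instance, image, request_spec,
                  filter_properties, admin_password, injected_files,
                  requested_networks, security_groups,
                  block_device_mapping, node, limits)
The worker thread then enters _do_build_and_run_instance: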
#decorator definitions omitted here
def _do_build_and_run_instance(self, context, instance, image,
request_spec, filter_properties, admin_password,
injected_files,
requested_networks, security_groups,
block_device_mapping,
node=None, limits=None):
    #exception handling omitted here
LOG.info(_LI('Starting instance...'), context=context,
instance=instance)
    #instance is an InstanceV2 object; update the instance state here,
    #sending a synchronous request via the `conductor rpc api` so that
    #`conductor` persists the state change
instance.vm_state = vm_states.BUILDING
instance.task_state = None
instance.save(expected_task_state=
(task_states.SCHEDULING, None))
# b64 decode the files to inject:
decoded_files = self._decode_files(injected_files)
    #limits holds the node's resource limits, including memory and disk
if limits is None:
limits = {}
if node is None:
node = self.driver.get_available_nodes(refresh=True)[0]
        LOG.debug('No node specified, defaulting to %s', node,
                  instance=instance)
    #exception handling omitted; the call is forwarded to
    #_build_and_run_instance for the rest of the work
self._build_and_run_instance(context, instance, image,
decoded_files, admin_password,
requested_networks,
security_groups, block_device_mapping,
node, limits,
filter_properties)
return build_results.ACTIVE
Next, the implementation of _build_and_run_instance:
def _build_and_run_instance(self, context, instance, image,
injected_files,
admin_password, requested_networks,
security_groups,
block_device_mapping, node, limits,
                                filter_properties):
        """image is a dict of image metadata; 'name' is the image name.
        In my example the image looks like this:
{
u'status': u'active', u'deleted': False,
u'container_format': u'bare', u'min_ram': 0,
u'updated_at': u'2016-03-24T06:58:33.000000',
u'min_disk': 0,
u'owner': u'25520b29dce346d38bc4b055c5ffbfcb',
u'is_public': True, u'deleted_at': None,
u'properties': {}, u'size': 1401421824,
u'name': u'ceph-centos-65-x64-20g.qcow2',
u'checksum': u'a97deac197e76e1f5a427484b1e5df4c',
u'created_at': u'2016-03-24T06:57:28.000000',
u'disk_format': u'qcow2',
u'id': u'226bc6e5-60d7-4a2c-bf0d-a568a1e26e00'
}
"""
image_name = image.get('name')
        '''exception handling omitted'''
        #get/create a ResourceTracker instance in preparation for the
        #resource claim below
        rt = self._get_resource_tracker(node)
        #limits holds the node's memory and disk quotas; check that the
        #node has enough resources for this boot request and raise if
        #not. You'll see an INFO log like:
        #"Attempting claim: memory 2048 MB, disk 20 GB"
with rt.instance_claim(context, instance, limits):
            # NOTE(russellb) It's important that this validation be
            # done *after* the resource tracker instance claim, as that
            # is where the host is set on the instance.
self._validate_instance_group_policy(context, instance,
filter_properties)
            #allocate network resources for the instance, validate and
            #map the block devices, and update the instance state
with self._build_resources(context, instance,
requested_networks, security_groups,
image,
block_device_mapping) as resources:
instance.vm_state = vm_states.BUILDING
instance.task_state = task_states.SPAWNING
                # NOTE(JoshNang) This also saves the changes to the
                # instance from _allocate_network_async, as they
                # aren't saved in that function to prevent races.
instance.save(expected_task_state=
task_states.BLOCK_DEVICE_MAPPING)
block_device_info = resources['block_device_info']
network_info = resources['network_info']
                #call the hypervisor's spawn method to boot the
                #instance; I'm using libvirt, so this goes to
                #nova/virt/libvirt/driver.py/LibvirtDriver.spawn,
                #analyzed below
self.driver.spawn(context, instance, image,
injected_files,
admin_password,
network_info=network_info,
block_device_info=block_device_info)
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None,
              block_device_info=None):
        """Does three main things:
        1. Download the image from glance (if it's not already in the
           local _base cache), then upload it to the backend store
        2. Generate the libvirt XML file
        3. Call libvirt to start the instance
        """
        #build a nova/objects/image_meta.py/ImageMeta object from the
        #image dict
image_meta = objects.ImageMeta.from_dict(image_meta)
        #determine the disk and cdrom bus types from the hypervisor
        #type; with the default kvm, block devices use virtio and the
        #cdrom uses ide. Also build the device mapping from
        #block_device_info. Returns a dict of
        #{disk_bus, cdrom_bus, mapping}
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance,
image_meta,
block_device_info)
        #download the image from glance (if it's not in the local _base
        #cache) and upload it to the backend store; analyzed below
self._create_image(context, instance,
disk_info['mapping'],
network_info=network_info,
block_device_info=block_device_info,
files=injected_files,
admin_pass=admin_password)
        #generate the libvirt XML file; analyzed below
xml = self._get_guest_xml(context, instance, network_info,
disk_info, image_meta,
block_device_info=block_device_info,
write_to_disk=True)
        #call libvirt to start the instance; analyzed below
self._create_domain_and_network(context, xml, instance,
network_info,
disk_info,
block_device_info=block_device_info)
LOG.debug("Instance is running", instance=instance)
def _wait_for_boot():
"""Called at an interval until the VM is running."""
state = self.get_info(instance).state
if state == power_state.RUNNING:
LOG.info(_LI("Instance spawned successfully."),
instance=instance)
raise loopingcall.LoopingCallDone()
        #wait for the boot to complete (polling the instance state via
        #libvirt)
timer = loopingcall.FixedIntervalLoopingCall(_wait_for_boot)
timer.start(interval=0.5).wait()
If spawn returns successfully, the instance has been created. You will see the new instance in the 'Running' state on the Dashboard, and the virsh list command on the host shows the instance process. Let's first look at how _create_image creates the disks.
Creating the system disk
_create_image is a long method, so we first walk through how the image (system) disk is created. Only the key parts of the code are shown below; please consult the source file for the full version:
nova/virt/libvirt/driver.py/LibvirtDriver._create_image
def _create_image(self, context, instance,
disk_mapping, suffix='',
disk_images=None, network_info=None,
block_device_info=None, files=None,
admin_pass=None, inject_files=True,
fallback_from_host=None):
        #we are booting from an image, so booted_from_volume=False
booted_from_volume = self._is_booted_from_volume(
instance, disk_mapping)
        ......
        """Input parameter disk_images is None here; it is populated
        from the instance info:
        {
            'kernel_id': u'',
            'image_id': u'226bc6e5-60d7-4a2c-bf0d-a568a1e26e00',
            'ramdisk_id': u''
        }
        """
if not disk_images:
disk_images = {'image_id': instance.image_ref,
'kernel_id': instance.kernel_id,
'ramdisk_id': instance.ramdisk_id}
......
#booted_from_volume=False
if not booted_from_volume:
            #the system disk file name is a hash of `image_id`
root_fname = imagecache.get_cache_fname(disk_images,
'image_id')
            #root_gb is the system disk size, 20 (GB) in my example
size = instance.root_gb * units.Gi
            #input parameter: suffix=''
if size == 0 or suffix == '.rescue':
size = None
            #our storage backend is ceph, so the backend created here
            #is Rbd; the `disk` argument becomes the device name
            #suffix: 'uuid_disk'
backend = image('disk')
            #on a normal boot, task_state is spawning at this point, so
            #this branch is skipped
if instance.task_state == task_states.RESIZE_FINISH:
backend.create_snap(
libvirt_utils.RESIZE_SNAPSHOT_NAME)
            #Rbd supports the clone operation
if backend.SUPPORTS_CLONE:
def clone_fallback_to_fetch(*args, **kwargs):
try:
backend.clone(context,
disk_images['image_id'])
except exception.ImageUnacceptable:
                        #if clone raises, fall back to fetch_image to
                        #download the image
libvirt_utils.fetch_image(*args, **kwargs)
fetch_func = clone_fallback_to_fetch
else:
                #with an lvm backend, this branch is taken instead
fetch_func = libvirt_utils.fetch_image
            #_try_fetch_image_cache simply calls `backend.cache =
            #Rbd.cache` to download the image from glance, create the
            #system disk and upload it to the backend store; on an
            #ImageNotFound exception it falls back to copying the image
            #from fallback_from_host (see the sketch below)
self._try_fetch_image_cache(backend, fetch_func,
context,
root_fname,
disk_images['image_id'],
instance, size,
fallback_from_host)
......
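For completeness, the fallback mentioned above lives in _try_fetch_image_cache; roughly, trimmed from the Liberty source (a sketch, with the logging shortened):
#nova/virt/libvirt/driver.py/LibvirtDriver._try_fetch_image_cache
def _try_fetch_image_cache(self, image, fetch_func, context, filename,
                           image_id, instance, size,
                           fallback_from_host=None):
    try:
        #Rbd.cache: download from glance if needed, then import
        image.cache(fetch_func=fetch_func,
                    context=context,
                    filename=filename,
                    image_id=image_id,
                    user_id=instance.user_id,
                    project_id=instance.project_id,
                    size=size)
    except exception.ImageNotFound:
        if not fallback_from_host:
            raise
        #the image is gone from glance: copy the base file over ssh
        #from another host (used on resize/migration)
        def copy_from_host(target, max_size):
            libvirt_utils.copy_image(src=target, dest=target,
                                     host=fallback_from_host,
                                     receive=True)
        image.cache(fetch_func=copy_from_host, filename=filename)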
Next, the implementation of nova/virt/libvirt/imagebackend.py/Rbd.cache:
def cache(self, fetch_func, filename, size=None, *args, **kwargs):
@utils.synchronized(filename, external=True,
lock_path=self.lock_path)
def fetch_func_sync(target, *args, **kwargs):
# The image may have been fetched while a subsequent
# call was waiting to obtain the lock.
if not os.path.exists(target):
fetch_func(target=target, *args, **kwargs)
        #build the local image cache path; instances_path and
        #image_cache_subdirectory_name in nova.conf control it, and on
        #my setup it is: /opt/stack/data/nova/instances/_base
        #(you may be thinking: a fast device here could boost
        #performance!)
base_dir = os.path.join(CONF.instances_path,
CONF.image_cache_subdirectory_name)
if not os.path.exists(base_dir):
fileutils.ensure_tree(base_dir)
        #join the image path:
        #/opt/stack/data/nova/instances/_base/filename; in my example:
        #/opt/stack/data/nova/instances/_base/
        #cb241933d7daa40a536db47d41376dd03a83b517
base = os.path.join(base_dir, filename)
        #if the image does not exist (it usually doesn't), download it
        #from glance (existence is checked via RBDDriver.exists)
        if not self.check_image_exists() or not os.path.exists(base):
            #fetch_func_sync is the lock-protected version of
            #fetch_func; the analysis continues below with create_image
self.create_image(fetch_func_sync, base, size,
*args, **kwargs)
        #Rbd does not support fallocate
if (size and self.preallocate and self._can_fallocate() and
os.access(self.path, os.W_OK)):
utils.execute('fallocate', '-n', '-l', size, self.path)
----------------------------------------------------------
#nova/virt/libvirt/imagebackend.py/Rbd.create_image
def create_image(self, prepare_template, base, size, *args,
                     **kwargs):
        """If the image is not in the local cache, download it from
        glance first; otherwise import it straight from the local
        cache. The input parameter prepare_template points at
        fetch_func_sync; the call chain is as follows (decorators
        ignored):
        prepare_template(fetch_func_sync)
        -> fetch_func(clone_fallback_to_fetch)
        (if that raised because the image is qcow format, the
        fetch_image call below runs instead)
        -> Rbd.clone
        -> libvirt_utils.fetch_image
        See the code analysis of clone below.
        """
if not self.check_image_exists():
prepare_template(target=base, max_size=size, *args, **kwargs)
# prepare_template() may have cloned the image into a new rbd
# image already instead of downloading it locally
        #prepare_template above downloads the image into the local
        #cache (if it is not there already); RBDDriver then runs
        #`rbd import` to upload it to nova's backend store (see the
        #sketch below). If the image is raw, it is never cached locally
        #at all: rbd clones it directly between pools. By now it should
        #be clear that with rbd backing both glance and nova, images
        #should be raw!
if not self.check_image_exists():
self.driver.import_image(base, self.rbd_name)
self.verify_base_size(base, size)
if size and size > self.get_disk_size(self.rbd_name):
self.driver.resize(self.rbd_name, size)
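The `rbd import` mentioned above is issued by RBDDriver.import_image; roughly (a trimmed sketch from rbd_utils.py in the Liberty tree, so the exact module path and flags may differ slightly):
#RBDDriver.import_image (sketch)
def import_image(self, base, name):
    """Import the local file `base` as RBD volume `name`."""
    args = ['--pool', self.pool, base, name]
    #image format 2 is required for clone support
    args += ['--image-format=2']
    args += self.ceph_args()
    utils.execute('rbd', 'import', *args)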
---------------------------------------------------------
#if check_image_exists finds no existing image, the clone call below is triggered
def clone(self, context, image_id_or_uri):
    #fetch the image metadata via glanceclient
image_meta = IMAGE_API.get(context, image_id_or_uri,
include_locations=True)
"""
[
{'url': u'rbd://1ee20ded-caae-419d-9fe3-5919f129cf55/images/226bc6e5-60d7-4a2c-bf0d-a568a1e26e00/snap', 'metadata': {}}
]
"""
locations = image_meta['locations']
LOG.debug('Image locations are: %(locs)s' % {'locs': locations})
    #my image is qcow2, so the exception below is raised. With Rbd as
    #nova's backend store you should upload raw images (as we will see
    #below, non-raw images are converted to raw, which costs
    #performance). Remember how LibvirtDriver._create_image said that a
    #failed clone leads to another libvirt_utils.fetch_image call?
    #This is where that happens.
if image_meta.get('disk_format') not in ['raw', 'iso']:
reason = _('Image is not raw format')
raise exception.ImageUnacceptable(image_id=image_id_or_uri,
reason=reason)
    #raw format images take this path
for location in locations:
        #check whether the location is cloneable (raw images are; see
        #the is_cloneable sketch below); if so, call clone to copy the
        #image from the source pool to the destination pool
if self.driver.is_cloneable(location, image_meta):
return self.driver.clone(location, self.rbd_name)
    #if no image location is cloneable, raise
reason = _('No image locations are accessible')
raise exception.ImageUnacceptable(image_id=image_id_or_uri,
reason=reason)
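is_cloneable is worth a quick look, since it encodes the raw-only rule. Roughly (a trimmed sketch of RBDDriver.is_cloneable from the Liberty tree):
#RBDDriver.is_cloneable (sketch)
def is_cloneable(self, image_location, image_meta):
    url = image_location['url']
    try:
        #the url must look like rbd://fsid/pool/image/snapshot
        fsid, pool, image, snapshot = self.parse_url(url)
    except exception.ImageUnacceptable as e:
        LOG.debug('not cloneable: %s', e)
        return False
    #the image must live in the same ceph cluster as nova's pool
    if self._get_fsid() != fsid:
        LOG.debug('%s is in a different ceph cluster', url)
        return False
    #only raw images can be COW-cloned
    if image_meta.get('disk_format') != 'raw':
        LOG.debug('rbd image clone requires a raw image')
        return False
    #finally, the snapshot must actually be readable
    try:
        return self.exists(image, pool=pool, snapshot=snapshot)
    except rbd.Error as e:
        LOG.debug('Unable to open image %(loc)s: %(err)s',
                  dict(loc=url, err=e))
        return False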
-----------------------------------------------------------
"""As noted above: for a non-raw image `Rbd.clone` raises, and
`libvirt_utils.fetch_image` is called to download the image instead;
that function simply calls `nova/virt/images.py/fetch_to_raw`.
Let's take a look:
"""
def fetch_to_raw(context, image_href, path, user_id,
                 project_id, max_size=0):
    """
    1. Download the image from glance into a 'hash(image_id).part' file
    2. If needed, convert the image to raw, saving it as
       'hash(image_id).converted'
    3. Delete 'hash(image_id).part' and rename
       'hash(image_id).converted' to 'hash(image_id)'
    """
    #path is the base from `Rbd.cache` earlier; in my example:
    #/opt/stack/data/nova/instances/_base/
    #cb241933d7daa40a536db47d41376dd03a83b517
path_tmp = "%s.part" % path
    #download the image from glance via glanceclient, storing it at
    #path_tmp
fetch(context, image_href, path_tmp, user_id, project_id,
max_size=max_size)
with fileutils.remove_path_on_error(path_tmp):
        #run `qemu-img info` to inspect the just-downloaded image file
data = qemu_img_info(path_tmp)
        #the image format
fmt = data.file_format
if fmt is None:
raise exception.ImageUnacceptable(
reason=_("'qemu-img info' parsing failed."),
image_id=image_href)
        #backing files are not supported
backing_file = data.backing_file
if backing_file is not None:
raise exception.ImageUnacceptable(image_id=image_href,
reason=(_("fmt=%(fmt)s backed by: %(backing_file)s") %
                {'fmt': fmt, 'backing_file': backing_file}))
        """
        # We can't generally shrink incoming images, so disallow
        # images > size of the flavor we're booting. Checking here
        # avoids an immediate DoS where we convert large qcow images
        # to raw (which may compress well but not be sparse).
        # TODO(p-draigbrady): loop through all flavor sizes, so that
        # we might continue here and not discard the download.
        # If we did that we'd have to do the higher level size checks
        # irrespective of whether the base image was prepared or not.
        """
disk_size = data.virtual_size
if max_size and max_size < disk_size:
            LOG.error(_LE('%(base)s virtual size %(disk_size)s '
                          'larger than flavor root disk size %(size)s'),
                      {'base': path,
                       'disk_size': disk_size,
                       'size': max_size})
raise exception.FlavorDiskSmallerThanImage(
flavor_size=max_size, image_size=disk_size)
        #if the image is not raw, force-convert it to raw
if fmt != "raw" and CONF.force_raw_images:
staged = "%s.converted" % path
LOG.debug("%s was %s, converting to raw" %
(image_href, fmt))
with fileutils.remove_path_on_error(staged):
                try:
                    """run `qemu-img convert` to convert the downloaded
                    non-raw image path_tmp to raw, storing the result
                    in the staged file (a sketch of convert_image
                    follows this function)
                    """
convert_image(path_tmp, staged, fmt, 'raw')
except exception.ImageUnacceptable as exp:
# re-raise to include image_href
                    raise exception.ImageUnacceptable(
                        image_id=image_href,
                        reason=_("Unable to convert image to "
                                 "raw: %(exp)s") % {'exp': exp})
            #delete the originally downloaded non-raw image file
os.unlink(path_tmp)
            #run `qemu-img info` on the converted image and raise if it
            #is still not raw
data = qemu_img_info(staged)
if data.file_format != "raw":
                raise exception.ImageUnacceptable(
                    image_id=image_href,
                    reason=_("Converted to raw, but format "
                             "is now %s") % data.file_format)
            #rename the converted image file
os.rename(staged, path)
else:
os.rename(path_tmp, path)
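convert_image itself is a thin wrapper around qemu-img. A minimal sketch, assuming the Liberty signature with an explicit input format (which matches the convert_image(path_tmp, staged, fmt, 'raw') call above):
#nova/virt/images.py/convert_image (minimal sketch)
def convert_image(source, dest, in_format, out_format, run_as_root=False):
    #passing the source format avoids qemu-img probing the file type
    cmd = ('qemu-img', 'convert', '-f', in_format,
           '-O', out_format, source, dest)
    utils.execute(*cmd, run_as_root=run_as_root)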
To sum up: the above covered how the system disk is created with a ceph rbd storage backend. The key points:
- With ceph rbd as the storage backend, use raw format images whenever possible (a hypothetical pre-upload conversion is sketched below)
- If for some reason you must use qcow2 or a similar image format, put the _base cache directory on a fast device to speed up instance boot
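For example, a hypothetical pre-upload conversion (file names are just examples), so that glance only ever serves raw images and Rbd.clone can COW-clone them instead of downloading and converting on every compute host:
import subprocess

#convert the qcow2 image to raw before uploading it to glance
subprocess.check_call(['qemu-img', 'convert', '-f', 'qcow2', '-O', 'raw',
                       'ceph-centos-65-x64-20g.qcow2',
                       'ceph-centos-65-x64-20g.raw'])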
Creating and configuring the non-system disks
Back to nova/virt/libvirt/driver.py/LibvirtDriver._create_image:
def _create_image(self, context, instance,
disk_mapping, suffix='',
disk_images=None, network_info=None,
block_device_info=None, files=None,
admin_pass=None, inject_files=True,
                      fallback_from_host=None):
        """First look at the disk_mapping parameter; it defines three
        devices:
disk_mapping: {
'disk.config': {'bus': 'ide', 'type': 'cdrom', 'dev': 'hdd'},
'disk': {'bus': 'virtio', 'boot_index': '1',
'type': 'disk', 'dev': u'vda'},
'root': {'bus': 'virtio', 'boot_index': '1',
'type': 'disk', 'dev': u'vda'}}
"""
        #disk_mapping does not contain the disks below, and their
        #handling is similar to the system disk above, so the
        #`disk.local`, `disk.swap` and `ephemerals` code is skipped
.......
        # Config drive (used by default)
if configdrive.required_by(instance):
LOG.info(_LI('Using config drive'), instance=instance)
extra_md = {}
            #the admin password
if admin_pass:
extra_md['admin_pass'] = admin_pass
            #input parameter files=[]; network_info holds the detailed
            #network configuration and instance is an InstanceV2 object
            #with the instance details. Gather the instance's
            #configuration and build an InstanceMetadata object
inst_md = instance_metadata.InstanceMetadata(instance,
content=files, extra_md=extra_md,
network_info=network_info)
            with configdrive.ConfigDriveBuilder(
                    instance_md=inst_md) as cdb:
                #build the config drive path:
                #CONF.instances_path/instance.uuid/disk.config
                #(CONF.instances_path can be changed in nova.conf)
                configdrive_path = self._get_disk_config_path(instance,
                                                              suffix)
LOG.info(_LI('Creating config drive at %(path)s'),
{'path': configdrive_path},
instance=instance)
try:
                    #call ConfigDriveBuilder.make_drive to create the
                    #config drive; internally it runs the
                    #CONF.mkisofs_cmd tool (genisoimage by default) to
                    #build configdrive_path with the instance's
                    #configuration inside (see the sketch below)
cdb.make_drive(configdrive_path)
except processutils.ProcessExecutionError as e:
with excutils.save_and_reraise_exception():
                        LOG.error(_LE('Creating config drive '
                                      'failed with error: %s'),
e, instance=instance)
try:
# Tell the storage backend about the config drive
                #create the config disk backend by type; my setup uses
                #rbd, so an Rbd instance is created (a Raw instance
                #otherwise)
config_drive_image = self.image_backend.image(
instance, 'disk.config' + suffix,
self._get_disk_config_image_type())
                #this runs `rbd import` to import the configdrive_path
                #file into ceph (disk name: instance_uuid_disk.config)
config_drive_image.import_file(
instance, configdrive_path, 'disk.config' +
suffix)
finally:
# NOTE(mikal): if the config drive was imported
#into RBD, then we no longer need the local copy
                #remove the local copy of the config drive
if CONF.libvirt.images_type == 'rbd':
os.unlink(configdrive_path)
        # File injection only if needed
        #by default inject_partition=-2, i.e. injecting configuration
        #directly into the disk image is disabled; injection is also
        #not supported for volume-backed instances
elif inject_files and CONF.libvirt.inject_partition != -2:
if booted_from_volume:
                LOG.warn(_LW('File injection into a boot from '
                             'volume instance is not supported'),
instance=instance)
            #inject into the system disk; see nova/virt/disk/vfs for
            #the details, mostly implemented on top of the guestfs
            #module
self._inject_data(
instance, network_info, admin_pass, files,
suffix)
        #virt_type is usually kvm; with uml the disk must be owned by
        #root
if CONF.libvirt.virt_type == 'uml':
libvirt_utils.chown(image('disk').path, 'root')
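As an aside, cdb.make_drive ends up shelling out to the mkisofs tool; the iso9660 path looks roughly like this (a trimmed sketch of ConfigDriveBuilder._make_iso9660 from nova/virt/configdrive.py; in the real code the publisher string is built from the nova version info):
#ConfigDriveBuilder._make_iso9660 (sketch)
def _make_iso9660(self, path, tmpdir):
    #tmpdir holds the rendered metadata files (meta_data.json, ...)
    utils.execute(CONF.mkisofs_cmd,
                  '-o', path,
                  '-ldots', '-allow-lowercase', '-allow-multidot',
                  '-l', '-publisher', 'OpenStack nova',
                  '-quiet', '-J', '-r',
                  '-V', 'config-2',  #the volume label cloud-init probes
                  tmpdir,
                  attempts=1, run_as_root=False)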
Generating the libvirt XML configuration
With _create_image done, all the disk devices are in place. Next, let's see how the libvirt XML is generated:
#nova/virt/libvirt/driver.py/LibvirtDriver._get_guest_xml
def _get_guest_xml(self, context, instance, network_info,
disk_info,
image_meta, rescue=None,
block_device_info=None,
write_to_disk=False):
"""NOTE(danms): Stringifying a NetworkInfo will take a
lock. Do this ahead of time so that we don't acquire it
        while also holding the logging lock.
        """
        """The logic here is clear:
        1. Build the guest configuration from the settings
        2. Convert the configuration to XML
        3. Save the XML to local disk
        """
network_info_str = str(network_info)
msg = ('Start _get_guest_xml '
'network_info=%(network_info)s '
'disk_info=%(disk_info)s '
'image_meta=%(image_meta)s rescue=%(rescue)s '
'block_device_info=%(block_device_info)s' %
{'network_info': network_info_str,
'disk_info': disk_info,
'image_meta': image_meta, 'rescue': rescue,
'block_device_info': block_device_info})
# NOTE(mriedem): block_device_info can contain
#auth_password so we need to sanitize the password in the
#message.
LOG.debug(strutils.mask_password(msg), instance=instance)
conf = self._get_guest_config(instance, network_info,
image_meta,
disk_info, rescue,
block_device_info,
context)
        #convert the guest configuration to XML
xml = conf.to_xml()
        #persist to local disk
if write_to_disk:
instance_dir = libvirt_utils.get_instance_path(instance)
            #save the XML to CONF.instances_path/instance_uuid/libvirt.xml
xml_path = os.path.join(instance_dir, 'libvirt.xml')
libvirt_utils.write_to_file(xml_path, xml)
LOG.debug('End _get_guest_xml xml=%(xml)s',
{'xml': xml}, instance=instance)
return xml
_get_guest_xml is fairly straightforward, so it is not analyzed further here; readers with questions are welcome to contact me to discuss. To make the to_xml step concrete, a small illustration follows.
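Here is a tiny, illustrative use of the config classes that _get_guest_config populates (the values are made up and the real method sets many more fields; treat this as a sketch of the API, not a faithful reproduction):
#illustrative only: the objects in nova/virt/libvirt/config.py
#serialize themselves to libvirt XML via to_xml()
from oslo_utils import units
from nova.virt.libvirt import config as vconfig

guest = vconfig.LibvirtConfigGuest()
guest.virt_type = 'kvm'
guest.name = 'instance-00000001'
guest.uuid = '11111111-2222-3333-4444-555555555555'
guest.memory = 2048 * units.Ki   #libvirt expresses memory in KiB
guest.vcpus = 1
guest.os_type = 'hvm'
print(guest.to_xml())            #prints a <domain type='kvm'> document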
Starting the instance
The function is implemented as follows:
def _create_domain_and_network(self, context, xml, instance,
network_info,
disk_info,
block_device_info=None,
power_on=True, reboot=False,
vifs_already_plugged=False):
    #the key input parameters are as follows:
"""
disk_info:
{
'disk_bus': 'virtio', 'cdrom_bus': 'ide',
'mapping': {
'disk.config': {'bus': 'ide', 'type': 'cdrom',
'dev': 'hdd'},
'disk': {'bus': 'virtio', 'boot_index': '1',
'type': 'disk', 'dev': u'vda'},
'root': {'bus': 'virtio',
'boot_index': '1', 'type': 'disk', 'dev': u'vda'}
}
}
block_device_info:
{
'swap': None, 'root_device_name': u'/dev/vda',
'ephemerals': [], 'block_device_mapping': []
}
    network_info: the instance's network configuration
[VIF({'profile': {}, 'ovs_interfaceid': None,
'preserve_on_delete': False,
'network': Network({'bridge': u'brq20f5ec1b-4f', 'subnets':
[Subnet({'ips': [FixedIP({'meta': {}, 'version': 4, 'type':
'fixed', 'floating_ips': [], 'address':
u'xx.xxx.xxx.xxx'})], 'version': 4, 'meta': {'dhcp_server':
u'xx.xxx.xxx.xxx'}, 'dns': [], 'routes': [], 'cidr':
u'xx.xxx.xxx.0/xxx', 'gateway': IP({'meta': {}, 'version':
4, 'type': 'gateway', 'address': u'10.240.227.1'})})],
'meta': {'injected': False, 'tenant_id':
u'25520b29dce346d38bc4b055c5ffbfcb',
'should_create_bridge': True}, 'id': u'20f5ec1b-4f96-41d8-
97f3-6776db0d00a7', 'label': u'10.240.227.x'}), 'devname':
u'tapefe77b47-fe', 'vnic_type': u'normal', 'qbh_params':
None, 'meta': {}, 'details': {u'port_filter': True},
'address': u'fa:16:3e:5e:64:80', 'active': False, 'type':
u'bridge', 'id': u'efe77b47-fef8-48ff-93ee-8da753a6d2bb',
'qbg_params': None})]
"""
    #get the block device mapping; from the inputs above we know
    #block_device_mapping=[]
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
    #get the image metadata
image_meta = objects.ImageMeta.from_instance(instance)
    #if disk encryption is enabled, the disks are encrypted with the
    #configured algorithm; block_device_mapping=[] here, so that code
    #is skipped
for vol in block_device_mapping:
.......
    #vif_plugging_timeout=300 (5 minutes by default)
    #check the neutron network events: if a vif is not yet active, the
    #plug event must be waited for. In my example events is:
    #[('network-vif-plugged', u'efe77b47-fef8-48ff-93ee-8da753a6d2bb')]
timeout = CONF.vif_plugging_timeout
if (self._conn_supports_start_paused and
utils.is_neutron() and not
vifs_already_plugged and power_on and timeout):
events = self._get_neutron_events(network_info)
else:
events = []
    #pause = True
pause = bool(events)
guest = None
    #try/except handling omitted
    #the virtual NICs must be ready before the instance is started;
    #ComputeVirtAPI.wait_for_instance_event handles the neutron network
    #events (here network-vif-plugged): it spawns an eventlet thread to
    #process the events and waits for completion; on failure
    #self._neutron_failed_callback is invoked.
with self.virtapi.wait_for_instance_event(
instance, events, deadline=timeout,
error_callback=self._neutron_failed_callback):
        #plug the virtual NICs (I'm using bridge networking, so this
        #ends up in LibvirtGenericVIFDriver.plug_bridge)
        """ A quick walkthrough:
        plug_vifs dispatches on the vif type (bridge in my example),
        so the concrete call chain is:
        self.plug_vifs ->
        nova/virt/libvirt/vif.py/LibvirtGenericVIFDriver.plug
        -> LibvirtGenericVIFDriver.plug_bridge ->
        nova/network/linux_net.py/
        LinuxBridgeInterfaceDriver.ensure_bridge; the bridge is finally
        created with the brctl tool, and readers can explore the
        implementation themselves
        """
self.plug_vifs(instance, network_info)
        #set up the basic iptables rules
self.firewall_driver.setup_basic_filtering(instance,
network_info)
        #set up the network filtering and firewall rules for the
        #instance
self.firewall_driver.prepare_instance_filter(instance,
network_info)
with self._lxc_disk_handler(instance, image_meta,
block_device_info,
disk_info):
            #call into libvirt to start the VM; xml is the guest XML
            #config, pause=True, power_on=True. With qemu-kvm this
            #first connects to the hypervisor via qemu:///system, then
            #defines the domain, and finally starts the instance (see
            #the _create_domain sketch below)
guest = self._create_domain(
xml, pause=pause, power_on=power_on)
        #no-op
self.firewall_driver.apply_instance_filter(instance,
network_info)
# Resume only if domain has been paused
if pause:
guest.resume()
return guest
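_create_domain itself is short; roughly (a trimmed sketch of LibvirtDriver._create_domain from the Liberty tree, with error handling omitted):
#nova/virt/libvirt/driver.py/LibvirtDriver._create_domain (sketch)
def _create_domain(self, xml=None, domain=None,
                   power_on=True, pause=False):
    if xml:
        #define the domain from the XML (the `virsh define` step)
        guest = libvirt_guest.Guest.create(xml, self._host)
    else:
        guest = libvirt_guest.Guest(domain)
    if power_on or pause:
        #start the domain, optionally in the paused state
        guest.launch(pause=pause)
    if not utils.is_neutron():
        guest.enable_hairpin()
    return guest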
If everything goes well, control returns to LibvirtDriver.spawn, which waits for the instance to finish booting. Once the VM is up, we return to _build_and_run_instance, which at the end updates the instance state, writes it to the database, and sends a notification so the scheduler can update its resource usage.
All in all, the interaction between the components during an instance boot is fairly involved, with many details worth taking the time to work through.