class ForemanVM
Manage a cluster of KVM hosts using Foreman
Author: Mark Heily <mark.heily@bronto.com>
Attributes
Public Class Methods
# File lib/foreman_vm.rb, line 322 def initialize @log = Logger.new(STDERR) @action = nil @config = ForemanAP::Config.new @cluster = ForemanAP::Cluster.new( @config.hypervisors, @config.libvirt_user, @config.libvirt_password) #FIXME: reenable this: @workqueue = ForemanAP::Workqueue.new('foreman-vm') @console = ForemanAP::ConsoleViewer.new(@cluster) # TODO: transition to using @config.foreman_user everywhere # instead of @user/@password # if @config.foreman_user @user = @config.foreman_user @password = @config.foreman_password else @user = ENV['USER'] @password = nil end @foreman_api = ForemanAP::ForemanAPI.new(@config.foreman_uri, @user, @password) # Build specifications @buildspec = { 'cpus' => '1', # Number of vCPUs 'memory' => '536870912', # Memory, in bytes (default: 512MB) 'disk_capacity' => '20G', 'disk_format' => 'raw', 'storage_pool' => 'vm-corp-004', 'domain' => `dnsdomainname`.chomp, 'network_interface' => 'vnet0.201', 'provision_method' => 'build', 'owner' => 'nil', # 'image_id' => '/srv/images/centos6-generic-template.qcow2', 'console' => false, #Hidden for testing purposes '_clone' => false, '_copy' => false, '_libgfapi' => true, '_disk_backing_file' => '/var/lib/libvirt/images/centos6-dude-template.qcow2', } end
Public Instance Methods
Ask the user for their Foreman password
# File lib/foreman_vm.rb, line 73
# Ask the user for their Foreman password on STDIN with terminal echo
# disabled. Stores the result in @password.
#
# BUGFIX: echo is now restored in an ensure block, so an interrupt
# (Ctrl-C) or read error no longer leaves the terminal with echo off.
# A trailing newline is printed because the user's Enter key is not
# echoed while echo is disabled.
def ask_password
  printf 'Enter your Foreman password: '
  system "stty -echo"
  begin
    @password = STDIN.gets.chomp
  ensure
    system "stty echo"
    puts
  end
end
Update the build specification
# File lib/foreman_vm.rb, line 67
# Merge +spec+ into the current build specification, overwriting any
# keys that are already present.
def buildspec=(spec)
  @buildspec.update(spec)
end
Clone an existing disk volume to reduce the build time
# File lib/foreman_vm/storage_pool.rb, line 71 def clone_volume delete_volume # BUG: We would like to do this, but it creates the file owned by root:root #virsh "vol-create-as --pool #{@buildspec['storage_pool']} --name #{fqdn()}-disk1 --capacity 30G --format qcow2 --backing-vol #{@buildspec['_disk_backing_file']} --backing-vol-format qcow2" # # WORKAROUND: use an XML volume definition to set the owner/group # xml = "<volume> <name>#{fqdn}-disk1</name> <key>/gvol/images/#{fqdn}-disk1</key> <source> </source> <capacity unit='bytes'>32212254720</capacity> <allocation unit='bytes'>197120</allocation> <target> <path>/gvol/images/#{fqdn}-disk1</path> <format type='qcow2'/> <permissions> <mode>0660</mode> <owner>107</owner> <group>107</group> </permissions> </target> <backingStore> <path>#{@buildspec['_disk_backing_file']}</path> <format type='qcow2'/> </backingStore> </volume> " @log.debug "creating volume: #{xml}" virsh("vol-create --pool gvol --file /dev/stdin >/dev/null", xml) end
# File lib/foreman_vm.rb, line 35
# Set the compute resource (hypervisor) for the build specification.
# KLUDGE: Foreman stores compute resources by short name, so everything
# from the first dot onward is stripped.
def compute_resource=(txt)
  shortname = txt.gsub(/\..*/, '')
  @buildspec['compute_resource'] = shortname
end
Copy an existing disk volume to reduce the build time
# File lib/foreman_vm/storage_pool.rb, line 63
# Copy an existing disk volume (full clone of the template volume) to
# reduce the build time. Any pre-existing volume for this VM is deleted
# first.
def copy_volume
  delete_volume
  pool = @buildspec['storage_pool']
  template = @buildspec['_disk_backing_file']
  virsh "vol-clone --pool #{pool} --vol #{template} --newname #{fqdn}-disk1"
end
Build a new virtual machine
# File lib/foreman_vm.rb, line 206
# Build a new virtual machine: registers the host with Foreman, creates
# and attaches its storage, powers it on, and optionally attaches to the
# console.
#
# BUGFIX: the not-implemented guard for the clone/copy path compared the
# array literal ['spec_copy'] to true (always false), so the unfinished
# copy path fell through silently. It now tests spec['_copy'] as intended.
def create
  spec = @buildspec

  # If no compute resource is given, select the one with the most
  # available memory.
  spec['compute_resource'] ||= @cluster.best_fit(spec['name'], normalize_memory(spec['memory'])).gsub(/\..*/, '')

  if spec['_clone'] == true
    #FIXME: does not belong here
    spec['disk_format'] = 'qcow2'
    spec['storage_pool'] = 'gvol'
  end

  refresh_storage_pool(spec)

  # Host record for the Foreman API.
  rec = {
    'domain_id' => @foreman_api.get_id('domains', spec['domain']),
    'subnet_id' => @foreman_api.get_id('subnets', spec['subnet']),
    'name' => spec['name'],
    'build' => "true",
    'enabled' => "true",
    # XXX-FIXME: hardcoded, should not use this..
    #'compute_profile_id' => '5',
    'compute_resource_id' => @foreman_api.get_id('compute_resources', spec['compute_resource']),
    'environment_id' => @foreman_api.get_id('environments', spec['environment']),
    'managed' => true,
    'hostgroup_id' => @foreman_api.get_id('hostgroups', spec['hostgroup'], 'title'),
    'provision_method' => spec['provision_method'],
    'compute_attributes' => {
      'memory' => normalize_memory(spec['memory']),
      'image_id' => spec['image_id'],
      'nics_attributes' => {
        '0' => {
          'bridge' => spec['network_interface'],
          'model' => 'virtio',
          'type' => 'bridge',
        }
      },
      'interfaces_attributes' => {
        '0' => {
          'bridge' => spec['network_interface'],
          'model' => 'virtio',
          'type' => 'bridge'
        },
      },
      'cpus' => spec['cpus'],
      'start' => '0',
      'volumes_attributes' => {
        '0' => {
          'capacity' => spec['disk_capacity'],
          'pool_name' => spec['storage_pool'],
          'format_type' => spec['disk_format'],
        }
      }
    }
  }

  if spec['organization']
    rec['organization_id'] = @foreman_api.get_id('organizations', spec['organization'], 'title')
  end

  if spec['owner']
    rec['owner_id'] = @foreman_api.get_id('users', spec['owner'], 'login')
  end

  if spec['provision_method'] == 'image'
    # XXX-FIXME: hardcoded image id/name
    rec['image_id'] = 3
    rec['image_name'] = 'centos6-generic'
    rec['compute_attributes']['image_id'] = spec['image_id']
  end

  if spec['_clone'] or spec['_copy']
    rec['build'] = false
  end

  # configure the volumes
  #
  # TODO: use a BuildSpec object for everything.
  spec2 = ForemanAP::BuildSpec.new
  spec2.disk_capacity = spec['disk_capacity']
  spec2.storage_pool = spec['storage_pool']
  spec2.disk_format = spec['disk_format']
  ### XXX-TESTING:
  rec['compute_attributes']['volumes_attributes'] = {}
  ###rec['compute_attributes']['volumes_attributes'] = spec2.to_foreman_api['compute_attributes']['volumes_attributes']
  #pp rec
  #raise 'FIXME'

  @foreman_api.request(:post, "/hosts", rec)

  # BUGFIX: was `['spec_copy'] == true` (an array literal, never true).
  raise 'FIXME - not implemented' if spec['_clone'] == true or spec['_copy'] == true

  # Create volumes and attach them to the VM
  create_storage

  #DEADWOOD:
  #####if spec['_clone'] == true
  ##### clone_volume
  #####elsif spec['_copy'] == true
  ##### copy_volume
  #####else
  ##### # crude way to fix the permissions
  ##### wipe_volume
  #####end
  #####enable_libgfapi if spec['_libgfapi']

  #FIXME: implement this
  #raise 'Duplicate IP address' if ip_address_in_use? $GET_THE_ADDRESS_HERE

  @cluster.guest(fqdn).start

  # Attach to the console
  if spec['console']
    guest = spec['name'] + '.' + spec['domain']
    host = spec['compute_resource'] + '.' + spec['domain']
    console_attach(host, guest)
  end
end
Create storage and attach it to the virtual machine
# File lib/foreman_vm.rb, line 185 def create_storage pool = @config.storage_pool host = @cluster.member(@cluster.find(fqdn)) disk_number = 0 @buildspec['disk_capacity'].split(',').each do |disk_size| disk_number += 1 capacity = normalize_memory(disk_size) puts "create #{fqdn} - #{capacity}" basedir = '/gvol/images' #XXX-HARDCODED path = basedir + '/' + fqdn + '-disk' + disk_number.to_s @cluster.guest(fqdn).add_libgfapi_volume( "gvol/images/#{fqdn}-disk#{disk_number.to_s}", @config.glusterfs_server, disk_number ) host.storage_pool(pool).create_volume(path, capacity) end end
Submit a deferred job
# File lib/foreman_vm.rb, line 141
# Submit a deferred job to the work queue, recording the requesting
# user, the action to perform, and the current build specification.
def defer(action)
  job = {
    'user' => @user,
    'action' => action,
    'buildspec' => @buildspec,
    'api_version' => 1,
  }
  @workqueue.enqueue(job)
end
Destroy a virtual machine
# File lib/foreman_vm.rb, line 110 def delete # Check if it uses libgfapi. If so, we need to disable it # if self.dumpxml =~ /protocol='gluster'/ # self.stop # self.disable_libgfapi # end # Call 'virsh destroy' to kill the VM begin @foreman_api.request(:delete, "/hosts/#{self.fqdn}", {'id' => self.fqdn}) rescue # Try again, to workaround a bug where the first deletion fails.. @foreman_api.request(:delete, "/hosts/#{self.fqdn}", {'id' => self.fqdn}) # When the bug hits, the volume is left behind. force it's deletion # # Horrible kludge: hardcoded something that will allow the delete to work gvol = @cluster.member(@cluster.members[0]).storage_pool('gvol') gvol.refresh gvol.volume("#{self.fqdn}-disk1").delete end end
Delete the disk volume associated with the VM
# File lib/foreman_vm/volume.rb, line 34
# Delete the first disk volume (<fqdn>-disk1) associated with the VM
# from the buildspec's storage pool.
def delete_volume
  pool = @buildspec['storage_pool']
  virsh "vol-delete #{self.fqdn}-disk1 --pool #{pool}"
end
Modify the VM definition to stop using libgfapi
# File lib/foreman_vm/domain.rb, line 135 def disable_libgfapi self.stop require 'rexml/document' doc = REXML::Document.new(virsh("dumpxml --security-info #{self.fqdn}")) # Convert the file-backed disk into a libgfapi disk doc.elements.each('domain/devices/disk') do |ele| ele.attributes['type'] = 'file' end doc.elements.each('domain/devices/disk') do |ele| ele.delete_element('source') ele.add_element('source', {'file'=>"/gvol/images/#{self.fqdn}-disk1", 'protocol' => 'gluster'}) end virsh("undefine #{self.fqdn}") virsh("define /dev/stdin >/dev/null 2>&1", doc.to_s) end
Get the state of the domain
# File lib/foreman_vm/domain.rb, line 116
# Get the state of the domain as reported by `virsh domstate`
# (e.g. "running", "shut off"). The double chomp strips up to two
# trailing newlines from the virsh output.
def domstate
  output = virsh("domstate #{self.fqdn}")
  output.chomp.chomp
end
# File lib/foreman_vm/domain.rb, line 129
# Print the libvirt XML definition of the VM (including security
# information) to stdout.
def dumpxml
  xml = virsh("dumpxml --security-info #{self.fqdn}")
  puts xml
end
Modify the VM definition to use libgfapi. The glusterfs_server parameter is the FQDN of the GlusterFS server.
# File lib/foreman_vm/domain.rb, line 157 def enable_libgfapi(glusterfs_server) require 'rexml/document' doc = REXML::Document.new(virsh("dumpxml --security-info #{self.fqdn}")) raise 'cannot enable libgfapi while the VM is running' if domstate == 'running' # When cloning or copying, no need to boot from the network if @buildspec['_clone'] or @buildspec['_copy'] doc.delete_element "/domain/os/boot[@dev='network']" end # Set cache=none just in case # Set the disk type, just in case doc.elements.each('domain/devices/disk/driver') do |ele| ele.attributes['cache'] = 'none' ele.attributes['type'] = @buildspec['disk_format'] end # Convert the file-backed disk into a libgfapi disk doc.elements.each('domain/devices/disk') do |ele| ele.attributes['type'] = 'network' end diskcount = 1 # XXX-KLUDGE: we should actually look at the disk filename doc.elements.each('domain/devices/disk') do |ele| ele.delete_element('source') ele.add_element('source', {'name'=>"gvol/images/#{self.fqdn}-disk#{diskcount}", 'protocol' => 'gluster'}) diskcount += 1 end doc.elements.each('domain/devices/disk/source') do |ele| ele.add_element('host', {'name'=>glusterfs_server, 'transport'=>'tcp', 'port'=>'0'}) end virsh("undefine #{self.fqdn}") virsh("define /dev/stdin >/dev/null 2>&1", doc.to_s) end
Get the FQDN of the VM
# File lib/foreman_vm.rb, line 42
# Get the fully-qualified domain name of the VM, built from the
# buildspec's 'name' and 'domain'.
def fqdn
  shortname = @buildspec['name']
  domain = @buildspec['domain']
  shortname + '.' + domain
end
Get the hypervisor that hosts the VM
# File lib/foreman_vm.rb, line 48
# Get the hypervisor that hosts the VM. Uses the buildspec's cached
# 'compute_resource' when set; otherwise asks the cluster and caches
# the answer.
def hypervisor
  cached = @buildspec['compute_resource']
  return cached if cached
  @buildspec['compute_resource'] = @cluster.find(fqdn)
end
View the list of deferred jobs (TODO: stop leaking Beanstalkd details)
# File lib/foreman_vm.rb, line 135
# View the list of deferred jobs.
# (TODO: stop leaking Beanstalkd details)
def job_status
  "job status: #{@workqueue.jobs}"
end
Set the VM hostname
# File lib/foreman_vm.rb, line 54
# Set the VM hostname. A dotted name is split into short name and DNS
# domain; a bare name sets only the short name (the buildspec's default
# domain is left untouched).
def name=(arg)
  md = /(.*?)\.(.*)/.match(arg)
  if md
    @hostname = md[1]
    @buildspec['name'] = md[1]
    @buildspec['domain'] = md[2]
  else
    @hostname = arg
    @buildspec['name'] = arg
  end
end
Parse command line options
# File lib/foreman_vm/getopt.rb, line 6
# Parse command line options into @user/@password/@foreman_uri, the
# build specification (@buildspec) and the requested @action. Prompts
# for the Foreman password if none was supplied.
#
# BUGFIXES:
# * File.exists? (removed in Ruby 3.2) -> File.exist?
# * the password file is read with File.read instead of shelling out to
#   `cat #{arg}` (which was also a shell-injection risk)
# * --foreman-uri help text said "password" where it meant "server"
def parse_options
  optparse = OptionParser.new do |opts|
    opts.on( '--user USER', 'The username to login to Foreman with') do |arg|
      @user = arg
    end
    opts.on( '--password PASSWORD', 'The password to login to Foreman with') do |arg|
      @password = arg
    end
    opts.on( '--password-file FILE', 'The file containing your Foreman password') do |arg|
      raise 'File does not exist' unless File.exist? arg
      @password = File.read(arg).chomp
    end
    opts.on( '--foreman-uri URI', 'The URI of the Foreman server') do |arg|
      @foreman_uri = arg
      #options['foreman_uri'] = arg
    end

    opts.separator ""
    opts.separator "Specific options for building hosts:"

    opts.on( '--console', 'Attach to the virtual machine console') do |arg|
      @buildspec['console'] = true
      @action = 'console' if @action.nil? #XXX-KLUDGE for testing
    end
    opts.on( '--cpus CPUS', 'The number of CPUs to assign to the VM') do |arg|
      @buildspec['cpus'] = arg
    end
    opts.on( '--memory BYTES', 'The amount of memory, in bytes, to assign to the VM') do |arg|
      @buildspec['memory'] = arg
    end
    opts.on( '--domain DOMAIN', 'The DNS domain name to use') do |arg|
      @buildspec['domain'] = arg
    end
    opts.on( '--organization ORG', 'The organization name in Foreman') do |arg|
      @buildspec['organization'] = arg
    end
    opts.on( '--owner OWNER', 'The owner name in Foreman') do |arg|
      @buildspec['owner'] = arg
    end
    opts.on( '--hostgroup HOSTGROUP', 'The Foreman hostgroup') do |arg|
      @buildspec['hostgroup'] = arg
    end
    opts.on( '--compute-resource RESOURCE', 'The Foreman compute resource') do |arg|
      @buildspec['compute_resource'] = arg
    end
    opts.on( '--provision-method METHOD', 'The provisioning method (image or network)') do |arg|
      @buildspec['provision_method'] = arg
    end
    opts.on( '--environment ENVIRONMENT', 'The Puppet environment') do |arg|
      @buildspec['environment'] = arg
    end
    opts.on( '--network-interface INTERFACE', 'The network interface') do |arg|
      @buildspec['network_interface'] = arg
    end
    opts.on( '--disk-capacity SIZE', 'The size of the first disk, using M or G suffixes') do |arg|
      @buildspec['disk_capacity'] = arg
    end
    opts.on( '--disk-format FORMAT', 'The format of the disk. Can be raw or qcow2') do |arg|
      @buildspec['disk_format'] = arg
    end
    opts.on( '--storage-pool POOL', 'The storage pool') do |arg|
      @buildspec['storage_pool'] = arg
    end
    opts.on( '--subnet SUBNET', 'The subnet') do |arg|
      @buildspec['subnet'] = arg
    end

    # TODO: some nice header thing
    opts.separator ""
    opts.separator "Actions:"

    opts.on( '--rebuild', 'Rebuild the host') do |arg|
      @action = 'rebuild'
    end
    opts.on( '--create', 'Create the host') do |arg|
      @action = 'create'
    end
    opts.on( '--create-storage', 'Create storage and attach it to the host') do |arg|
      @action = 'create-storage'
    end
    opts.on( '--delete', 'Delete the host') do |arg|
      @action = 'delete'
    end
    opts.on( '--start', 'Power on the VM') do |arg|
      @action = 'start'
    end
    opts.on( '--stop', 'Power off the VM, without doing a graceful shutdown') do |arg|
      @action = 'stop'
    end
    opts.on( '--dumpxml', 'Show the libvirt XML virtual machine definition') do |arg|
      @action = 'dumpxml'
    end
    opts.on( '--monitor-boot', 'Watch the console of a VM as it boots') do |arg|
      @action = 'monitor-boot'
    end
    opts.on( '--enable-libgfapi', 'Enable GlusterFS libgfapi') do |arg|
      @action = 'enable-libgfapi'
    end
    opts.on( '--disable-libgfapi', 'Disable GlusterFS libgfapi') do |arg|
      @action = 'disable-libgfapi'
    end

    ## Verbosity (DEBUG, WARN, INFO, etc.)
    ##log_level="DEBUG"

    opts.on( '-h', '--help', 'Display this screen' ) do
      puts opts
      exit
    end
  end
  optparse.parse!
  ask_password if @password.nil?
end
Rebuild a virtual machine
# File lib/foreman_vm.rb, line 152 def rebuild # Determine the hypervisor @buildspec['compute_resource'] = @cluster.find(fqdn) refresh_storage_pool(@buildspec) # Destroy the puppet certificate # XXX-KLUDGE system "curl -X DELETE http://util-stage-001.brontolabs.local:8443/puppet/ca/#{fqdn}" # TODO: if spec['_copy']... if @buildspec['_clone'] virsh "destroy #{self.fqdn}" if domstate == 'running' @buildspec['disk_format'] = 'qcow2' enable_libgfapi @config.glusterfs_server clone_volume start() else # Build via Kickstart: # # Destroy the puppet certificate, and enable build at the next boot @foreman_api.request(:put, "/hosts/#{fqdn}", { 'host' => {'build' => '1' }}) # Call 'virsh destroy' to kill the VM, then power it back on stop sleep 3 start end end
Create a snapshot of the virtual machine
# File lib/foreman_vm/domain.rb, line 84
# Create a snapshot of the virtual machine. The VM must be powered down,
# and at most one snapshot is kept.
#
# BUGFIX: the guard was inverted (`!= 'running'`), which raised exactly
# when the VM *was* powered down — contradicting its own error message
# and making snapshots impossible. Snapshots here are taken while shut
# off (which is why an existing snapshot shows 'shutoff' in
# snapshot-list), so we raise when the VM is running.
def snapshot_create
  raise 'the VM must be powered down before taking a snapshot' \
    if domstate == 'running'
  raise 'a snapshot already exists' if snapshot_list =~ /shutoff/
  virsh("snapshot-create #{fqdn}")
end
Delete a virtual machine snapshot
# File lib/foreman_vm/domain.rb, line 101
# Delete the current virtual machine snapshot.
def snapshot_delete
  cmd = "snapshot-delete #{fqdn} --current"
  virsh(cmd)
end
# File lib/foreman_vm/domain.rb, line 78
# Return the raw `virsh snapshot-list` output for this VM.
def snapshot_list
  cmd = "snapshot-list #{self.fqdn}"
  virsh(cmd)
end
Revert a virtual machine back to a snapshot
# File lib/foreman_vm/domain.rb, line 93
# Revert the virtual machine back to its current snapshot. Powers the
# VM off first.
#
# BUGFIX: the existence check was inverted — it raised 'a snapshot does
# not exist' precisely when snapshot-list DID contain a snapshot
# ('shutoff'), making revert impossible. The error condition is a
# *missing* snapshot, so the match is now negated.
def snapshot_revert
  raise 'a snapshot does not exist' if snapshot_list !~ /shutoff/
  stop
  virsh("snapshot-revert #{fqdn} --current")
end
Power on the virtual machine
# File lib/foreman_vm/domain.rb, line 122
# Power on the virtual machine. Does nothing if it is already running.
def start
  return if domstate == 'running'
  virsh("start #{self.fqdn}")
end
Power off the virtual machine
# File lib/foreman_vm/domain.rb, line 107
# Power off the virtual machine immediately, without a graceful
# shutdown. Does nothing if it is already shut off.
def stop
  return if domstate == 'shut off'
  virsh("destroy #{self.fqdn}")
end
Run a virsh command
# File lib/foreman_vm.rb, line 82
# Run a virsh command against the hypervisor that hosts this VM.
#
# command - the virsh subcommand and its arguments, as one string.
# xml     - optional XML document piped to the command's stdin.
#
# Returns the command's stdout (or a placeholder when xml is given —
# see FIXME below). Raises if the command exits non-zero.
#
# BUGFIX: the exit check compared Process::Status to an Integer
# ($? != 0), a deprecated comparison; it now uses $?.success?.
def virsh(command,xml=nil)
  # KLUDGE: virsh requires the FQDN of the hypervisor
  # while foreman uses shortname
  hypervisor_fqdn = hypervisor
  unless hypervisor_fqdn =~ /\./
    hypervisor_fqdn += '.' + `dnsdomainname`.chomp
  end

  # Credentials for the qemu+tcp connection live next to the library.
  ENV['LIBVIRT_AUTH_FILE'] = File.dirname(__FILE__) + '/../conf/auth.conf'
  buf = "virsh -c qemu+tcp://#{hypervisor_fqdn}/system " + command
  @log.info "running virsh #{command} on #{hypervisor_fqdn}"
  if xml.nil?
    res = `#{buf}`
    raise "virsh command returned #{$?}: #{buf}" unless $?.success?
  else
    # Pipe the XML document to the command's stdin.
    f = IO.popen(buf, 'w')
    f.puts xml
    f.close
    #XXX-FIXME error check
    res = '(FIXME -- NEED TO CAPTURE STDOUT)'
  end
  return res
end
Check if a VM with a given hostname exists.
# File lib/foreman_vm.rb, line 31
# Check whether a VM with the given hostname exists anywhere in the
# cluster.
def vm_exists?(hostname)
  !@cluster.find(hostname).nil?
end
Wipe an existing disk volume to fix the permissions. This is sadly needed to get the uid:gid to be qemu:qemu.
# File lib/foreman_vm/volume.rb, line 41 def wipe_volume delete_volume # BUG: We would like to do this, but it creates the file owned by root:root #virsh "vol-create-as --pool #{@buildspec['storage_pool']} --name #{fqdn()}-disk1 --capacity 30G --format qcow2 --backing-vol #{@buildspec['_disk_backing_file']} --backing-vol-format qcow2" # # WORKAROUND: use an XML volume definition to set the owner/group # xml = "<volume> <name>#{fqdn}-disk1</name> <key>/gvol/images/#{fqdn}-disk1</key> <source> </source> <capacity unit='bytes'>32212254720</capacity> <allocation unit='bytes'>197120</allocation> <target> <path>/gvol/images/#{fqdn}-disk1</path> <format type='raw'/> <permissions> <mode>0660</mode> <owner>107</owner> <group>107</group> </permissions> </target> </volume> " @log.debug "creating volume: #{xml}" virsh("vol-create --pool gvol --file /dev/stdin >/dev/null", xml) end
Private Instance Methods
Returns true if an IP address is already in use. Verify that Foreman did not allocate an IP address that is currently in use This is some extra sanity checking that should be handled in Foreman, but it has failed to detect conflicts in the past.
# File lib/foreman_vm.rb, line 396
# Returns true if an IP address is already in use — i.e. it answers
# ping or has a reverse-DNS entry. Extra sanity checking because
# Foreman has allocated in-use addresses in the past.
#
# Raises ArgumentError unless ipaddr contains only digits and dots
# (this also keeps the address safe to interpolate into the ping
# shell command).
def ip_address_in_use?(ipaddr)
  raise ArgumentError if ipaddr =~ /[^0-9.]/

  has_ping = system "ping -c 3 -W5 #{ipaddr} > /dev/null"

  has_dns = begin
    Resolv.new.getname ipaddr
    true
  rescue Resolv::ResolvError
    false
  end

  #puts "has_ping=#{has_ping.to_s} has_dns=#{has_dns.to_s}"
  has_ping || has_dns
end
Allow memory to be specified with a G/M/K suffix. Returns the number of bytes, as a string.
# File lib/foreman_vm.rb, line 372
# Convert a size string with an optional G/M/K suffix into a byte
# count. Returns the byte count as a string; input without a
# recognized suffix is returned unchanged.
def normalize_memory(s)
  multiplier =
    case s
    when /G$/ then 1024**3
    when /M$/ then 1024**2
    when /K$/ then 1024
    end
  return s if multiplier.nil?
  (s.to_i * multiplier).to_s
end
Refresh a storage pool to detect changes made by other hypervisors
# File lib/foreman_vm.rb, line 385
# Refresh the configured storage pool on the hypervisor named in
# spec['compute_resource'], so changes made by other hypervisors
# become visible. A shortname is qualified with the local DNS domain.
def refresh_storage_pool(spec)
  host = spec['compute_resource']
  host = "#{host}.#{`dnsdomainname`.chomp}" unless host =~ /\./
  @log.debug "refreshing the #{@config.storage_pool} pool on #{host}"
  @cluster.member(host).storage_pool(@config.storage_pool).refresh
end