From c9d8e9b0af4db7df2ca92e01fd6ef886ba2ace4d Mon Sep 17 00:00:00 2001 From: Usama Tahir Date: Mon, 7 Aug 2023 03:05:35 +0500 Subject: [PATCH 01/14] update ssh pub key --- demo2-instance-with-init-script.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/demo2-instance-with-init-script.py b/demo2-instance-with-init-script.py index 3adb7eb..25142dc 100644 --- a/demo2-instance-with-init-script.py +++ b/demo2-instance-with-init-script.py @@ -28,7 +28,7 @@ ubuntu_image_name = "Ubuntu 18.04 - Bionic Beaver - 64-bit - Cloud Based Image" # id_rsa.pub should look like this (standard sshd pubkey format): # ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAw+J...F3w2mleybgT1w== user@HOSTNAME -keypair_name = 'srieger-pub' +keypair_name = 'CloudComp30-keypair' pub_key_file = '~/.ssh/id_rsa.pub' flavor_name = 'm1.small' -- 2.34.1 From 7c2a5341af6fa1f1e99feaf136506e6eae4968e3 Mon Sep 17 00:00:00 2001 From: Usama Tahir Date: Mon, 7 Aug 2023 10:35:28 +0500 Subject: [PATCH 02/14] demo2-instance-with-init-script.py converted to python 3 using 2to3 --- demo2-instance-with-init-script.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/demo2-instance-with-init-script.py b/demo2-instance-with-init-script.py index 25142dc..d7f7cc9 100644 --- a/demo2-instance-with-init-script.py +++ b/demo2-instance-with-init-script.py @@ -104,7 +104,7 @@ def main(): keypair_exists = True if keypair_exists: - print('Keypair ' + keypair_name + ' already exists. Skipping import.') + print(('Keypair ' + keypair_name + ' already exists. Skipping import.')) else: print('adding keypair...') conn.import_key_pair_from_file(keypair_name, pub_key_file) @@ -128,7 +128,7 @@ def main(): security_group_exists = True if security_group_exists: - print('Security Group ' + all_in_one_security_group.name + ' already exists. Skipping creation.') + print(('Security Group ' + all_in_one_security_group.name + ' already exists. Skipping creation.')) else: all_in_one_security_group = conn.ex_create_security_group(security_group_name, 'network access for all-in-one application.') @@ -159,7 +159,7 @@ def main(): instance_exists = True if instance_exists: - print('Instance ' + testing_instance.name + ' already exists. Skipping creation.') + print(('Instance ' + testing_instance.name + ' already exists. Skipping creation.')) exit() else: print('Starting new all-in-one instance and wait until it is running...') @@ -181,12 +181,12 @@ def main(): private_ip = None if len(testing_instance.private_ips): private_ip = testing_instance.private_ips[0] - print('Private IP found: {}'.format(private_ip)) + print(('Private IP found: {}'.format(private_ip))) public_ip = None if len(testing_instance.public_ips): public_ip = testing_instance.public_ips[0] - print('Public IP found: {}'.format(public_ip)) + print(('Public IP found: {}'.format(public_ip))) print('Checking for unused Floating IP...') unused_floating_ip = None @@ -197,11 +197,11 @@ def main(): if not unused_floating_ip and len(conn.ex_list_floating_ip_pools()): pool = conn.ex_list_floating_ip_pools()[0] - print('Allocating new Floating IP from pool: {}'.format(pool)) + print(('Allocating new Floating IP from pool: {}'.format(pool))) unused_floating_ip = pool.create_floating_ip() if public_ip: - print('Instance ' + testing_instance.name + ' already has a public ip. Skipping attachment.') + print(('Instance ' + testing_instance.name + ' already has a public ip. 
Skipping attachment.')) elif unused_floating_ip: conn.ex_attach_floating_ip_to_node(testing_instance, unused_floating_ip) @@ -214,7 +214,7 @@ def main(): actual_ip_address = private_ip print('\n') - print('The Fractals app will be deployed to http://{}\n'.format(actual_ip_address)) + print(('The Fractals app will be deployed to http://{}\n'.format(actual_ip_address))) print('You can use ssh to login to the instance using your private key. Default user name for official Ubuntu\n' 'Cloud Images is: ubuntu, so you can use, e.g.: "ssh -i ~/.ssh/id_rsa ubuntu@" if your private\n' -- 2.34.1 From 33dfb54db2200278d58e954e6a035bb16c91becd Mon Sep 17 00:00:00 2001 From: Usama Tahir Date: Mon, 7 Aug 2023 11:00:31 +0500 Subject: [PATCH 03/14] demo1-getting-started.py converted to python 3 using 2to3 library --- demo1-getting-started.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/demo1-getting-started.py b/demo1-getting-started.py index 3030714..d7ef9cb 100644 --- a/demo1-getting-started.py +++ b/demo1-getting-started.py @@ -50,7 +50,7 @@ def main(): # make sure to include ex_force_auth_version='3.x_password', as needed in our environment provider = get_driver(Provider.OPENSTACK) - print("Opening connection to %s as %s..." % (auth_url, auth_username)) + print(("Opening connection to %s as %s..." % (auth_url, auth_username))) conn = provider(auth_username, auth_password, -- 2.34.1 From 9a998ad6318368365137ec417a69f5007ff3eb21 Mon Sep 17 00:00:00 2001 From: Usama Tahir Date: Mon, 7 Aug 2023 11:40:03 +0500 Subject: [PATCH 04/14] Converted all these files from python 2 to python 3 syntax usong 2to3 library (demo3-microservices.py,demo4-scale-out-add-worker.py,demo4-scale-out.py,demo5-1-durable-storage.py,demo5-2-backup-fractals.py,destroy-all-demo-instances.py) --- demo1-getting-started.py.bak | 126 +++++++++ demo2-instance-with-init-script.py.bak | 230 +++++++++++++++++ demo3-microservice.py | 14 +- demo3-microservice.py.bak | 276 ++++++++++++++++++++ demo4-scale-out-add-worker.py | 10 +- demo4-scale-out-add-worker.py.bak | 190 ++++++++++++++ demo4-scale-out.py | 12 +- demo4-scale-out.py.bak | 345 +++++++++++++++++++++++++ demo5-1-durable-storage.py | 6 +- demo5-1-durable-storage.py.bak | 123 +++++++++ demo5-2-backup-fractals.py | 6 +- demo5-2-backup-fractals.py.bak | 97 +++++++ destroy-all-demo-instances.py | 4 +- destroy-all-demo-instances.py.bak | 97 +++++++ 14 files changed, 1510 insertions(+), 26 deletions(-) create mode 100644 demo1-getting-started.py.bak create mode 100644 demo2-instance-with-init-script.py.bak create mode 100644 demo3-microservice.py.bak create mode 100644 demo4-scale-out-add-worker.py.bak create mode 100644 demo4-scale-out.py.bak create mode 100644 demo5-1-durable-storage.py.bak create mode 100644 demo5-2-backup-fractals.py.bak create mode 100644 destroy-all-demo-instances.py.bak diff --git a/demo1-getting-started.py.bak b/demo1-getting-started.py.bak new file mode 100644 index 0000000..3030714 --- /dev/null +++ b/demo1-getting-started.py.bak @@ -0,0 +1,126 @@ +# Example for Cloud Computing Course Master AI / GSD +# +# uses libCloud: https://libcloud.apache.org/ +# libCloud API documentation: https://libcloud.readthedocs.io/en/latest/ +# OpenStack API documentation: https://developer.openstack.org/ +# this code was initially based on the former tutorial: https://developer.openstack.org/firstapp-libcloud/ + +import getpass + +from libcloud.compute.providers import get_driver +from libcloud.compute.types import Provider + +# Please use 1-29 for X in the 
following variable to specify your group number. (will be used for the username, +# project etc., as coordinated in the lab sessions) + +group_number = 30 + + +######################################################################################################################## +# +# no changes necessary below this line in this example +# +######################################################################################################################## + +# web service endpoint of the private cloud infrastructure +auth_url = 'https://private-cloud.informatik.hs-fulda.de:5000' +# your username in OpenStack +auth_username = 'CloudComp' + str(group_number) +# your project in OpenStack +project_name = 'CloudComp' + str(group_number) +# A network in the project the started instance will be attached to +project_network = 'CloudComp' + str(group_number) + '-net' + +# The image to look for and use for the started instance +ubuntu_image_name = "Ubuntu 18.04 - Bionic Beaver - 64-bit - Cloud Based Image" +# TODO: Ubuntu >18.04 would require major updates to faafo example again/better option: complete rewrite of example? + +# default region +region_name = 'RegionOne' +# domain to use, "default" for local accounts, "hsfulda" for RZ LDAP, e.g., using fdaiXXXX as auth_username +# domain_name = "default" + + +def main(): + # get the password from user + # auth_password = getpass.getpass("Enter your OpenStack password:") + auth_password = "demo" + + # instantiate a connection to the OpenStack private cloud + # make sure to include ex_force_auth_version='3.x_password', as needed in our environment + provider = get_driver(Provider.OPENSTACK) + + print("Opening connection to %s as %s..." % (auth_url, auth_username)) + + conn = provider(auth_username, + auth_password, + ex_force_auth_url=auth_url, + ex_force_auth_version='3.x_password', + ex_tenant_name=project_name, + ex_force_service_region=region_name) + # ex_domain_name=domain_name) + + print("Getting images and selecting desired one...") + print("=========================================================================") + + # get a list of images offered in the cloud context (e.g. Ubuntu 20.04, cirros, ...) + images = conn.list_images() + image = '' + for img in images: + if img.name == ubuntu_image_name: + image = img + print(img) + + print("Getting flavors...") + print("=========================================================================") + + # get a list of flavors offered in the cloud context (e.g. m1.small, m1.medium, ...) 
+ flavors = conn.list_sizes() + for flavor in flavors: + print(flavor) + + print("Selecting desired flavor...") + print("=========================================================================") + + # get the flavor with id 2 + flavor_id = '2' + flavor = conn.ex_get_size(flavor_id) + print(flavor) + + print("Selecting desired network...") + print("=========================================================================") + + # get a list of networks in the cloud context + networks = conn.ex_list_networks() + network = '' + for net in networks: + if net.name == project_network: + network = net + + print("Create instance 'testing'...") + print("=========================================================================") + + # create a new instance with the name "testing" + # make sure to provide networks (networks={network}) the instance should be attached to + instance_name = 'testing' + testing_instance = conn.create_node(name=instance_name, image=image, size=flavor, networks={network}) + print(testing_instance) + + print("Showing all running instances...") + print("=========================================================================") + + # show all instances (running nodes) in the cloud context + instances = conn.list_nodes() + for instance in instances: + print(instance) + + print("Destroying instance...") + print("=========================================================================") + + # destroy the instance we have just created + conn.destroy_node(testing_instance) + + +# method that is called when the script is started from the command line +if __name__ == '__main__': + main() diff --git a/demo2-instance-with-init-script.py.bak b/demo2-instance-with-init-script.py.bak new file mode 100644 index 0000000..25142dc --- /dev/null +++ b/demo2-instance-with-init-script.py.bak @@ -0,0 +1,230 @@ +# import getpass +# import os + +from libcloud.compute.providers import get_driver +from libcloud.compute.types import Provider + +# Please use 1-29 for X in the following variable to specify your group number. (will be used for the username, +# project etc., as coordinated in the lab sessions) + +group_number = 30 + + +# web service endpoint of the private cloud infrastructure +auth_url = 'https://private-cloud.informatik.hs-fulda.de:5000' +# your username in OpenStack +auth_username = 'CloudComp' + str(group_number) +# your project in OpenStack +project_name = 'CloudComp' + str(group_number) +# A network in the project the started instance will be attached to +project_network = 'CloudComp' + str(group_number) + '-net' + +# The image to look for and use for the started instance +ubuntu_image_name = "Ubuntu 18.04 - Bionic Beaver - 64-bit - Cloud Based Image" +# TODO: Ubuntu >18.04 would require major updates to faafo example again/better option: complete rewrite of example? 
+ +# The public key to be used for SSH connection, please make sure, that you have the corresponding private key +# +# id_rsa.pub should look like this (standard sshd pubkey format): +# ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAw+J...F3w2mleybgT1w== user@HOSTNAME + +keypair_name = 'CloudComp30-keypair' +pub_key_file = '~/.ssh/id_rsa.pub' + +flavor_name = 'm1.small' + + +# default region +region_name = 'RegionOne' +# domain to use, "default" for local accounts, "hsfulda" for RZ LDAP, e.g., using fdaiXXXX as auth_username +# domain_name = "default" + + +def main(): + ########################################################################### + # + # get credentials + # + ########################################################################### + + # if "OS_PASSWORD" in os.environ: + # auth_password = os.environ["OS_PASSWORD"] + # else: + # auth_password = getpass.getpass("Enter your OpenStack password:") + auth_password = "demo" + + ########################################################################### + # + # create connection + # + ########################################################################### + + provider = get_driver(Provider.OPENSTACK) + conn = provider(auth_username, + auth_password, + ex_force_auth_url=auth_url, + ex_force_auth_version='3.x_password', + ex_tenant_name=project_name, + ex_force_service_region=region_name) + # ex_domain_name=domain_name) + + ########################################################################### + # + # get image, flavor, network for instance creation + # + ########################################################################### + images = conn.list_images() + image = '' + for img in images: + if img.name == ubuntu_image_name: + image = img + + flavors = conn.list_sizes() + flavor = '' + for flav in flavors: + if flav.name == flavor_name: + flavor = conn.ex_get_size(flav.id) + + networks = conn.ex_list_networks() + network = '' + for net in networks: + if net.name == project_network: + network = net + + ########################################################################### + # + # create keypair dependency + # + ########################################################################### + + print('Checking for existing SSH key pair...') + keypair_exists = False + for keypair in conn.list_key_pairs(): + if keypair.name == keypair_name: + keypair_exists = True + + if keypair_exists: + print('Keypair ' + keypair_name + ' already exists. Skipping import.') + else: + print('adding keypair...') + conn.import_key_pair_from_file(keypair_name, pub_key_file) + + for keypair in conn.list_key_pairs(): + print(keypair) + + ########################################################################### + # + # create security group dependency + # + ########################################################################### + + print('Checking for existing security group...') + security_group_name = 'all-in-one' + security_group_exists = False + all_in_one_security_group = '' + for security_group in conn.ex_list_security_groups(): + if security_group.name == security_group_name: + all_in_one_security_group = security_group + security_group_exists = True + + if security_group_exists: + print('Security Group ' + all_in_one_security_group.name + ' already exists. 
Skipping creation.') + else: + all_in_one_security_group = conn.ex_create_security_group(security_group_name, + 'network access for all-in-one application.') + conn.ex_create_security_group_rule(all_in_one_security_group, 'TCP', 80, 80) + conn.ex_create_security_group_rule(all_in_one_security_group, 'TCP', 22, 22) + + for security_group in conn.ex_list_security_groups(): + print(security_group) + + ########################################################################### + # + # create all-in-one instance + # + ########################################################################### + + userdata = '''#!/usr/bin/env bash + curl -L -s https://gogs.informatik.hs-fulda.de/srieger/cloud-computing-msc-ai-examples/raw/master/faafo/contrib/install.sh | bash -s -- \ + -i faafo -i messaging -r api -r worker -r demo + ''' + + print('Checking for existing instance...') + instance_name = 'all-in-one' + instance_exists = False + testing_instance = '' + for instance in conn.list_nodes(): + if instance.name == instance_name: + testing_instance = instance + instance_exists = True + + if instance_exists: + print('Instance ' + testing_instance.name + ' already exists. Skipping creation.') + exit() + else: + print('Starting new all-in-one instance and wait until it is running...') + testing_instance = conn.create_node(name=instance_name, + image=image, + size=flavor, + networks=[network], + ex_keyname=keypair_name, + ex_userdata=userdata, + ex_security_groups=[all_in_one_security_group]) + conn.wait_until_running(nodes=[testing_instance], timeout=120, ssh_interface='private_ips') + + ########################################################################### + # + # assign all-in-one instance floating ip + # + ########################################################################### + + private_ip = None + if len(testing_instance.private_ips): + private_ip = testing_instance.private_ips[0] + print('Private IP found: {}'.format(private_ip)) + + public_ip = None + if len(testing_instance.public_ips): + public_ip = testing_instance.public_ips[0] + print('Public IP found: {}'.format(public_ip)) + + print('Checking for unused Floating IP...') + unused_floating_ip = None + for floating_ip in conn.ex_list_floating_ips(): + if not floating_ip.node_id: + unused_floating_ip = floating_ip + break + + if not unused_floating_ip and len(conn.ex_list_floating_ip_pools()): + pool = conn.ex_list_floating_ip_pools()[0] + print('Allocating new Floating IP from pool: {}'.format(pool)) + unused_floating_ip = pool.create_floating_ip() + + if public_ip: + print('Instance ' + testing_instance.name + ' already has a public ip. Skipping attachment.') + elif unused_floating_ip: + conn.ex_attach_floating_ip_to_node(testing_instance, unused_floating_ip) + + actual_ip_address = None + if public_ip: + actual_ip_address = public_ip + elif unused_floating_ip: + actual_ip_address = unused_floating_ip.ip_address + elif private_ip: + actual_ip_address = private_ip + + print('\n') + print('The Fractals app will be deployed to http://{}\n'.format(actual_ip_address)) + + print('You can use ssh to login to the instance using your private key. Default user name for official Ubuntu\n' + 'Cloud Images is: ubuntu, so you can use, e.g.: "ssh -i ~/.ssh/id_rsa ubuntu@" if your private\n' + 'key is in the default location.\n\n' + 'After login, you can list or "ssh ubuntu@" available fractals using "faafo list". 
To request\n' + 'the generation of new fractals, you can use "faafo create".\n\n' + 'You can also see other options to use the faafo example cloud service using "faafo -h".\n\n' + 'If you cannot start faafo command and/or do not see the webpage, you can check the Instance Console Log of\n' + 'the instance, e.g., in OpenStack web interface.') + + +if __name__ == '__main__': + main() diff --git a/demo3-microservice.py b/demo3-microservice.py index 4c1a5ad..d54cda7 100644 --- a/demo3-microservice.py +++ b/demo3-microservice.py @@ -109,7 +109,7 @@ def main(): keypair_exists = True if keypair_exists: - print('Keypair ' + keypair_name + ' already exists. Skipping import.') + print(('Keypair ' + keypair_name + ' already exists. Skipping import.')) else: print('adding keypair...') conn.import_key_pair_from_file(keypair_name, pub_key_file) @@ -133,7 +133,7 @@ def main(): security_group_exists = True if security_group_exists: - print('Worker Security Group ' + worker_security_group.name + ' already exists. Skipping creation.') + print(('Worker Security Group ' + worker_security_group.name + ' already exists. Skipping creation.')) else: worker_security_group = conn.ex_create_security_group('worker', 'for services that run on a worker node') conn.ex_create_security_group_rule(worker_security_group, 'TCP', 22, 22) @@ -148,7 +148,7 @@ def main(): security_group_exists = True if security_group_exists: - print('Controller Security Group ' + controller_security_group.name + ' already exists. Skipping creation.') + print(('Controller Security Group ' + controller_security_group.name + ' already exists. Skipping creation.')) else: controller_security_group = conn.ex_create_security_group('control', 'for services that run on a control node') conn.ex_create_security_group_rule(controller_security_group, 'TCP', 22, 22) @@ -205,11 +205,11 @@ def main(): if not unused_floating_ip: pool = conn.ex_list_floating_ip_pools()[0] - print('Allocating new Floating IP from pool: {}'.format(pool)) + print(('Allocating new Floating IP from pool: {}'.format(pool))) unused_floating_ip = pool.create_floating_ip() conn.ex_attach_floating_ip_to_node(instance_controller_1, unused_floating_ip) - print('Controller Application will be deployed to http://%s' % unused_floating_ip.ip_address) + print(('Controller Application will be deployed to http://%s' % unused_floating_ip.ip_address)) ########################################################################### # @@ -261,11 +261,11 @@ def main(): if not unused_floating_ip: pool = conn.ex_list_floating_ip_pools()[0] - print('Allocating new Floating IP from pool: {}'.format(pool)) + print(('Allocating new Floating IP from pool: {}'.format(pool))) unused_floating_ip = pool.create_floating_ip() conn.ex_attach_floating_ip_to_node(instance_worker_1, unused_floating_ip) - print('The worker will be available for SSH at %s' % unused_floating_ip.ip_address) + print(('The worker will be available for SSH at %s' % unused_floating_ip.ip_address)) print('You can use ssh to login to the controller using your private key. After login, you can list available ' 'fractals using "faafo list". To request the generation of new fractals, you can use "faafo create". 
' diff --git a/demo3-microservice.py.bak b/demo3-microservice.py.bak new file mode 100644 index 0000000..4c1a5ad --- /dev/null +++ b/demo3-microservice.py.bak @@ -0,0 +1,276 @@ +# import getpass +# import os + +from libcloud.compute.providers import get_driver +from libcloud.compute.types import Provider + +# reqs: +# services: nova, glance, neutron +# resources: 2 instances, 2 floating ips (1 keypair, 2 security groups) + +# Please use 1-29 for X in the following variable to specify your group number. (will be used for the username, +# project etc., as coordinated in the lab sessions) + +group_number = 30 + + +# web service endpoint of the private cloud infrastructure +auth_url = 'https://private-cloud.informatik.hs-fulda.de:5000' +# your username in OpenStack +auth_username = 'CloudComp' + str(group_number) +# your project in OpenStack +project_name = 'CloudComp' + str(group_number) +# A network in the project the started instance will be attached to +project_network = 'CloudComp' + str(group_number) + '-net' + +# The image to look for and use for the started instance +ubuntu_image_name = "Ubuntu 18.04 - Bionic Beaver - 64-bit - Cloud Based Image" +# TODO: Ubuntu >18.04 would require major updates to faafo example again/better option: complete rewrite of example? + +# The public key to be used for SSH connection, please make sure, that you have the corresponding private key +# +# id_rsa.pub should look like this (standard sshd pubkey format): +# ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAw+J...F3w2mleybgT1w== user@HOSTNAME + +keypair_name = 'srieger-pub' +pub_key_file = '~/.ssh/id_rsa.pub' + +flavor_name = 'm1.small' + + +# default region +region_name = 'RegionOne' +# domain to use, "default" for local accounts, "hsfulda" for RZ LDAP, e.g., using fdaiXXXX as auth_username +# domain_name = "default" + + +def main(): + ########################################################################### + # + # get credentials + # + ########################################################################### + + # if "OS_PASSWORD" in os.environ: + # auth_password = os.environ["OS_PASSWORD"] + # else: + # auth_password = getpass.getpass("Enter your OpenStack password:") + auth_password = "demo" + + ########################################################################### + # + # create connection + # + ########################################################################### + + provider = get_driver(Provider.OPENSTACK) + conn = provider(auth_username, + auth_password, + ex_force_auth_url=auth_url, + ex_force_auth_version='3.x_password', + ex_tenant_name=project_name, + ex_force_service_region=region_name) + # ex_domain_name=domain_name) + + ########################################################################### + # + # get image, flavor, network for instance creation + # + ########################################################################### + + images = conn.list_images() + image = '' + for img in images: + if img.name == ubuntu_image_name: + image = img + + flavors = conn.list_sizes() + flavor = '' + for flav in flavors: + if flav.name == flavor_name: + flavor = conn.ex_get_size(flav.id) + + networks = conn.ex_list_networks() + network = '' + for net in networks: + if net.name == project_network: + network = net + + ########################################################################### + # + # create keypair dependency + # + ########################################################################### + + print('Checking for existing SSH key pair...') + keypair_exists = False + for 
keypair in conn.list_key_pairs(): + if keypair.name == keypair_name: + keypair_exists = True + + if keypair_exists: + print('Keypair ' + keypair_name + ' already exists. Skipping import.') + else: + print('adding keypair...') + conn.import_key_pair_from_file(keypair_name, pub_key_file) + + for keypair in conn.list_key_pairs(): + print(keypair) + + ########################################################################### + # + # create security group dependency + # + ########################################################################### + + print('Checking for existing worker security group...') + security_group_name = 'worker' + security_group_exists = False + worker_security_group = '' + for security_group in conn.ex_list_security_groups(): + if security_group.name == security_group_name: + worker_security_group = security_group + security_group_exists = True + + if security_group_exists: + print('Worker Security Group ' + worker_security_group.name + ' already exists. Skipping creation.') + else: + worker_security_group = conn.ex_create_security_group('worker', 'for services that run on a worker node') + conn.ex_create_security_group_rule(worker_security_group, 'TCP', 22, 22) + + print('Checking for existing controller security group...') + security_group_name = 'control' + security_group_exists = False + controller_security_group = '' + for security_group in conn.ex_list_security_groups(): + if security_group.name == security_group_name: + controller_security_group = security_group + security_group_exists = True + + if security_group_exists: + print('Controller Security Group ' + controller_security_group.name + ' already exists. Skipping creation.') + else: + controller_security_group = conn.ex_create_security_group('control', 'for services that run on a control node') + conn.ex_create_security_group_rule(controller_security_group, 'TCP', 22, 22) + conn.ex_create_security_group_rule(controller_security_group, 'TCP', 80, 80) + conn.ex_create_security_group_rule(controller_security_group, 'TCP', 5672, 5672, + source_security_group=worker_security_group) + + for security_group in conn.ex_list_security_groups(): + print(security_group) + + ########################################################################### + # + # create app-controller + # + ########################################################################### + + # https://git.openstack.org/cgit/openstack/faafo/plain/contrib/install.sh + # is currently broken, hence the "rabbitctl" lines were added in the example + # below, see also https://bugs.launchpad.net/faafo/+bug/1679710 + # + # Thanks to Stefan Friedmann for finding this fix ;) + + userdata = '''#!/usr/bin/env bash + curl -L -s https://gogs.informatik.hs-fulda.de/srieger/cloud-computing-msc-ai-examples/raw/master/faafo/contrib/install.sh | bash -s -- \ + -i messaging -i faafo -r api + rabbitmqctl add_user faafo guest + rabbitmqctl set_user_tags faafo administrator + rabbitmqctl set_permissions -p / faafo ".*" ".*" ".*" + ''' + + print('Starting new app-controller instance and wait until it is running...') + instance_controller_1 = conn.create_node(name='app-controller', + image=image, + size=flavor, + networks=[network], + ex_keyname=keypair_name, + ex_userdata=userdata, + ex_security_groups=[controller_security_group]) + + conn.wait_until_running(nodes=[instance_controller_1], timeout=120, ssh_interface='private_ips') + + ########################################################################### + # + # assign app-controller floating ip + # + 
########################################################################### + + print('Checking for unused Floating IP...') + unused_floating_ip = None + for floating_ip in conn.ex_list_floating_ips(): + if not floating_ip.node_id: + unused_floating_ip = floating_ip + break + + if not unused_floating_ip: + pool = conn.ex_list_floating_ip_pools()[0] + print('Allocating new Floating IP from pool: {}'.format(pool)) + unused_floating_ip = pool.create_floating_ip() + + conn.ex_attach_floating_ip_to_node(instance_controller_1, unused_floating_ip) + print('Controller Application will be deployed to http://%s' % unused_floating_ip.ip_address) + + ########################################################################### + # + # getting id and ip address of app-controller instance + # + ########################################################################### + + # instance should not have a public ip? floating ips are assigned later + instance_controller_1 = conn.ex_get_node_details(instance_controller_1.id) + if instance_controller_1.public_ips: + ip_controller = instance_controller_1.public_ips[0] + else: + ip_controller = instance_controller_1.private_ips[0] + + ########################################################################### + # + # create app-worker-1 + # + ########################################################################### + + userdata = '''#!/usr/bin/env bash + curl -L -s https://gogs.informatik.hs-fulda.de/srieger/cloud-computing-msc-ai-examples/raw/master/faafo/contrib/install.sh | bash -s -- \ + -i faafo -r worker -e 'http://%(ip_controller)s' -m 'amqp://faafo:guest@%(ip_controller)s:5672/' + ''' % {'ip_controller': ip_controller} + + print('Starting new app-worker-1 instance and wait until it is running...') + instance_worker_1 = conn.create_node(name='app-worker-1', + image=image, + size=flavor, + networks=[network], + ex_keyname=keypair_name, + ex_userdata=userdata, + ex_security_groups=[worker_security_group]) + + conn.wait_until_running(nodes=[instance_worker_1], timeout=120, ssh_interface='private_ips') + + ########################################################################### + # + # assign app-worker floating ip + # + ########################################################################### + + print('Checking for unused Floating IP...') + unused_floating_ip = None + for floating_ip in conn.ex_list_floating_ips(): + if not floating_ip.node_id: + unused_floating_ip = floating_ip + break + + if not unused_floating_ip: + pool = conn.ex_list_floating_ip_pools()[0] + print('Allocating new Floating IP from pool: {}'.format(pool)) + unused_floating_ip = pool.create_floating_ip() + + conn.ex_attach_floating_ip_to_node(instance_worker_1, unused_floating_ip) + print('The worker will be available for SSH at %s' % unused_floating_ip.ip_address) + + print('You can use ssh to login to the controller using your private key. After login, you can list available ' + 'fractals using "faafo list". To request the generation of new fractals, you can use "faafo create". 
' + 'You can also see other options to use the faafo example cloud service using "faafo -h".') + + +if __name__ == '__main__': + main() diff --git a/demo4-scale-out-add-worker.py b/demo4-scale-out-add-worker.py index 14b40b1..d092b59 100644 --- a/demo4-scale-out-add-worker.py +++ b/demo4-scale-out-add-worker.py @@ -111,10 +111,10 @@ def main(): for instance in conn.list_nodes(): if instance.name == 'app-services': services_ip = instance.private_ips[0] - print('Found app-services fixed IP to be: ', services_ip) + print(('Found app-services fixed IP to be: ', services_ip)) if instance.name == 'app-api-1': api_1_ip = instance.private_ips[0] - print('Found app-api-1 fixed IP to be: ', api_1_ip) + print(('Found app-api-1 fixed IP to be: ', api_1_ip)) ########################################################################### # @@ -129,7 +129,7 @@ def main(): keypair_exists = True if keypair_exists: - print('Keypair ' + keypair_name + ' already exists. Skipping import.') + print(('Keypair ' + keypair_name + ' already exists. Skipping import.')) else: print('adding keypair...') conn.import_key_pair_from_file(keypair_name, pub_key_file) @@ -145,10 +145,10 @@ def main(): def get_security_group(connection, security_group_name): """A helper function to check if security group already exists""" - print('Checking for existing ' + security_group_name + ' security group...') + print(('Checking for existing ' + security_group_name + ' security group...')) for security_grp in connection.ex_list_security_groups(): if security_grp.name == security_group_name: - print('Security Group ' + security_group_name + ' already exists. Skipping creation.') + print(('Security Group ' + security_group_name + ' already exists. Skipping creation.')) return security_grp return False diff --git a/demo4-scale-out-add-worker.py.bak b/demo4-scale-out-add-worker.py.bak new file mode 100644 index 0000000..14b40b1 --- /dev/null +++ b/demo4-scale-out-add-worker.py.bak @@ -0,0 +1,190 @@ +# import getpass +# import os +# import libcloud.security + +import time +from libcloud.compute.providers import get_driver +from libcloud.compute.types import Provider + +# reqs: +# services: nova, glance, neutron +# resources: 2 instances (m1.small), 2 floating ips (1 keypair, 2 security groups) + +# Please use 1-29 for X in the following variable to specify your group number. (will be used for the username, +# project etc., as coordinated in the lab sessions) + +group_number = 30 + + +# web service endpoint of the private cloud infrastructure +auth_url = 'https://private-cloud.informatik.hs-fulda.de:5000' +# your username in OpenStack +auth_username = 'CloudComp' + str(group_number) +# your project in OpenStack +project_name = 'CloudComp' + str(group_number) +# A network in the project the started instance will be attached to +project_network = 'CloudComp' + str(group_number) + '-net' + +# The image to look for and use for the started instance +ubuntu_image_name = "Ubuntu 18.04 - Bionic Beaver - 64-bit - Cloud Based Image" +# TODO: Ubuntu >18.04 would require major updates to faafo example again/better option: complete rewrite of example? 
+ +# The public key to be used for SSH connection, please make sure, that you have the corresponding private key +# +# id_rsa.pub should look like this (standard sshd pubkey format): +# ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAw+J...F3w2mleybgT1w== user@HOSTNAME + +keypair_name = 'srieger-pub' +pub_key_file = '~/.ssh/id_rsa.pub' + +flavor_name = 'm1.small' + + +# default region +region_name = 'RegionOne' +# domain to use, "default" for local accounts, "hsfulda" for RZ LDAP, e.g., using fdaiXXXX as auth_username +domain_name = "default" + + +def main(): + ########################################################################### + # + # get credentials + # + ########################################################################### + + # if "OS_PASSWORD" in os.environ: + # auth_password = os.environ["OS_PASSWORD"] + # else: + # auth_password = getpass.getpass("Enter your OpenStack password:") + auth_password = "demo" + + ########################################################################### + # + # create connection + # + ########################################################################### + + # libcloud.security.VERIFY_SSL_CERT = False + + provider = get_driver(Provider.OPENSTACK) + conn = provider(auth_username, + auth_password, + ex_force_auth_url=auth_url, + ex_force_auth_version='3.x_password', + ex_tenant_name=project_name, + ex_force_service_region=region_name, + ex_domain_name=domain_name) + + ########################################################################### + # + # get image, flavor, network for instance creation + # + ########################################################################### + + images = conn.list_images() + image = '' + for img in images: + if img.name == ubuntu_image_name: + image = img + + flavors = conn.list_sizes() + flavor = '' + for flav in flavors: + if flav.name == flavor_name: + flavor = conn.ex_get_size(flav.id) + + networks = conn.ex_list_networks() + network = '' + for net in networks: + if net.name == project_network: + network = net + + ########################################################################### + # + # get fixed a ip for service and api instance + # (better would be shared IP for the cluster etc.) + # + ########################################################################### + + # find service instance + for instance in conn.list_nodes(): + if instance.name == 'app-services': + services_ip = instance.private_ips[0] + print('Found app-services fixed IP to be: ', services_ip) + if instance.name == 'app-api-1': + api_1_ip = instance.private_ips[0] + print('Found app-api-1 fixed IP to be: ', api_1_ip) + + ########################################################################### + # + # create keypair dependency + # + ########################################################################### + + print('Checking for existing SSH key pair...') + keypair_exists = False + for keypair in conn.list_key_pairs(): + if keypair.name == keypair_name: + keypair_exists = True + + if keypair_exists: + print('Keypair ' + keypair_name + ' already exists. 
Skipping import.') + else: + print('adding keypair...') + conn.import_key_pair_from_file(keypair_name, pub_key_file) + + for keypair in conn.list_key_pairs(): + print(keypair) + + ########################################################################### + # + # create security group dependency + # + ########################################################################### + + def get_security_group(connection, security_group_name): + """A helper function to check if security group already exists""" + print('Checking for existing ' + security_group_name + ' security group...') + for security_grp in connection.ex_list_security_groups(): + if security_grp.name == security_group_name: + print('Security Group ' + security_group_name + ' already exists. Skipping creation.') + return security_grp + return False + + if not get_security_group(conn, "worker"): + worker_security_group = conn.ex_create_security_group('worker', 'for services that run on a worker node') + conn.ex_create_security_group_rule(worker_security_group, 'TCP', 22, 22) + else: + worker_security_group = get_security_group(conn, "worker") + + for security_group in conn.ex_list_security_groups(): + print(security_group) + + ########################################################################### + # + # create worker instances + # + ########################################################################### + + userdata_worker = '''#!/usr/bin/env bash + curl -L -s https://gogs.informatik.hs-fulda.de/srieger/cloud-computing-msc-ai-examples/raw/master/faafo/contrib/install.sh | bash -s -- \ + -i faafo -r worker -e 'http://%(api_1_ip)s' -m 'amqp://faafo:guest@%(services_ip)s:5672/' + ''' % {'api_1_ip': api_1_ip, 'services_ip': services_ip} + + # userdata-api-2 = '''#!/usr/bin/env bash + # curl -L -s https://gogs.informatik.hs-fulda.de/srieger/cloud-computing-msc-ai-examples/raw/master/faafo/contrib/install.sh | bash -s -- \ + # -i faafo -r worker -e 'http://%(api_2_ip)s' -m 'amqp://faafo:guest@%(services_ip)s:5672/' + # ''' % {'api_2_ip': api_2_ip, 'services_ip': services_ip} + + print('Starting new app-worker-3 instance and wait until it is running...') + instance_worker_3 = conn.create_node(name='app-worker-3', + image=image, size=flavor, + networks=[network], + ex_keyname=keypair_name, + ex_userdata=userdata_worker, + ex_security_groups=[worker_security_group]) + + +if __name__ == '__main__': + main() diff --git a/demo4-scale-out.py b/demo4-scale-out.py index 86e1a24..4e60d1a 100644 --- a/demo4-scale-out.py +++ b/demo4-scale-out.py @@ -113,7 +113,7 @@ def main(): keypair_exists = True if keypair_exists: - print('Keypair ' + keypair_name + ' already exists. Skipping import.') + print(('Keypair ' + keypair_name + ' already exists. 
Skipping import.')) else: print('adding keypair...') conn.import_key_pair_from_file(keypair_name, pub_key_file) @@ -131,7 +131,7 @@ def main(): for instance in conn.list_nodes(): if instance.name in ['all-in-one', 'app-worker-1', 'app-worker-2', 'app-worker-3', 'app-controller', 'app-services', 'app-api-1', 'app-api-2']: - print('Destroying Instance: %s' % instance.name) + print(('Destroying Instance: %s' % instance.name)) conn.destroy_node(instance) # wait until all nodes are destroyed to be able to remove depended security groups @@ -149,7 +149,7 @@ def main(): # delete security groups for group in conn.ex_list_security_groups(): if group.name in ['control', 'worker', 'api', 'services']: - print('Deleting security group: %s' % group.name) + print(('Deleting security group: %s' % group.name)) conn.ex_delete_security_group(group) ########################################################################### @@ -160,10 +160,10 @@ def main(): def get_security_group(connection, security_group_name): """A helper function to check if security group already exists""" - print('Checking for existing ' + security_group_name + ' security group...') + print(('Checking for existing ' + security_group_name + ' security group...')) for security_grp in connection.ex_list_security_groups(): if security_grp.name == security_group_name: - print('Security Group ' + security_group_name + ' already exists. Skipping creation.') + print(('Security Group ' + security_group_name + ' already exists. Skipping creation.')) return worker_security_group return False @@ -292,7 +292,7 @@ def main(): for instance in [instance_api_1, instance_api_2]: floating_ip = get_floating_ip(conn) conn.ex_attach_floating_ip_to_node(instance, floating_ip) - print('allocated %(ip)s to %(host)s' % {'ip': floating_ip.ip_address, 'host': instance.name}) + print(('allocated %(ip)s to %(host)s' % {'ip': floating_ip.ip_address, 'host': instance.name})) ########################################################################### # diff --git a/demo4-scale-out.py.bak b/demo4-scale-out.py.bak new file mode 100644 index 0000000..86e1a24 --- /dev/null +++ b/demo4-scale-out.py.bak @@ -0,0 +1,345 @@ +# import getpass +# import os +# import libcloud.security + +import time +from libcloud.compute.providers import get_driver +from libcloud.compute.types import Provider + +# reqs: +# services: nova, glance, neutron +# resources: 2 instances (m1.small), 2 floating ips (1 keypair, 2 security groups) + +# Please use 1-29 for X in the following variable to specify your group number. (will be used for the username, +# project etc., as coordinated in the lab sessions) + +group_number = 30 + + +# web service endpoint of the private cloud infrastructure +auth_url = 'https://private-cloud.informatik.hs-fulda.de:5000' +# your username in OpenStack +auth_username = 'CloudComp' + str(group_number) +# your project in OpenStack +project_name = 'CloudComp' + str(group_number) +# A network in the project the started instance will be attached to +project_network = 'CloudComp' + str(group_number) + '-net' + +# The image to look for and use for the started instance +ubuntu_image_name = "Ubuntu 18.04 - Bionic Beaver - 64-bit - Cloud Based Image" +# TODO: Ubuntu >18.04 would require major updates to faafo example again/better option: complete rewrite of example? 
+ +# The public key to be used for SSH connection, please make sure, that you have the corresponding private key +# +# id_rsa.pub should look like this (standard sshd pubkey format): +# ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAw+J...F3w2mleybgT1w== user@HOSTNAME + +keypair_name = 'srieger-pub' +pub_key_file = '~/.ssh/id_rsa.pub' + +flavor_name = 'm1.small' + + +# default region +region_name = 'RegionOne' +# domain to use, "default" for local accounts, "hsfulda" for RZ LDAP, e.g., using fdaiXXXX as auth_username +domain_name = "default" + + +def main(): + ########################################################################### + # + # get credentials + # + ########################################################################### + + # if "OS_PASSWORD" in os.environ: + # auth_password = os.environ["OS_PASSWORD"] + # else: + # auth_password = getpass.getpass("Enter your OpenStack password:") + auth_password = "demo" + + ########################################################################### + # + # create connection + # + ########################################################################### + + # libcloud.security.VERIFY_SSL_CERT = False + + provider = get_driver(Provider.OPENSTACK) + conn = provider(auth_username, + auth_password, + ex_force_auth_url=auth_url, + ex_force_auth_version='3.x_password', + ex_tenant_name=project_name, + ex_force_service_region=region_name, + ex_domain_name=domain_name) + + ########################################################################### + # + # get image, flavor, network for instance creation + # + ########################################################################### + + images = conn.list_images() + image = '' + for img in images: + if img.name == ubuntu_image_name: + image = img + + flavors = conn.list_sizes() + flavor = '' + for flav in flavors: + if flav.name == flavor_name: + flavor = conn.ex_get_size(flav.id) + + networks = conn.ex_list_networks() + network = '' + for net in networks: + if net.name == project_network: + network = net + + ########################################################################### + # + # create keypair dependency + # + ########################################################################### + + print('Checking for existing SSH key pair...') + keypair_exists = False + for keypair in conn.list_key_pairs(): + if keypair.name == keypair_name: + keypair_exists = True + + if keypair_exists: + print('Keypair ' + keypair_name + ' already exists. 
Skipping import.') + else: + print('adding keypair...') + conn.import_key_pair_from_file(keypair_name, pub_key_file) + + for keypair in conn.list_key_pairs(): + print(keypair) + + ########################################################################### + # + # clean up resources from previous demos + # + ########################################################################### + + # destroy running demo instances + for instance in conn.list_nodes(): + if instance.name in ['all-in-one', 'app-worker-1', 'app-worker-2', 'app-worker-3', 'app-controller', + 'app-services', 'app-api-1', 'app-api-2']: + print('Destroying Instance: %s' % instance.name) + conn.destroy_node(instance) + + # wait until all nodes are destroyed to be able to remove depended security groups + nodes_still_running = True + while nodes_still_running: + nodes_still_running = False + time.sleep(3) + instances = conn.list_nodes() + for instance in instances: + # if we see any demo instances still running continue to wait for them to stop + if instance.name in ['all-in-one', 'app-worker-1', 'app-worker-2', 'app-controller']: + nodes_still_running = True + print('There are still instances running, waiting for them to be destroyed...') + + # delete security groups + for group in conn.ex_list_security_groups(): + if group.name in ['control', 'worker', 'api', 'services']: + print('Deleting security group: %s' % group.name) + conn.ex_delete_security_group(group) + + ########################################################################### + # + # create security group dependency + # + ########################################################################### + + def get_security_group(connection, security_group_name): + """A helper function to check if security group already exists""" + print('Checking for existing ' + security_group_name + ' security group...') + for security_grp in connection.ex_list_security_groups(): + if security_grp.name == security_group_name: + print('Security Group ' + security_group_name + ' already exists. 
Skipping creation.') + return worker_security_group + return False + + if not get_security_group(conn, "api"): + api_security_group = conn.ex_create_security_group('api', 'for API services only') + conn.ex_create_security_group_rule(api_security_group, 'TCP', 80, 80) + conn.ex_create_security_group_rule(api_security_group, 'TCP', 22, 22) + else: + api_security_group = get_security_group(conn, "api") + + if not get_security_group(conn, "worker"): + worker_security_group = conn.ex_create_security_group('worker', 'for services that run on a worker node') + conn.ex_create_security_group_rule(worker_security_group, 'TCP', 22, 22) + else: + worker_security_group = get_security_group(conn, "worker") + + if not get_security_group(conn, "control"): + controller_security_group = conn.ex_create_security_group('control', 'for services that run on a control node') + conn.ex_create_security_group_rule(controller_security_group, 'TCP', 22, 22) + conn.ex_create_security_group_rule(controller_security_group, 'TCP', 80, 80) + conn.ex_create_security_group_rule(controller_security_group, 'TCP', 5672, 5672, + source_security_group=worker_security_group) + + if not get_security_group(conn, "services"): + services_security_group = conn.ex_create_security_group('services', 'for DB and AMQP services only') + conn.ex_create_security_group_rule(services_security_group, 'TCP', 22, 22) + conn.ex_create_security_group_rule(services_security_group, 'TCP', 3306, 3306, + source_security_group=api_security_group) + conn.ex_create_security_group_rule(services_security_group, 'TCP', 5672, 5672, + source_security_group=worker_security_group) + conn.ex_create_security_group_rule(services_security_group, 'TCP', 5672, 5672, + source_security_group=api_security_group) + else: + services_security_group = get_security_group(conn, "services") + + for security_group in conn.ex_list_security_groups(): + print(security_group) + + ########################################################################### + # + # get floating ip helper function + # + ########################################################################### + + def get_floating_ip(connection): + """A helper function to re-use available Floating IPs""" + unused_floating_ip = None + for float_ip in connection.ex_list_floating_ips(): + if not float_ip.node_id: + unused_floating_ip = float_ip + break + if not unused_floating_ip: + pool = connection.ex_list_floating_ip_pools()[0] + unused_floating_ip = pool.create_floating_ip() + return unused_floating_ip + + ########################################################################### + # + # create app-services instance (database & messaging) + # + ########################################################################### + + # https://git.openstack.org/cgit/openstack/faafo/plain/contrib/install.sh + # is currently broken, hence the "rabbitctl" lines were added in the example + # below, see also https://bugs.launchpad.net/faafo/+bug/1679710 + # + # Thanks to Stefan Friedmann for finding this fix ;) + + userdata_service = '''#!/usr/bin/env bash + curl -L -s https://gogs.informatik.hs-fulda.de/srieger/cloud-computing-msc-ai-examples/raw/master/faafo/contrib/install.sh | bash -s -- \ + -i database -i messaging + rabbitmqctl add_user faafo guest + rabbitmqctl set_user_tags faafo administrator + rabbitmqctl set_permissions -p / faafo ".*" ".*" ".*" + ''' + + print('Starting new app-services instance and wait until it is running...') + instance_services = conn.create_node(name='app-services', + image=image, + size=flavor, + 
networks=[network], + ex_keyname=keypair_name, + ex_userdata=userdata_service, + ex_security_groups=[services_security_group]) + instance_services = conn.wait_until_running(nodes=[instance_services], timeout=120, + ssh_interface='private_ips')[0][0] + services_ip = instance_services.private_ips[0] + + ########################################################################### + # + # create app-api instances + # + ########################################################################### + + userdata_api = '''#!/usr/bin/env bash + curl -L -s https://gogs.informatik.hs-fulda.de/srieger/cloud-computing-msc-ai-examples/raw/master/faafo/contrib/install.sh | bash -s -- \ + -i faafo -r api -m 'amqp://faafo:guest@%(services_ip)s:5672/' \ + -d 'mysql+pymysql://faafo:password@%(services_ip)s:3306/faafo' + ''' % {'services_ip': services_ip} + + print('Starting new app-api-1 instance and wait until it is running...') + instance_api_1 = conn.create_node(name='app-api-1', + image=image, + size=flavor, + networks=[network], + ex_keyname=keypair_name, + ex_userdata=userdata_api, + ex_security_groups=[api_security_group]) + + print('Starting new app-api-2 instance and wait until it is running...') + instance_api_2 = conn.create_node(name='app-api-2', + image=image, + size=flavor, + networks=[network], + ex_keyname=keypair_name, + ex_userdata=userdata_api, + ex_security_groups=[api_security_group]) + + instance_api_1 = conn.wait_until_running(nodes=[instance_api_1], timeout=120, + ssh_interface='private_ips')[0][0] + api_1_ip = instance_api_1.private_ips[0] + instance_api_2 = conn.wait_until_running(nodes=[instance_api_2], timeout=120, + ssh_interface='private_ips')[0][0] + # api_2_ip = instance_api_2.private_ips[0] + + for instance in [instance_api_1, instance_api_2]: + floating_ip = get_floating_ip(conn) + conn.ex_attach_floating_ip_to_node(instance, floating_ip) + print('allocated %(ip)s to %(host)s' % {'ip': floating_ip.ip_address, 'host': instance.name}) + + ########################################################################### + # + # create worker instances + # + ########################################################################### + + userdata_worker = '''#!/usr/bin/env bash + curl -L -s https://gogs.informatik.hs-fulda.de/srieger/cloud-computing-msc-ai-examples/raw/master/faafo/contrib/install.sh | bash -s -- \ + -i faafo -r worker -e 'http://%(api_1_ip)s' -m 'amqp://faafo:guest@%(services_ip)s:5672/' + ''' % {'api_1_ip': api_1_ip, 'services_ip': services_ip} + + # userdata_api-api-2 = '''#!/usr/bin/env bash + # curl -L -s https://gogs.informatik.hs-fulda.de/srieger/cloud-computing-msc-ai-examples/raw/master/faafo/contrib/install.sh | bash -s -- \ + # -i faafo -r worker -e 'http://%(api_2_ip)s' -m 'amqp://faafo:guest@%(services_ip)s:5672/' + # ''' % {'api_2_ip': api_2_ip, 'services_ip': services_ip} + + print('Starting new app-worker-1 instance and wait until it is running...') + instance_worker_1 = conn.create_node(name='app-worker-1', + image=image, size=flavor, + networks=[network], + ex_keyname=keypair_name, + ex_userdata=userdata_worker, + ex_security_groups=[worker_security_group]) + + print('Starting new app-worker-2 instance and wait until it is running...') + instance_worker_2 = conn.create_node(name='app-worker-2', + image=image, size=flavor, + networks=[network], + ex_keyname=keypair_name, + ex_userdata=userdata_worker, + ex_security_groups=[worker_security_group]) + + # do not start worker 3 initially, can be started using scale-out-add-worker.py demo + + #print('Starting 
new app-worker-3 instance and wait until it is running...') + #instance_worker_3 = conn.create_node(name='app-worker-3', + # image=image, size=flavor, + # networks=[network], + # ex_keyname=keypair_name, + # ex_userdata=userdata_worker, + # ex_security_groups=[worker_security_group]) + + print(instance_worker_1) + print(instance_worker_2) + #print(instance_worker_3) + + +if __name__ == '__main__': + main() diff --git a/demo5-1-durable-storage.py b/demo5-1-durable-storage.py index 8ffc581..79d2b8f 100644 --- a/demo5-1-durable-storage.py +++ b/demo5-1-durable-storage.py @@ -1,4 +1,4 @@ -from __future__ import print_function + import getpass import os @@ -27,12 +27,12 @@ def main(): if "OS_PROJECT_NAME" in os.environ: project_name = os.environ["OS_PROJECT_NAME"] else: - project_name = input("Enter your OpenStack project:") + project_name = eval(input("Enter your OpenStack project:")) if "OS_USERNAME" in os.environ: auth_username = os.environ["OS_USERNAME"] else: - auth_username = input("Enter your OpenStack username:") + auth_username = eval(input("Enter your OpenStack username:")) if "OS_PASSWORD" in os.environ: auth_password = os.environ["OS_PASSWORD"] diff --git a/demo5-1-durable-storage.py.bak b/demo5-1-durable-storage.py.bak new file mode 100644 index 0000000..8ffc581 --- /dev/null +++ b/demo5-1-durable-storage.py.bak @@ -0,0 +1,123 @@ +from __future__ import print_function + +import getpass +import os + +import libcloud.security +from libcloud.storage.providers import get_driver +from libcloud.storage.types import Provider + +# reqs: +# services: nova, glance, neutron +# resources: 2 instances (m1.small), 2 floating ips (1 keypair, 2 security groups) + +# HS-Fulda Private Cloud +auth_url = 'https://192.168.72.40:5000' +region_name = 'RegionOne' +domain_name = "hsfulda" + + +def main(): + ########################################################################### + # + # get credentials + # + ########################################################################### + + if "OS_PROJECT_NAME" in os.environ: + project_name = os.environ["OS_PROJECT_NAME"] + else: + project_name = input("Enter your OpenStack project:") + + if "OS_USERNAME" in os.environ: + auth_username = os.environ["OS_USERNAME"] + else: + auth_username = input("Enter your OpenStack username:") + + if "OS_PASSWORD" in os.environ: + auth_password = os.environ["OS_PASSWORD"] + else: + auth_password = getpass.getpass("Enter your OpenStack password:") + + ########################################################################### + # + # create connection + # + ########################################################################### + + libcloud.security.VERIFY_SSL_CERT = False + + provider = get_driver(Provider.OPENSTACK_SWIFT) + swift = provider(auth_username, + auth_password, + ex_force_auth_url=auth_url, + ex_force_auth_version='3.x_password', + ex_tenant_name=project_name, + ex_force_service_region=region_name, + ex_domain_name=domain_name) + + ########################################################################### + # + # create container + # + ########################################################################### + + container_name = 'fractals' + containers = swift.list_containers() + container = False + for con in containers: + if con.name == container_name: + container = con + + if not container: + container = swift.create_container(container_name=container_name) + + print(container) + + print(swift.list_containers()) + + ########################################################################### + # + # 
upload a goat + # + ########################################################################### + + object_name = 'an amazing goat' + file_path = 'C:\\Users\\Sebastian\\goat.jpg' + objects = container.list_objects() + object_data = False + for obj in objects: + if obj.name == object_name: + object_data = obj + + if not object_data: + # print(os.getcwd()) + container = swift.get_container(container_name=container_name) + object_data = container.upload_object(file_path=file_path, object_name=object_name) + + objects = container.list_objects() + print(objects) + + ########################################################################### + # + # check goat integrity + # + ########################################################################### + + import hashlib + print(hashlib.md5(open(file_path, 'rb').read()).hexdigest()) + + ########################################################################### + # + # delete goat + # + ########################################################################### + + swift.delete_object(object_data) + + objects = container.list_objects() + print(objects) + + +if __name__ == '__main__': + main() diff --git a/demo5-2-backup-fractals.py b/demo5-2-backup-fractals.py index ff49805..121f551 100644 --- a/demo5-2-backup-fractals.py +++ b/demo5-2-backup-fractals.py @@ -1,4 +1,4 @@ -from __future__ import print_function + import getpass import json @@ -28,12 +28,12 @@ def main(): if "OS_PROJECT_NAME" in os.environ: project_name = os.environ["OS_PROJECT_NAME"] else: - project_name = input("Enter your OpenStack project:") + project_name = eval(input("Enter your OpenStack project:")) if "OS_USERNAME" in os.environ: auth_username = os.environ["OS_USERNAME"] else: - auth_username = input("Enter your OpenStack username:") + auth_username = eval(input("Enter your OpenStack username:")) if "OS_PASSWORD" in os.environ: auth_password = os.environ["OS_PASSWORD"] diff --git a/demo5-2-backup-fractals.py.bak b/demo5-2-backup-fractals.py.bak new file mode 100644 index 0000000..ff49805 --- /dev/null +++ b/demo5-2-backup-fractals.py.bak @@ -0,0 +1,97 @@ +from __future__ import print_function + +import getpass +import json +import os + +import libcloud +import libcloud.security +import requests +from libcloud.storage.providers import get_driver +from libcloud.storage.types import Provider + +# HS-Fulda Private Cloud +auth_url = 'https://192.168.72.40:5000' +region_name = 'RegionOne' +domain_name = "hsfulda" + +api_ip = '192.168.72.102' + + +def main(): + ########################################################################### + # + # get credentials + # + ########################################################################### + + if "OS_PROJECT_NAME" in os.environ: + project_name = os.environ["OS_PROJECT_NAME"] + else: + project_name = input("Enter your OpenStack project:") + + if "OS_USERNAME" in os.environ: + auth_username = os.environ["OS_USERNAME"] + else: + auth_username = input("Enter your OpenStack username:") + + if "OS_PASSWORD" in os.environ: + auth_password = os.environ["OS_PASSWORD"] + else: + auth_password = getpass.getpass("Enter your OpenStack password:") + + ########################################################################### + # + # create connection + # + ########################################################################### + + libcloud.security.VERIFY_SSL_CERT = False + + provider = get_driver(Provider.OPENSTACK_SWIFT) + swift = provider(auth_username, + auth_password, + ex_force_auth_url=auth_url, + ex_force_auth_version='3.x_password', 
+ ex_tenant_name=project_name, + ex_force_service_region=region_name, + ex_domain_name=domain_name) + + ########################################################################### + # + # create container + # + ########################################################################### + + container_name = 'fractals' + containers = swift.list_containers() + container = False + for con in containers: + if con.name == container_name: + container = con + + if not container: + container = swift.create_container(container_name=container_name) + + print(container) + + ########################################################################### + # + # backup existing fractals to container + # + ########################################################################### + + endpoint = 'http://' + api_ip + params = { 'results_per_page': '-1' } + response = requests.get('%s/v1/fractal' % endpoint, params=params) + data = json.loads(response.text) + for fractal in data['objects']: + response = requests.get('%s/fractal/%s' % (endpoint, fractal['uuid']), stream=True) + container.upload_object_via_stream(response.iter_content(), object_name=fractal['uuid']) + + for object_data in container.list_objects(): + print(object_data) + + +if __name__ == '__main__': + main() diff --git a/destroy-all-demo-instances.py b/destroy-all-demo-instances.py index c3b1204..8146c3a 100644 --- a/destroy-all-demo-instances.py +++ b/destroy-all-demo-instances.py @@ -70,7 +70,7 @@ def main(): for instance in conn.list_nodes(): if instance.name in ['all-in-one', 'app-worker-1', 'app-worker-2', 'app-worker-3', 'app-controller', 'app-services', 'app-api-1', 'app-api-2']: - print('Destroying Instance: %s' % instance.name) + print(('Destroying Instance: %s' % instance.name)) conn.destroy_node(instance) # wait until all nodes are destroyed to be able to remove depended security groups @@ -89,7 +89,7 @@ def main(): # delete security groups for group in conn.ex_list_security_groups(): if group.name in ['control', 'worker', 'api', 'services']: - print('Deleting security group: %s' % group.name) + print(('Deleting security group: %s' % group.name)) conn.ex_delete_security_group(group) diff --git a/destroy-all-demo-instances.py.bak b/destroy-all-demo-instances.py.bak new file mode 100644 index 0000000..c3b1204 --- /dev/null +++ b/destroy-all-demo-instances.py.bak @@ -0,0 +1,97 @@ +# import getpass +# import os +# import libcloud.security + +import time +from libcloud.compute.providers import get_driver +from libcloud.compute.types import Provider + +# reqs: +# services: nova, glance, neutron +# resources: 2 instances (m1.small), 2 floating ips (1 keypair, 2 security groups) + +# Please use 1-29 for X in the following variable to specify your group number. 
(will be used for the username, +# project etc., as coordinated in the lab sessions) + +group_number = 30 + + +# web service endpoint of the private cloud infrastructure +auth_url = 'https://private-cloud.informatik.hs-fulda.de:5000' +# your username in OpenStack +auth_username = 'CloudComp' + str(group_number) +# your project in OpenStack +project_name = 'CloudComp' + str(group_number) + + +# default region +region_name = 'RegionOne' +# domain to use, "default" for local accounts, "hsfulda" for RZ LDAP, e.g., using fdaiXXXX as auth_username +domain_name = "default" + + +def main(): + ########################################################################### + # + # get credentials + # + ########################################################################### + + # if "OS_PASSWORD" in os.environ: + # auth_password = os.environ["OS_PASSWORD"] + # else: + # auth_password = getpass.getpass("Enter your OpenStack password:") + auth_password = "demo" + + ########################################################################### + # + # create connection + # + ########################################################################### + + # libcloud.security.VERIFY_SSL_CERT = False + + provider = get_driver(Provider.OPENSTACK) + conn = provider(auth_username, + auth_password, + ex_force_auth_url=auth_url, + ex_force_auth_version='3.x_password', + ex_tenant_name=project_name, + ex_force_service_region=region_name, + ex_domain_name=domain_name) + + ########################################################################### + # + # clean up resources from previous demos + # + ########################################################################### + + # destroy running demo instances + for instance in conn.list_nodes(): + if instance.name in ['all-in-one', 'app-worker-1', 'app-worker-2', 'app-worker-3', 'app-controller', + 'app-services', 'app-api-1', 'app-api-2']: + print('Destroying Instance: %s' % instance.name) + conn.destroy_node(instance) + + # wait until all nodes are destroyed to be able to remove depended security groups + nodes_still_running = True + while nodes_still_running: + nodes_still_running = False + time.sleep(3) + instances = conn.list_nodes() + for instance in instances: + # if we see any demo instances still running continue to wait for them to stop + if instance.name in ['all-in-one', 'app-worker-1', 'app-worker-2', 'app-worker-3', 'app-controller', + 'app-services', 'app-api-1', 'app-api-2']: + nodes_still_running = True + print('There are still instances running, waiting for them to be destroyed...') + + # delete security groups + for group in conn.ex_list_security_groups(): + if group.name in ['control', 'worker', 'api', 'services']: + print('Deleting security group: %s' % group.name) + conn.ex_delete_security_group(group) + + +if __name__ == '__main__': + main() -- 2.34.1 From 3b0062a84bc1c9cb1db342e7f4912d06de78bc21 Mon Sep 17 00:00:00 2001 From: Usama Tahir Date: Mon, 7 Aug 2023 13:25:43 +0500 Subject: [PATCH 05/14] Converted all other files into python 3 using 2to3 lib --- faafo/bin/faafo | 2 +- faafo/bin/faafo-worker.bak | 52 ++++++ faafo/bin/faafo.bak | 267 +++++++++++++++++++++++++++++ faafo/contrib/test_api.py | 6 +- faafo/contrib/test_api.py.bak | 56 ++++++ faafo/doc/source/conf.py | 4 +- faafo/doc/source/conf.py.bak | 17 ++ faafo/faafo/__init__.py.bak | 0 faafo/faafo/api/__init__.py.bak | 0 faafo/faafo/api/service.py | 6 +- faafo/faafo/api/service.py.bak | 146 ++++++++++++++++ faafo/faafo/queues.py.bak | 32 ++++ faafo/faafo/version.py.bak | 15 ++ 
faafo/faafo/worker/__init__.py.bak | 0 faafo/setup.py.bak | 29 ++++ 15 files changed, 623 insertions(+), 9 deletions(-) create mode 100644 faafo/bin/faafo-worker.bak create mode 100644 faafo/bin/faafo.bak create mode 100644 faafo/contrib/test_api.py.bak create mode 100644 faafo/doc/source/conf.py.bak create mode 100644 faafo/faafo/__init__.py.bak create mode 100644 faafo/faafo/api/__init__.py.bak create mode 100644 faafo/faafo/api/service.py.bak create mode 100644 faafo/faafo/queues.py.bak create mode 100644 faafo/faafo/version.py.bak create mode 100644 faafo/faafo/worker/__init__.py.bak create mode 100644 faafo/setup.py.bak diff --git a/faafo/bin/faafo b/faafo/bin/faafo index a12770d..a90f50f 100644 --- a/faafo/bin/faafo +++ b/faafo/bin/faafo @@ -161,7 +161,7 @@ def do_create_fractal(): number = random.randint(int(CONF.command.min_tasks), int(CONF.command.max_tasks)) LOG.info("generating %d task(s)" % number) - for i in xrange(0, number): + for i in range(0, number): task = get_random_task() LOG.debug("created task %s" % task) # NOTE(berendt): only necessary when using requests < 2.4.2 diff --git a/faafo/bin/faafo-worker.bak b/faafo/bin/faafo-worker.bak new file mode 100644 index 0000000..67daf0b --- /dev/null +++ b/faafo/bin/faafo-worker.bak @@ -0,0 +1,52 @@ +#!/usr/bin/env python + +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import os +import sys + +import kombu +from oslo_config import cfg +from oslo_log import log + +from faafo.worker import service as worker +from faafo import version + +LOG = log.getLogger('faafo.worker') +CONF = cfg.CONF + +# If ../faafo/__init__.py exists, add ../ to Python search path, so that +# it will override what happens to be installed in /usr/(local/)lib/python... +possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), + os.pardir, + os.pardir)) +if os.path.exists(os.path.join(possible_topdir, 'faafo', '__init__.py')): + sys.path.insert(0, possible_topdir) + +if __name__ == '__main__': + log.register_options(CONF) + log.set_defaults() + + CONF(project='worker', prog='faafo-worker', + default_config_files=['/etc/faafo/faafo.conf'], + version=version.version_info.version_string()) + + log.setup(CONF, 'worker', + version=version.version_info.version_string()) + + connection = kombu.Connection(CONF.transport_url) + server = worker.Worker(connection) + try: + server.run() + except KeyboardInterrupt: + LOG.info("Caught keyboard interrupt. Exiting.") diff --git a/faafo/bin/faafo.bak b/faafo/bin/faafo.bak new file mode 100644 index 0000000..a12770d --- /dev/null +++ b/faafo/bin/faafo.bak @@ -0,0 +1,267 @@ +#!/usr/bin/env python + +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import copy +import json +import random +import uuid + +from oslo_config import cfg +from oslo_log import log +from prettytable import PrettyTable +import requests + +from faafo import version + + +LOG = log.getLogger('faafo.client') +CONF = cfg.CONF + + +def get_random_task(): + random.seed() + + if CONF.command.width: + width = int(CONF.command.width) + else: + width = random.randint(int(CONF.command.min_width), + int(CONF.command.max_width)) + + if CONF.command.height: + height = int(CONF.command.height) + else: + height = random.randint(int(CONF.command.min_height), + int(CONF.command.max_height)) + + if CONF.command.iterations: + iterations = int(CONF.command.iterations) + else: + iterations = random.randint(int(CONF.command.min_iterations), + int(CONF.command.max_iterations)) + + if CONF.command.xa: + xa = float(CONF.command.xa) + else: + xa = random.uniform(float(CONF.command.min_xa), + float(CONF.command.max_xa)) + + if CONF.command.xb: + xb = float(CONF.command.xb) + else: + xb = random.uniform(float(CONF.command.min_xb), + float(CONF.command.max_xb)) + + if CONF.command.ya: + ya = float(CONF.command.ya) + else: + ya = random.uniform(float(CONF.command.min_ya), + float(CONF.command.max_ya)) + + if CONF.command.yb: + yb = float(CONF.command.yb) + else: + yb = random.uniform(float(CONF.command.min_yb), + float(CONF.command.max_yb)) + + task = { + 'uuid': str(uuid.uuid4()), + 'width': width, + 'height': height, + 'iterations': iterations, 'xa': xa, + 'xb': xb, + 'ya': ya, + 'yb': yb + } + + return task + + +def do_get_fractal(): + LOG.error("command 'download' not yet implemented") + + +def do_show_fractal(): + LOG.info("showing fractal %s" % CONF.command.uuid) + result = requests.get("%s/v1/fractal/%s" % + (CONF.endpoint_url, CONF.command.uuid)) + if result.status_code == 200: + data = json.loads(result.text) + output = PrettyTable(["Parameter", "Value"]) + output.align["Parameter"] = "l" + output.align["Value"] = "l" + output.add_row(["uuid", data['uuid']]) + output.add_row(["duration", "%f seconds" % data['duration']]) + output.add_row(["dimensions", "%d x %d pixels" % + (data['width'], data['height'])]) + output.add_row(["iterations", data['iterations']]) + output.add_row(["xa", data['xa']]) + output.add_row(["xb", data['xb']]) + output.add_row(["ya", data['ya']]) + output.add_row(["yb", data['yb']]) + output.add_row(["size", "%d bytes" % data['size']]) + output.add_row(["checksum", data['checksum']]) + output.add_row(["generated_by", data['generated_by']]) + print(output) + else: + LOG.error("fractal '%s' not found" % CONF.command.uuid) + + +def do_list_fractals(): + LOG.info("listing all fractals") + + fractals = get_fractals() + output = PrettyTable(["UUID", "Dimensions", "Filesize"]) + for fractal in fractals: + output.add_row([ + fractal["uuid"], + "%d x %d pixels" % (fractal["width"], fractal["height"]), + "%d bytes" % (fractal["size"] or 0), + ]) + print(output) + + +def get_fractals(page=1): + result = requests.get("%s/v1/fractal?page=%d" % + (CONF.endpoint_url, page)) + + fractals = [] + if result.status_code == 200: + data = json.loads(result.text) + if page 
< data['total_pages']: + fractals = data['objects'] + get_fractals(page + 1) + else: + return data['objects'] + + return fractals + + +def do_delete_fractal(): + LOG.info("deleting fractal %s" % CONF.command.uuid) + result = requests.delete("%s/v1/fractal/%s" % + (CONF.endpoint_url, CONF.command.uuid)) + LOG.debug("result: %s" %result) + + +def do_create_fractal(): + random.seed() + if CONF.command.tasks: + number = int(CONF.command.tasks) + else: + number = random.randint(int(CONF.command.min_tasks), + int(CONF.command.max_tasks)) + LOG.info("generating %d task(s)" % number) + for i in xrange(0, number): + task = get_random_task() + LOG.debug("created task %s" % task) + # NOTE(berendt): only necessary when using requests < 2.4.2 + headers = {'Content-type': 'application/json', + 'Accept': 'text/plain'} + requests.post("%s/v1/fractal" % CONF.endpoint_url, + json.dumps(task), headers=headers) + + +def add_command_parsers(subparsers): + parser = subparsers.add_parser('create') + parser.set_defaults(func=do_create_fractal) + parser.add_argument("--height", default=None, + help="The height of the generate image.") + parser.add_argument("--min-height", default=256, + help="The minimum height of the generate image.") + parser.add_argument("--max-height", default=1024, + help="The maximum height of the generate image.") + parser.add_argument("--width", default=None, + help="The width of the generated image.") + parser.add_argument("--min-width", default=256, + help="The minimum width of the generated image.") + parser.add_argument("--max-width", default=1024, + help="The maximum width of the generated image.") + parser.add_argument("--iterations", default=None, + help="The number of iterations.") + parser.add_argument("--min-iterations", default=128, + help="The minimum number of iterations.") + parser.add_argument("--max-iterations", default=512, + help="The maximum number of iterations.") + parser.add_argument("--tasks", default=None, + help="The number of generated fractals.") + parser.add_argument("--min-tasks", default=1, + help="The minimum number of generated fractals.") + parser.add_argument("--max-tasks", default=10, + help="The maximum number of generated fractals.") + parser.add_argument("--xa", default=None, + help="The value for the parameter 'xa'.") + parser.add_argument("--min-xa", default=-1.0, + help="The minimum value for the parameter 'xa'.") + parser.add_argument("--max-xa", default=-4.0, + help="The maximum value for the parameter 'xa'.") + parser.add_argument("--xb", default=None, + help="The value for the parameter 'xb'.") + parser.add_argument("--min-xb", default=1.0, + help="The minimum value for the parameter 'xb'.") + parser.add_argument("--max-xb", default=4.0, + help="The maximum value for the parameter 'xb'.") + parser.add_argument("--ya", default=None, + help="The value for the parameter 'ya'.") + parser.add_argument("--min-ya", default=-0.5, + help="The minimum value for the parameter 'ya'.") + parser.add_argument("--max-ya", default=-3, + help="The maximum value for the parameter 'ya'.") + parser.add_argument("--yb", default=None, + help="The value for the parameter 'yb'.") + parser.add_argument("--min-yb", default=0.5, + help="The minimum value for the parameter 'yb'.") + parser.add_argument("--max-yb", default=3, + help="The maximum value for the parameter 'yb'.") + + parser = subparsers.add_parser('delete') + parser.set_defaults(func=do_delete_fractal) + parser.add_argument("uuid", help="Fractal to delete.") + + parser = subparsers.add_parser('show') + 
parser.set_defaults(func=do_show_fractal) + parser.add_argument("uuid", help="Fractal to show.") + + parser = subparsers.add_parser('get') + parser.set_defaults(func=do_get_fractal) + parser.add_argument("uuid", help="Fractal to download.") + + parser = subparsers.add_parser('list') + parser.set_defaults(func=do_list_fractals) + + +client_commands = cfg.SubCommandOpt('command', title='Commands', + help='Show available commands.', + handler=add_command_parsers) + +CONF.register_cli_opts([client_commands]) + +client_cli_opts = [ + cfg.StrOpt('endpoint-url', + default='http://localhost', + help='API connection URL') +] + +CONF.register_cli_opts(client_cli_opts) + + +if __name__ == '__main__': + log.register_options(CONF) + log.set_defaults() + + CONF(project='client', prog='faafo-client', + version=version.version_info.version_string()) + + log.setup(CONF, 'client', + version=version.version_info.version_string()) + + CONF.command.func() diff --git a/faafo/contrib/test_api.py b/faafo/contrib/test_api.py index 559ac3d..6dd9823 100644 --- a/faafo/contrib/test_api.py +++ b/faafo/contrib/test_api.py @@ -34,11 +34,11 @@ assert response.status_code == 201 response = requests.get(url, headers=headers) assert response.status_code == 200 -print(response.json()) +print((response.json())) response = requests.get(url + '/' + uuid, headers=headers) assert response.status_code == 200 -print(response.json()) +print((response.json())) data = { 'checksum': 'c6fef4ef13a577066c2281b53c82ce2c7e94e', @@ -50,7 +50,7 @@ assert response.status_code == 200 response = requests.get(url + '/' + uuid, headers=headers) assert response.status_code == 200 -print(response.json()) +print((response.json())) response = requests.delete(url + '/' + uuid, headers=headers) assert response.status_code == 204 diff --git a/faafo/contrib/test_api.py.bak b/faafo/contrib/test_api.py.bak new file mode 100644 index 0000000..559ac3d --- /dev/null +++ b/faafo/contrib/test_api.py.bak @@ -0,0 +1,56 @@ +#!/usr/bin/env python + +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
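Two recurring 2to3 artifacts in this series are worth a short note. The doubled parentheses, as in the test_api.py hunks above and the destroy-all-demo-instances.py hunks in the previous patch, come from the print fixer: it cannot tell a Python 2 print statement applied to a parenthesized expression from an already function-style print() call, so it wraps the existing argument list once more. With a single argument the two spellings are equivalent, and the inner pair can simply be dropped when tidying up by hand. A minimal sketch (the instance name is just a sample value):

    print(('Destroying Instance: %s' % 'app-worker-1'))  # as emitted by 2to3
    print('Destroying Instance: %s' % 'app-worker-1')    # equivalent, idiomatic Python 3

The credential prompts in demo5-1-durable-storage.py and demo5-2-backup-fractals.py were likewise rewritten to eval(input(...)) by the input fixer, which preserves Python 2 input() semantics but evaluates whatever the user types (entering CloudComp30 as the project name would raise a NameError). If the scripts only need the raw string, plain input() is presumably the intended Python 3 form:

    # assumed intent: read the project name as a plain string, without eval()
    project_name = input("Enter your OpenStack project:")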
+ +import json +import requests + +url = 'http://127.0.0.1/api/fractal' +headers = {'Content-Type': 'application/json'} + +uuid = '13bf15a8-9f6c-4d59-956f-7d20f7484687' +data = { + 'uuid': uuid, + 'width': 100, + 'height': 100, + 'iterations': 10, + 'xa': 1.0, + 'xb': -1.0, + 'ya': 1.0, + 'yb': -1.0, +} +response = requests.post(url, data=json.dumps(data), headers=headers) +assert response.status_code == 201 + +response = requests.get(url, headers=headers) +assert response.status_code == 200 +print(response.json()) + +response = requests.get(url + '/' + uuid, headers=headers) +assert response.status_code == 200 +print(response.json()) + +data = { + 'checksum': 'c6fef4ef13a577066c2281b53c82ce2c7e94e', + 'duration': 10.12 +} +response = requests.put(url + '/' + uuid, data=json.dumps(data), + headers=headers) +assert response.status_code == 200 + +response = requests.get(url + '/' + uuid, headers=headers) +assert response.status_code == 200 +print(response.json()) + +response = requests.delete(url + '/' + uuid, headers=headers) +assert response.status_code == 204 diff --git a/faafo/doc/source/conf.py b/faafo/doc/source/conf.py index d78328c..83d6994 100644 --- a/faafo/doc/source/conf.py +++ b/faafo/doc/source/conf.py @@ -11,7 +11,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -copyright = u'2015, OpenStack contributors' +copyright = '2015, OpenStack contributors' master_doc = 'index' -project = u'First App Application for OpenStack' +project = 'First App Application for OpenStack' source_suffix = '.rst' diff --git a/faafo/doc/source/conf.py.bak b/faafo/doc/source/conf.py.bak new file mode 100644 index 0000000..d78328c --- /dev/null +++ b/faafo/doc/source/conf.py.bak @@ -0,0 +1,17 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
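The doc/source/conf.py change above is another purely cosmetic rewrite: in Python 3 every string literal is unicode, so the u prefix that 2to3 strips here is redundant (and since Python 3.3 it is accepted again as a no-op, so leaving it in place would not have broken anything). A one-line illustration:

    assert u'2015, OpenStack contributors' == '2015, OpenStack contributors'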
+ +copyright = u'2015, OpenStack contributors' +master_doc = 'index' +project = u'First App Application for OpenStack' +source_suffix = '.rst' diff --git a/faafo/faafo/__init__.py.bak b/faafo/faafo/__init__.py.bak new file mode 100644 index 0000000..e69de29 diff --git a/faafo/faafo/api/__init__.py.bak b/faafo/faafo/api/__init__.py.bak new file mode 100644 index 0000000..e69de29 diff --git a/faafo/faafo/api/service.py b/faafo/faafo/api/service.py index a3093c7..0a96f91 100644 --- a/faafo/faafo/api/service.py +++ b/faafo/faafo/api/service.py @@ -12,7 +12,7 @@ import base64 import copy -import cStringIO +import io from pkg_resources import resource_filename import flask @@ -119,8 +119,8 @@ def get_fractal(fractalid): response.status_code = 404 else: image_data = base64.b64decode(fractal.image) - image = Image.open(cStringIO.StringIO(image_data)) - output = cStringIO.StringIO() + image = Image.open(io.StringIO(image_data)) + output = io.StringIO() image.save(output, "PNG") image.seek(0) response = flask.make_response(output.getvalue()) diff --git a/faafo/faafo/api/service.py.bak b/faafo/faafo/api/service.py.bak new file mode 100644 index 0000000..a3093c7 --- /dev/null +++ b/faafo/faafo/api/service.py.bak @@ -0,0 +1,146 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
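One hunk above deserves more than a mechanical rename: in api/service.py the handler decodes a base64 payload into bytes and hands it to PIL, so the Python 3 counterpart of cStringIO.StringIO in that spot is most likely io.BytesIO rather than io.StringIO (io.StringIO accepts only text and raises a TypeError when given bytes). A sketch of that flow under this assumption, with an illustrative helper name that is not part of the patch:

    import base64
    import io

    from PIL import Image

    def fractal_png(encoded_image):
        """Decode a base64-encoded fractal image and re-encode it as PNG bytes."""
        image_data = base64.b64decode(encoded_image)  # bytes under Python 3
        image = Image.open(io.BytesIO(image_data))    # PIL needs a binary buffer
        output = io.BytesIO()
        image.save(output, "PNG")
        return output.getvalue()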
+ +import base64 +import copy +import cStringIO +from pkg_resources import resource_filename + +import flask +from flask_restless import APIManager +from flask_sqlalchemy import SQLAlchemy +from flask_bootstrap import Bootstrap +from kombu import Connection +from kombu.pools import producers +from oslo_config import cfg +from oslo_log import log +from PIL import Image +from sqlalchemy.dialects import mysql + +from faafo import queues +from faafo import version + +LOG = log.getLogger('faafo.api') +CONF = cfg.CONF + +api_opts = [ + cfg.StrOpt('listen-address', + default='0.0.0.0', + help='Listen address.'), + cfg.IntOpt('bind-port', + default='80', + help='Bind port.'), + cfg.StrOpt('database-url', + default='sqlite:////tmp/sqlite.db', + help='Database connection URL.') +] + +CONF.register_opts(api_opts) + +log.register_options(CONF) +log.set_defaults() + +CONF(project='api', prog='faafo-api', + default_config_files=['/etc/faafo/faafo.conf'], + version=version.version_info.version_string()) + +log.setup(CONF, 'api', + version=version.version_info.version_string()) + +template_path = resource_filename(__name__, "templates") +app = flask.Flask('faafo.api', template_folder=template_path) +app.config['DEBUG'] = CONF.debug +app.config['SQLALCHEMY_DATABASE_URI'] = CONF.database_url +db = SQLAlchemy(app) +Bootstrap(app) + + +def list_opts(): + """Entry point for oslo-config-generator.""" + return [(None, copy.deepcopy(api_opts))] + + +class Fractal(db.Model): + uuid = db.Column(db.String(36), primary_key=True) + checksum = db.Column(db.String(256), unique=True) + url = db.Column(db.String(256), nullable=True) + duration = db.Column(db.Float) + size = db.Column(db.Integer, nullable=True) + width = db.Column(db.Integer, nullable=False) + height = db.Column(db.Integer, nullable=False) + iterations = db.Column(db.Integer, nullable=False) + xa = db.Column(db.Float, nullable=False) + xb = db.Column(db.Float, nullable=False) + ya = db.Column(db.Float, nullable=False) + yb = db.Column(db.Float, nullable=False) + + if CONF.database_url.startswith('mysql'): + LOG.debug('Using MySQL database backend') + image = db.Column(mysql.MEDIUMBLOB, nullable=True) + else: + image = db.Column(db.LargeBinary, nullable=True) + + generated_by = db.Column(db.String(256), nullable=True) + + def __repr__(self): + return '' % self.uuid + + +db.create_all() +manager = APIManager(app, flask_sqlalchemy_db=db) +connection = Connection(CONF.transport_url) + + +@app.route('/', methods=['GET']) +@app.route('/index', methods=['GET']) +@app.route('/index/', methods=['GET']) +def index(page=1): + fractals = Fractal.query.filter( + (Fractal.checksum != None) & (Fractal.size != None)).paginate( # noqa + page, 5, error_out=False) + return flask.render_template('index.html', fractals=fractals) + + +@app.route('/fractal/', methods=['GET']) +def get_fractal(fractalid): + fractal = Fractal.query.filter_by(uuid=fractalid).first() + if not fractal: + response = flask.jsonify({'code': 404, + 'message': 'Fracal not found'}) + response.status_code = 404 + else: + image_data = base64.b64decode(fractal.image) + image = Image.open(cStringIO.StringIO(image_data)) + output = cStringIO.StringIO() + image.save(output, "PNG") + image.seek(0) + response = flask.make_response(output.getvalue()) + response.content_type = "image/png" + + return response + + +def generate_fractal(**kwargs): + with producers[connection].acquire(block=True) as producer: + producer.publish(kwargs['result'], + serializer='json', + exchange=queues.task_exchange, + 
declare=[queues.task_exchange], + routing_key='normal') + + +def main(): + manager.create_api(Fractal, methods=['GET', 'POST', 'DELETE', 'PUT'], + postprocessors={'POST': [generate_fractal]}, + exclude_columns=['image'], + url_prefix='/v1') + app.run(host=CONF.listen_address, port=CONF.bind_port) diff --git a/faafo/faafo/queues.py.bak b/faafo/faafo/queues.py.bak new file mode 100644 index 0000000..4e5a6fd --- /dev/null +++ b/faafo/faafo/queues.py.bak @@ -0,0 +1,32 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import copy + +import kombu +from oslo_config import cfg + +task_exchange = kombu.Exchange('tasks', type='direct') +task_queue = kombu.Queue('normal', task_exchange, routing_key='normal') + +queues_opts = [ + cfg.StrOpt('transport-url', + default='amqp://guest:guest@localhost:5672//', + help='AMQP connection URL.') +] + +cfg.CONF.register_opts(queues_opts) + + +def list_opts(): + """Entry point for oslo-config-generator.""" + return [(None, copy.deepcopy(queues_opts))] diff --git a/faafo/faafo/version.py.bak b/faafo/faafo/version.py.bak new file mode 100644 index 0000000..7a68690 --- /dev/null +++ b/faafo/faafo/version.py.bak @@ -0,0 +1,15 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import pbr.version + +version_info = pbr.version.VersionInfo('faafo') diff --git a/faafo/faafo/worker/__init__.py.bak b/faafo/faafo/worker/__init__.py.bak new file mode 100644 index 0000000..e69de29 diff --git a/faafo/setup.py.bak b/faafo/setup.py.bak new file mode 100644 index 0000000..ee06f22 --- /dev/null +++ b/faafo/setup.py.bak @@ -0,0 +1,29 @@ +#!/usr/bin/env python +# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import setuptools + +# In python < 2.7.4, a lazy loading of package `pbr` will break +# setuptools if some other modules registered functions in `atexit`. 
+# solution from: http://bugs.python.org/issue15881#msg170215 +try: + import multiprocessing # noqa +except ImportError: + pass + +setuptools.setup( + setup_requires=['pbr'], + pbr=True) -- 2.34.1 From d64e5eb3470682598e07c860741f8f6c5aa42e20 Mon Sep 17 00:00:00 2001 From: Usama Tahir Date: Tue, 8 Aug 2023 04:35:26 +0500 Subject: [PATCH 06/14] Updated install.sh --- demo1-getting-started.py.bak | 126 ------- demo3-microservice.py.bak | 276 -------------- demo4-scale-out-add-worker.py.bak | 190 ---------- demo4-scale-out.py.bak | 345 ------------------ demo5-1-durable-storage.py.bak | 123 ------- demo5-2-backup-fractals.py.bak | 97 ----- destroy-all-demo-instances.py.bak | 97 ----- faafo/bin/faafo-worker.bak | 52 --- faafo/bin/faafo.bak | 267 -------------- faafo/contrib/install.sh | 8 +- faafo/contrib/test_api.py.bak | 56 --- .../demo2-instance-with-init-script.py | 16 +- faafo/faafo/__init__.py.bak | 0 faafo/faafo/api/__init__.py.bak | 0 faafo/faafo/api/service.py.bak | 146 -------- faafo/faafo/queues.py.bak | 32 -- faafo/faafo/version.py.bak | 15 - faafo/faafo/worker/__init__.py.bak | 0 faafo/setup.py.bak | 29 -- 19 files changed, 12 insertions(+), 1863 deletions(-) delete mode 100644 demo1-getting-started.py.bak delete mode 100644 demo3-microservice.py.bak delete mode 100644 demo4-scale-out-add-worker.py.bak delete mode 100644 demo4-scale-out.py.bak delete mode 100644 demo5-1-durable-storage.py.bak delete mode 100644 demo5-2-backup-fractals.py.bak delete mode 100644 destroy-all-demo-instances.py.bak delete mode 100644 faafo/bin/faafo-worker.bak delete mode 100644 faafo/bin/faafo.bak delete mode 100644 faafo/contrib/test_api.py.bak rename demo2-instance-with-init-script.py.bak => faafo/demo2-instance-with-init-script.py (92%) delete mode 100644 faafo/faafo/__init__.py.bak delete mode 100644 faafo/faafo/api/__init__.py.bak delete mode 100644 faafo/faafo/api/service.py.bak delete mode 100644 faafo/faafo/queues.py.bak delete mode 100644 faafo/faafo/version.py.bak delete mode 100644 faafo/faafo/worker/__init__.py.bak delete mode 100644 faafo/setup.py.bak diff --git a/demo1-getting-started.py.bak b/demo1-getting-started.py.bak deleted file mode 100644 index 3030714..0000000 --- a/demo1-getting-started.py.bak +++ /dev/null @@ -1,126 +0,0 @@ -# Example for Cloud Computing Course Master AI / GSD -# -# uses libCloud: https://libcloud.apache.org/ -# libCloud API documentation: https://libcloud.readthedocs.io/en/latest/ -# OpenStack API documentation: https://developer.openstack.org/ -# this code was initially based on the former tutorial: https://developer.openstack.org/firstapp-libcloud/ - -import getpass - -from libcloud.compute.providers import get_driver -from libcloud.compute.types import Provider - -# Please use 1-29 for X in the following variable to specify your group number. 
(will be used for the username, -# project etc., as coordinated in the lab sessions) - -group_number = 30 - - -######################################################################################################################## -# -# no changes necessary below this line in this example -# -######################################################################################################################## - -# web service endpoint of the private cloud infrastructure -auth_url = 'https://private-cloud.informatik.hs-fulda.de:5000' -# your username in OpenStack -auth_username = 'CloudComp' + str(group_number) -# your project in OpenStack -project_name = 'CloudComp' + str(group_number) -# A network in the project the started instance will be attached to -project_network = 'CloudComp' + str(group_number) + '-net' - -# The image to look for and use for the started instance -ubuntu_image_name = "Ubuntu 18.04 - Bionic Beaver - 64-bit - Cloud Based Image" -# TODO: Ubuntu >18.04 would require major updates to faafo example again/better option: complete rewrite of example? - -# default region -region_name = 'RegionOne' -# domain to use, "default" for local accounts, "hsfulda" for RZ LDAP, e.g., using fdaiXXXX as auth_username -# domain_name = "default" - - -def main(): - # get the password from user - # auth_password = getpass.getpass("Enter your OpenStack password:") - auth_password = "demo" - - # instantiate a connection to the OpenStack private cloud - # make sure to include ex_force_auth_version='3.x_password', as needed in our environment - provider = get_driver(Provider.OPENSTACK) - - print("Opening connection to %s as %s..." % (auth_url, auth_username)) - - conn = provider(auth_username, - auth_password, - ex_force_auth_url=auth_url, - ex_force_auth_version='3.x_password', - ex_tenant_name=project_name, - ex_force_service_region=region_name) - # ex_domain_name=domain_name) - - print("Getting images and selecting desired one...") - print("=========================================================================") - - # get a list of images offered in the cloud context (e.g. Ubuntu 20.04, cirros, ...) - images = conn.list_images() - image = '' - for img in images: - if img.name == ubuntu_image_name: - image = img - print(img) - - print("Getting flavors...") - print("=========================================================================") - - # get a list of flavors offered in the cloud context (e.g. m1.small, m1.medium, ...) 
- flavors = conn.list_sizes() - for flavor in flavors: - print(flavor) - - print("Selecting desired flavor...") - print("=========================================================================") - - # get the flavor with id 2 - flavor_id = '2' - flavor = conn.ex_get_size(flavor_id) - print(flavor) - - print("Selecting desired network...") - print("=========================================================================") - - # get a list of networks in the cloud context - networks = conn.ex_list_networks() - network = '' - for net in networks: - if net.name == project_network: - network = net - - print("Create instance 'testing'...") - print("=========================================================================") - - # create a new instance with the name "testing" - # make sure to provide networks (networks={network}) the instance should be attached to - instance_name = 'testing' - testing_instance = conn.create_node(name=instance_name, image=image, size=flavor, networks={network}) - print(testing_instance) - - print("Showing all running instances...") - print("=========================================================================") - - # show all instances (running nodes) in the cloud context - instances = conn.list_nodes() - for instance in instances: - print(instance) - - print("Destroying instance...") - print("=========================================================================") - - # destroy the instance we have just created - conn.destroy_node(testing_instance) - - -# method that is called when the script is started from the command line -if __name__ == '__main__': - main() diff --git a/demo3-microservice.py.bak b/demo3-microservice.py.bak deleted file mode 100644 index 4c1a5ad..0000000 --- a/demo3-microservice.py.bak +++ /dev/null @@ -1,276 +0,0 @@ -# import getpass -# import os - -from libcloud.compute.providers import get_driver -from libcloud.compute.types import Provider - -# reqs: -# services: nova, glance, neutron -# resources: 2 instances, 2 floating ips (1 keypair, 2 security groups) - -# Please use 1-29 for X in the following variable to specify your group number. (will be used for the username, -# project etc., as coordinated in the lab sessions) - -group_number = 30 - - -# web service endpoint of the private cloud infrastructure -auth_url = 'https://private-cloud.informatik.hs-fulda.de:5000' -# your username in OpenStack -auth_username = 'CloudComp' + str(group_number) -# your project in OpenStack -project_name = 'CloudComp' + str(group_number) -# A network in the project the started instance will be attached to -project_network = 'CloudComp' + str(group_number) + '-net' - -# The image to look for and use for the started instance -ubuntu_image_name = "Ubuntu 18.04 - Bionic Beaver - 64-bit - Cloud Based Image" -# TODO: Ubuntu >18.04 would require major updates to faafo example again/better option: complete rewrite of example? 
- -# The public key to be used for SSH connection, please make sure, that you have the corresponding private key -# -# id_rsa.pub should look like this (standard sshd pubkey format): -# ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAw+J...F3w2mleybgT1w== user@HOSTNAME - -keypair_name = 'srieger-pub' -pub_key_file = '~/.ssh/id_rsa.pub' - -flavor_name = 'm1.small' - - -# default region -region_name = 'RegionOne' -# domain to use, "default" for local accounts, "hsfulda" for RZ LDAP, e.g., using fdaiXXXX as auth_username -# domain_name = "default" - - -def main(): - ########################################################################### - # - # get credentials - # - ########################################################################### - - # if "OS_PASSWORD" in os.environ: - # auth_password = os.environ["OS_PASSWORD"] - # else: - # auth_password = getpass.getpass("Enter your OpenStack password:") - auth_password = "demo" - - ########################################################################### - # - # create connection - # - ########################################################################### - - provider = get_driver(Provider.OPENSTACK) - conn = provider(auth_username, - auth_password, - ex_force_auth_url=auth_url, - ex_force_auth_version='3.x_password', - ex_tenant_name=project_name, - ex_force_service_region=region_name) - # ex_domain_name=domain_name) - - ########################################################################### - # - # get image, flavor, network for instance creation - # - ########################################################################### - - images = conn.list_images() - image = '' - for img in images: - if img.name == ubuntu_image_name: - image = img - - flavors = conn.list_sizes() - flavor = '' - for flav in flavors: - if flav.name == flavor_name: - flavor = conn.ex_get_size(flav.id) - - networks = conn.ex_list_networks() - network = '' - for net in networks: - if net.name == project_network: - network = net - - ########################################################################### - # - # create keypair dependency - # - ########################################################################### - - print('Checking for existing SSH key pair...') - keypair_exists = False - for keypair in conn.list_key_pairs(): - if keypair.name == keypair_name: - keypair_exists = True - - if keypair_exists: - print('Keypair ' + keypair_name + ' already exists. Skipping import.') - else: - print('adding keypair...') - conn.import_key_pair_from_file(keypair_name, pub_key_file) - - for keypair in conn.list_key_pairs(): - print(keypair) - - ########################################################################### - # - # create security group dependency - # - ########################################################################### - - print('Checking for existing worker security group...') - security_group_name = 'worker' - security_group_exists = False - worker_security_group = '' - for security_group in conn.ex_list_security_groups(): - if security_group.name == security_group_name: - worker_security_group = security_group - security_group_exists = True - - if security_group_exists: - print('Worker Security Group ' + worker_security_group.name + ' already exists. 
Skipping creation.') - else: - worker_security_group = conn.ex_create_security_group('worker', 'for services that run on a worker node') - conn.ex_create_security_group_rule(worker_security_group, 'TCP', 22, 22) - - print('Checking for existing controller security group...') - security_group_name = 'control' - security_group_exists = False - controller_security_group = '' - for security_group in conn.ex_list_security_groups(): - if security_group.name == security_group_name: - controller_security_group = security_group - security_group_exists = True - - if security_group_exists: - print('Controller Security Group ' + controller_security_group.name + ' already exists. Skipping creation.') - else: - controller_security_group = conn.ex_create_security_group('control', 'for services that run on a control node') - conn.ex_create_security_group_rule(controller_security_group, 'TCP', 22, 22) - conn.ex_create_security_group_rule(controller_security_group, 'TCP', 80, 80) - conn.ex_create_security_group_rule(controller_security_group, 'TCP', 5672, 5672, - source_security_group=worker_security_group) - - for security_group in conn.ex_list_security_groups(): - print(security_group) - - ########################################################################### - # - # create app-controller - # - ########################################################################### - - # https://git.openstack.org/cgit/openstack/faafo/plain/contrib/install.sh - # is currently broken, hence the "rabbitctl" lines were added in the example - # below, see also https://bugs.launchpad.net/faafo/+bug/1679710 - # - # Thanks to Stefan Friedmann for finding this fix ;) - - userdata = '''#!/usr/bin/env bash - curl -L -s https://gogs.informatik.hs-fulda.de/srieger/cloud-computing-msc-ai-examples/raw/master/faafo/contrib/install.sh | bash -s -- \ - -i messaging -i faafo -r api - rabbitmqctl add_user faafo guest - rabbitmqctl set_user_tags faafo administrator - rabbitmqctl set_permissions -p / faafo ".*" ".*" ".*" - ''' - - print('Starting new app-controller instance and wait until it is running...') - instance_controller_1 = conn.create_node(name='app-controller', - image=image, - size=flavor, - networks=[network], - ex_keyname=keypair_name, - ex_userdata=userdata, - ex_security_groups=[controller_security_group]) - - conn.wait_until_running(nodes=[instance_controller_1], timeout=120, ssh_interface='private_ips') - - ########################################################################### - # - # assign app-controller floating ip - # - ########################################################################### - - print('Checking for unused Floating IP...') - unused_floating_ip = None - for floating_ip in conn.ex_list_floating_ips(): - if not floating_ip.node_id: - unused_floating_ip = floating_ip - break - - if not unused_floating_ip: - pool = conn.ex_list_floating_ip_pools()[0] - print('Allocating new Floating IP from pool: {}'.format(pool)) - unused_floating_ip = pool.create_floating_ip() - - conn.ex_attach_floating_ip_to_node(instance_controller_1, unused_floating_ip) - print('Controller Application will be deployed to http://%s' % unused_floating_ip.ip_address) - - ########################################################################### - # - # getting id and ip address of app-controller instance - # - ########################################################################### - - # instance should not have a public ip? 
floating ips are assigned later - instance_controller_1 = conn.ex_get_node_details(instance_controller_1.id) - if instance_controller_1.public_ips: - ip_controller = instance_controller_1.public_ips[0] - else: - ip_controller = instance_controller_1.private_ips[0] - - ########################################################################### - # - # create app-worker-1 - # - ########################################################################### - - userdata = '''#!/usr/bin/env bash - curl -L -s https://gogs.informatik.hs-fulda.de/srieger/cloud-computing-msc-ai-examples/raw/master/faafo/contrib/install.sh | bash -s -- \ - -i faafo -r worker -e 'http://%(ip_controller)s' -m 'amqp://faafo:guest@%(ip_controller)s:5672/' - ''' % {'ip_controller': ip_controller} - - print('Starting new app-worker-1 instance and wait until it is running...') - instance_worker_1 = conn.create_node(name='app-worker-1', - image=image, - size=flavor, - networks=[network], - ex_keyname=keypair_name, - ex_userdata=userdata, - ex_security_groups=[worker_security_group]) - - conn.wait_until_running(nodes=[instance_worker_1], timeout=120, ssh_interface='private_ips') - - ########################################################################### - # - # assign app-worker floating ip - # - ########################################################################### - - print('Checking for unused Floating IP...') - unused_floating_ip = None - for floating_ip in conn.ex_list_floating_ips(): - if not floating_ip.node_id: - unused_floating_ip = floating_ip - break - - if not unused_floating_ip: - pool = conn.ex_list_floating_ip_pools()[0] - print('Allocating new Floating IP from pool: {}'.format(pool)) - unused_floating_ip = pool.create_floating_ip() - - conn.ex_attach_floating_ip_to_node(instance_worker_1, unused_floating_ip) - print('The worker will be available for SSH at %s' % unused_floating_ip.ip_address) - - print('You can use ssh to login to the controller using your private key. After login, you can list available ' - 'fractals using "faafo list". To request the generation of new fractals, you can use "faafo create". ' - 'You can also see other options to use the faafo example cloud service using "faafo -h".') - - -if __name__ == '__main__': - main() diff --git a/demo4-scale-out-add-worker.py.bak b/demo4-scale-out-add-worker.py.bak deleted file mode 100644 index 14b40b1..0000000 --- a/demo4-scale-out-add-worker.py.bak +++ /dev/null @@ -1,190 +0,0 @@ -# import getpass -# import os -# import libcloud.security - -import time -from libcloud.compute.providers import get_driver -from libcloud.compute.types import Provider - -# reqs: -# services: nova, glance, neutron -# resources: 2 instances (m1.small), 2 floating ips (1 keypair, 2 security groups) - -# Please use 1-29 for X in the following variable to specify your group number. 
(will be used for the username, -# project etc., as coordinated in the lab sessions) - -group_number = 30 - - -# web service endpoint of the private cloud infrastructure -auth_url = 'https://private-cloud.informatik.hs-fulda.de:5000' -# your username in OpenStack -auth_username = 'CloudComp' + str(group_number) -# your project in OpenStack -project_name = 'CloudComp' + str(group_number) -# A network in the project the started instance will be attached to -project_network = 'CloudComp' + str(group_number) + '-net' - -# The image to look for and use for the started instance -ubuntu_image_name = "Ubuntu 18.04 - Bionic Beaver - 64-bit - Cloud Based Image" -# TODO: Ubuntu >18.04 would require major updates to faafo example again/better option: complete rewrite of example? - -# The public key to be used for SSH connection, please make sure, that you have the corresponding private key -# -# id_rsa.pub should look like this (standard sshd pubkey format): -# ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAw+J...F3w2mleybgT1w== user@HOSTNAME - -keypair_name = 'srieger-pub' -pub_key_file = '~/.ssh/id_rsa.pub' - -flavor_name = 'm1.small' - - -# default region -region_name = 'RegionOne' -# domain to use, "default" for local accounts, "hsfulda" for RZ LDAP, e.g., using fdaiXXXX as auth_username -domain_name = "default" - - -def main(): - ########################################################################### - # - # get credentials - # - ########################################################################### - - # if "OS_PASSWORD" in os.environ: - # auth_password = os.environ["OS_PASSWORD"] - # else: - # auth_password = getpass.getpass("Enter your OpenStack password:") - auth_password = "demo" - - ########################################################################### - # - # create connection - # - ########################################################################### - - # libcloud.security.VERIFY_SSL_CERT = False - - provider = get_driver(Provider.OPENSTACK) - conn = provider(auth_username, - auth_password, - ex_force_auth_url=auth_url, - ex_force_auth_version='3.x_password', - ex_tenant_name=project_name, - ex_force_service_region=region_name, - ex_domain_name=domain_name) - - ########################################################################### - # - # get image, flavor, network for instance creation - # - ########################################################################### - - images = conn.list_images() - image = '' - for img in images: - if img.name == ubuntu_image_name: - image = img - - flavors = conn.list_sizes() - flavor = '' - for flav in flavors: - if flav.name == flavor_name: - flavor = conn.ex_get_size(flav.id) - - networks = conn.ex_list_networks() - network = '' - for net in networks: - if net.name == project_network: - network = net - - ########################################################################### - # - # get fixed a ip for service and api instance - # (better would be shared IP for the cluster etc.) 
- # - ########################################################################### - - # find service instance - for instance in conn.list_nodes(): - if instance.name == 'app-services': - services_ip = instance.private_ips[0] - print('Found app-services fixed IP to be: ', services_ip) - if instance.name == 'app-api-1': - api_1_ip = instance.private_ips[0] - print('Found app-api-1 fixed IP to be: ', api_1_ip) - - ########################################################################### - # - # create keypair dependency - # - ########################################################################### - - print('Checking for existing SSH key pair...') - keypair_exists = False - for keypair in conn.list_key_pairs(): - if keypair.name == keypair_name: - keypair_exists = True - - if keypair_exists: - print('Keypair ' + keypair_name + ' already exists. Skipping import.') - else: - print('adding keypair...') - conn.import_key_pair_from_file(keypair_name, pub_key_file) - - for keypair in conn.list_key_pairs(): - print(keypair) - - ########################################################################### - # - # create security group dependency - # - ########################################################################### - - def get_security_group(connection, security_group_name): - """A helper function to check if security group already exists""" - print('Checking for existing ' + security_group_name + ' security group...') - for security_grp in connection.ex_list_security_groups(): - if security_grp.name == security_group_name: - print('Security Group ' + security_group_name + ' already exists. Skipping creation.') - return security_grp - return False - - if not get_security_group(conn, "worker"): - worker_security_group = conn.ex_create_security_group('worker', 'for services that run on a worker node') - conn.ex_create_security_group_rule(worker_security_group, 'TCP', 22, 22) - else: - worker_security_group = get_security_group(conn, "worker") - - for security_group in conn.ex_list_security_groups(): - print(security_group) - - ########################################################################### - # - # create worker instances - # - ########################################################################### - - userdata_worker = '''#!/usr/bin/env bash - curl -L -s https://gogs.informatik.hs-fulda.de/srieger/cloud-computing-msc-ai-examples/raw/master/faafo/contrib/install.sh | bash -s -- \ - -i faafo -r worker -e 'http://%(api_1_ip)s' -m 'amqp://faafo:guest@%(services_ip)s:5672/' - ''' % {'api_1_ip': api_1_ip, 'services_ip': services_ip} - - # userdata-api-2 = '''#!/usr/bin/env bash - # curl -L -s https://gogs.informatik.hs-fulda.de/srieger/cloud-computing-msc-ai-examples/raw/master/faafo/contrib/install.sh | bash -s -- \ - # -i faafo -r worker -e 'http://%(api_2_ip)s' -m 'amqp://faafo:guest@%(services_ip)s:5672/' - # ''' % {'api_2_ip': api_2_ip, 'services_ip': services_ip} - - print('Starting new app-worker-3 instance and wait until it is running...') - instance_worker_3 = conn.create_node(name='app-worker-3', - image=image, size=flavor, - networks=[network], - ex_keyname=keypair_name, - ex_userdata=userdata_worker, - ex_security_groups=[worker_security_group]) - - -if __name__ == '__main__': - main() diff --git a/demo4-scale-out.py.bak b/demo4-scale-out.py.bak deleted file mode 100644 index 86e1a24..0000000 --- a/demo4-scale-out.py.bak +++ /dev/null @@ -1,345 +0,0 @@ -# import getpass -# import os -# import libcloud.security - -import time -from libcloud.compute.providers import 
get_driver -from libcloud.compute.types import Provider - -# reqs: -# services: nova, glance, neutron -# resources: 2 instances (m1.small), 2 floating ips (1 keypair, 2 security groups) - -# Please use 1-29 for X in the following variable to specify your group number. (will be used for the username, -# project etc., as coordinated in the lab sessions) - -group_number = 30 - - -# web service endpoint of the private cloud infrastructure -auth_url = 'https://private-cloud.informatik.hs-fulda.de:5000' -# your username in OpenStack -auth_username = 'CloudComp' + str(group_number) -# your project in OpenStack -project_name = 'CloudComp' + str(group_number) -# A network in the project the started instance will be attached to -project_network = 'CloudComp' + str(group_number) + '-net' - -# The image to look for and use for the started instance -ubuntu_image_name = "Ubuntu 18.04 - Bionic Beaver - 64-bit - Cloud Based Image" -# TODO: Ubuntu >18.04 would require major updates to faafo example again/better option: complete rewrite of example? - -# The public key to be used for SSH connection, please make sure, that you have the corresponding private key -# -# id_rsa.pub should look like this (standard sshd pubkey format): -# ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAw+J...F3w2mleybgT1w== user@HOSTNAME - -keypair_name = 'srieger-pub' -pub_key_file = '~/.ssh/id_rsa.pub' - -flavor_name = 'm1.small' - - -# default region -region_name = 'RegionOne' -# domain to use, "default" for local accounts, "hsfulda" for RZ LDAP, e.g., using fdaiXXXX as auth_username -domain_name = "default" - - -def main(): - ########################################################################### - # - # get credentials - # - ########################################################################### - - # if "OS_PASSWORD" in os.environ: - # auth_password = os.environ["OS_PASSWORD"] - # else: - # auth_password = getpass.getpass("Enter your OpenStack password:") - auth_password = "demo" - - ########################################################################### - # - # create connection - # - ########################################################################### - - # libcloud.security.VERIFY_SSL_CERT = False - - provider = get_driver(Provider.OPENSTACK) - conn = provider(auth_username, - auth_password, - ex_force_auth_url=auth_url, - ex_force_auth_version='3.x_password', - ex_tenant_name=project_name, - ex_force_service_region=region_name, - ex_domain_name=domain_name) - - ########################################################################### - # - # get image, flavor, network for instance creation - # - ########################################################################### - - images = conn.list_images() - image = '' - for img in images: - if img.name == ubuntu_image_name: - image = img - - flavors = conn.list_sizes() - flavor = '' - for flav in flavors: - if flav.name == flavor_name: - flavor = conn.ex_get_size(flav.id) - - networks = conn.ex_list_networks() - network = '' - for net in networks: - if net.name == project_network: - network = net - - ########################################################################### - # - # create keypair dependency - # - ########################################################################### - - print('Checking for existing SSH key pair...') - keypair_exists = False - for keypair in conn.list_key_pairs(): - if keypair.name == keypair_name: - keypair_exists = True - - if keypair_exists: - print('Keypair ' + keypair_name + ' already exists. 
Skipping import.') - else: - print('adding keypair...') - conn.import_key_pair_from_file(keypair_name, pub_key_file) - - for keypair in conn.list_key_pairs(): - print(keypair) - - ########################################################################### - # - # clean up resources from previous demos - # - ########################################################################### - - # destroy running demo instances - for instance in conn.list_nodes(): - if instance.name in ['all-in-one', 'app-worker-1', 'app-worker-2', 'app-worker-3', 'app-controller', - 'app-services', 'app-api-1', 'app-api-2']: - print('Destroying Instance: %s' % instance.name) - conn.destroy_node(instance) - - # wait until all nodes are destroyed to be able to remove depended security groups - nodes_still_running = True - while nodes_still_running: - nodes_still_running = False - time.sleep(3) - instances = conn.list_nodes() - for instance in instances: - # if we see any demo instances still running continue to wait for them to stop - if instance.name in ['all-in-one', 'app-worker-1', 'app-worker-2', 'app-controller']: - nodes_still_running = True - print('There are still instances running, waiting for them to be destroyed...') - - # delete security groups - for group in conn.ex_list_security_groups(): - if group.name in ['control', 'worker', 'api', 'services']: - print('Deleting security group: %s' % group.name) - conn.ex_delete_security_group(group) - - ########################################################################### - # - # create security group dependency - # - ########################################################################### - - def get_security_group(connection, security_group_name): - """A helper function to check if security group already exists""" - print('Checking for existing ' + security_group_name + ' security group...') - for security_grp in connection.ex_list_security_groups(): - if security_grp.name == security_group_name: - print('Security Group ' + security_group_name + ' already exists. 
Skipping creation.') - return worker_security_group - return False - - if not get_security_group(conn, "api"): - api_security_group = conn.ex_create_security_group('api', 'for API services only') - conn.ex_create_security_group_rule(api_security_group, 'TCP', 80, 80) - conn.ex_create_security_group_rule(api_security_group, 'TCP', 22, 22) - else: - api_security_group = get_security_group(conn, "api") - - if not get_security_group(conn, "worker"): - worker_security_group = conn.ex_create_security_group('worker', 'for services that run on a worker node') - conn.ex_create_security_group_rule(worker_security_group, 'TCP', 22, 22) - else: - worker_security_group = get_security_group(conn, "worker") - - if not get_security_group(conn, "control"): - controller_security_group = conn.ex_create_security_group('control', 'for services that run on a control node') - conn.ex_create_security_group_rule(controller_security_group, 'TCP', 22, 22) - conn.ex_create_security_group_rule(controller_security_group, 'TCP', 80, 80) - conn.ex_create_security_group_rule(controller_security_group, 'TCP', 5672, 5672, - source_security_group=worker_security_group) - - if not get_security_group(conn, "services"): - services_security_group = conn.ex_create_security_group('services', 'for DB and AMQP services only') - conn.ex_create_security_group_rule(services_security_group, 'TCP', 22, 22) - conn.ex_create_security_group_rule(services_security_group, 'TCP', 3306, 3306, - source_security_group=api_security_group) - conn.ex_create_security_group_rule(services_security_group, 'TCP', 5672, 5672, - source_security_group=worker_security_group) - conn.ex_create_security_group_rule(services_security_group, 'TCP', 5672, 5672, - source_security_group=api_security_group) - else: - services_security_group = get_security_group(conn, "services") - - for security_group in conn.ex_list_security_groups(): - print(security_group) - - ########################################################################### - # - # get floating ip helper function - # - ########################################################################### - - def get_floating_ip(connection): - """A helper function to re-use available Floating IPs""" - unused_floating_ip = None - for float_ip in connection.ex_list_floating_ips(): - if not float_ip.node_id: - unused_floating_ip = float_ip - break - if not unused_floating_ip: - pool = connection.ex_list_floating_ip_pools()[0] - unused_floating_ip = pool.create_floating_ip() - return unused_floating_ip - - ########################################################################### - # - # create app-services instance (database & messaging) - # - ########################################################################### - - # https://git.openstack.org/cgit/openstack/faafo/plain/contrib/install.sh - # is currently broken, hence the "rabbitctl" lines were added in the example - # below, see also https://bugs.launchpad.net/faafo/+bug/1679710 - # - # Thanks to Stefan Friedmann for finding this fix ;) - - userdata_service = '''#!/usr/bin/env bash - curl -L -s https://gogs.informatik.hs-fulda.de/srieger/cloud-computing-msc-ai-examples/raw/master/faafo/contrib/install.sh | bash -s -- \ - -i database -i messaging - rabbitmqctl add_user faafo guest - rabbitmqctl set_user_tags faafo administrator - rabbitmqctl set_permissions -p / faafo ".*" ".*" ".*" - ''' - - print('Starting new app-services instance and wait until it is running...') - instance_services = conn.create_node(name='app-services', - image=image, - size=flavor, - 
networks=[network], - ex_keyname=keypair_name, - ex_userdata=userdata_service, - ex_security_groups=[services_security_group]) - instance_services = conn.wait_until_running(nodes=[instance_services], timeout=120, - ssh_interface='private_ips')[0][0] - services_ip = instance_services.private_ips[0] - - ########################################################################### - # - # create app-api instances - # - ########################################################################### - - userdata_api = '''#!/usr/bin/env bash - curl -L -s https://gogs.informatik.hs-fulda.de/srieger/cloud-computing-msc-ai-examples/raw/master/faafo/contrib/install.sh | bash -s -- \ - -i faafo -r api -m 'amqp://faafo:guest@%(services_ip)s:5672/' \ - -d 'mysql+pymysql://faafo:password@%(services_ip)s:3306/faafo' - ''' % {'services_ip': services_ip} - - print('Starting new app-api-1 instance and wait until it is running...') - instance_api_1 = conn.create_node(name='app-api-1', - image=image, - size=flavor, - networks=[network], - ex_keyname=keypair_name, - ex_userdata=userdata_api, - ex_security_groups=[api_security_group]) - - print('Starting new app-api-2 instance and wait until it is running...') - instance_api_2 = conn.create_node(name='app-api-2', - image=image, - size=flavor, - networks=[network], - ex_keyname=keypair_name, - ex_userdata=userdata_api, - ex_security_groups=[api_security_group]) - - instance_api_1 = conn.wait_until_running(nodes=[instance_api_1], timeout=120, - ssh_interface='private_ips')[0][0] - api_1_ip = instance_api_1.private_ips[0] - instance_api_2 = conn.wait_until_running(nodes=[instance_api_2], timeout=120, - ssh_interface='private_ips')[0][0] - # api_2_ip = instance_api_2.private_ips[0] - - for instance in [instance_api_1, instance_api_2]: - floating_ip = get_floating_ip(conn) - conn.ex_attach_floating_ip_to_node(instance, floating_ip) - print('allocated %(ip)s to %(host)s' % {'ip': floating_ip.ip_address, 'host': instance.name}) - - ########################################################################### - # - # create worker instances - # - ########################################################################### - - userdata_worker = '''#!/usr/bin/env bash - curl -L -s https://gogs.informatik.hs-fulda.de/srieger/cloud-computing-msc-ai-examples/raw/master/faafo/contrib/install.sh | bash -s -- \ - -i faafo -r worker -e 'http://%(api_1_ip)s' -m 'amqp://faafo:guest@%(services_ip)s:5672/' - ''' % {'api_1_ip': api_1_ip, 'services_ip': services_ip} - - # userdata_api-api-2 = '''#!/usr/bin/env bash - # curl -L -s https://gogs.informatik.hs-fulda.de/srieger/cloud-computing-msc-ai-examples/raw/master/faafo/contrib/install.sh | bash -s -- \ - # -i faafo -r worker -e 'http://%(api_2_ip)s' -m 'amqp://faafo:guest@%(services_ip)s:5672/' - # ''' % {'api_2_ip': api_2_ip, 'services_ip': services_ip} - - print('Starting new app-worker-1 instance and wait until it is running...') - instance_worker_1 = conn.create_node(name='app-worker-1', - image=image, size=flavor, - networks=[network], - ex_keyname=keypair_name, - ex_userdata=userdata_worker, - ex_security_groups=[worker_security_group]) - - print('Starting new app-worker-2 instance and wait until it is running...') - instance_worker_2 = conn.create_node(name='app-worker-2', - image=image, size=flavor, - networks=[network], - ex_keyname=keypair_name, - ex_userdata=userdata_worker, - ex_security_groups=[worker_security_group]) - - # do not start worker 3 initially, can be started using scale-out-add-worker.py demo - - #print('Starting 
new app-worker-3 instance and wait until it is running...') - #instance_worker_3 = conn.create_node(name='app-worker-3', - # image=image, size=flavor, - # networks=[network], - # ex_keyname=keypair_name, - # ex_userdata=userdata_worker, - # ex_security_groups=[worker_security_group]) - - print(instance_worker_1) - print(instance_worker_2) - #print(instance_worker_3) - - -if __name__ == '__main__': - main() diff --git a/demo5-1-durable-storage.py.bak b/demo5-1-durable-storage.py.bak deleted file mode 100644 index 8ffc581..0000000 --- a/demo5-1-durable-storage.py.bak +++ /dev/null @@ -1,123 +0,0 @@ -from __future__ import print_function - -import getpass -import os - -import libcloud.security -from libcloud.storage.providers import get_driver -from libcloud.storage.types import Provider - -# reqs: -# services: nova, glance, neutron -# resources: 2 instances (m1.small), 2 floating ips (1 keypair, 2 security groups) - -# HS-Fulda Private Cloud -auth_url = 'https://192.168.72.40:5000' -region_name = 'RegionOne' -domain_name = "hsfulda" - - -def main(): - ########################################################################### - # - # get credentials - # - ########################################################################### - - if "OS_PROJECT_NAME" in os.environ: - project_name = os.environ["OS_PROJECT_NAME"] - else: - project_name = input("Enter your OpenStack project:") - - if "OS_USERNAME" in os.environ: - auth_username = os.environ["OS_USERNAME"] - else: - auth_username = input("Enter your OpenStack username:") - - if "OS_PASSWORD" in os.environ: - auth_password = os.environ["OS_PASSWORD"] - else: - auth_password = getpass.getpass("Enter your OpenStack password:") - - ########################################################################### - # - # create connection - # - ########################################################################### - - libcloud.security.VERIFY_SSL_CERT = False - - provider = get_driver(Provider.OPENSTACK_SWIFT) - swift = provider(auth_username, - auth_password, - ex_force_auth_url=auth_url, - ex_force_auth_version='3.x_password', - ex_tenant_name=project_name, - ex_force_service_region=region_name, - ex_domain_name=domain_name) - - ########################################################################### - # - # create container - # - ########################################################################### - - container_name = 'fractals' - containers = swift.list_containers() - container = False - for con in containers: - if con.name == container_name: - container = con - - if not container: - container = swift.create_container(container_name=container_name) - - print(container) - - print(swift.list_containers()) - - ########################################################################### - # - # upload a goat - # - ########################################################################### - - object_name = 'an amazing goat' - file_path = 'C:\\Users\\Sebastian\\goat.jpg' - objects = container.list_objects() - object_data = False - for obj in objects: - if obj.name == object_name: - object_data = obj - - if not object_data: - # print(os.getcwd()) - container = swift.get_container(container_name=container_name) - object_data = container.upload_object(file_path=file_path, object_name=object_name) - - objects = container.list_objects() - print(objects) - - ########################################################################### - # - # check goat integrity - # - ########################################################################### - - 
import hashlib - print(hashlib.md5(open(file_path, 'rb').read()).hexdigest()) - - ########################################################################### - # - # delete goat - # - ########################################################################### - - swift.delete_object(object_data) - - objects = container.list_objects() - print(objects) - - -if __name__ == '__main__': - main() diff --git a/demo5-2-backup-fractals.py.bak b/demo5-2-backup-fractals.py.bak deleted file mode 100644 index ff49805..0000000 --- a/demo5-2-backup-fractals.py.bak +++ /dev/null @@ -1,97 +0,0 @@ -from __future__ import print_function - -import getpass -import json -import os - -import libcloud -import libcloud.security -import requests -from libcloud.storage.providers import get_driver -from libcloud.storage.types import Provider - -# HS-Fulda Private Cloud -auth_url = 'https://192.168.72.40:5000' -region_name = 'RegionOne' -domain_name = "hsfulda" - -api_ip = '192.168.72.102' - - -def main(): - ########################################################################### - # - # get credentials - # - ########################################################################### - - if "OS_PROJECT_NAME" in os.environ: - project_name = os.environ["OS_PROJECT_NAME"] - else: - project_name = input("Enter your OpenStack project:") - - if "OS_USERNAME" in os.environ: - auth_username = os.environ["OS_USERNAME"] - else: - auth_username = input("Enter your OpenStack username:") - - if "OS_PASSWORD" in os.environ: - auth_password = os.environ["OS_PASSWORD"] - else: - auth_password = getpass.getpass("Enter your OpenStack password:") - - ########################################################################### - # - # create connection - # - ########################################################################### - - libcloud.security.VERIFY_SSL_CERT = False - - provider = get_driver(Provider.OPENSTACK_SWIFT) - swift = provider(auth_username, - auth_password, - ex_force_auth_url=auth_url, - ex_force_auth_version='3.x_password', - ex_tenant_name=project_name, - ex_force_service_region=region_name, - ex_domain_name=domain_name) - - ########################################################################### - # - # create container - # - ########################################################################### - - container_name = 'fractals' - containers = swift.list_containers() - container = False - for con in containers: - if con.name == container_name: - container = con - - if not container: - container = swift.create_container(container_name=container_name) - - print(container) - - ########################################################################### - # - # backup existing fractals to container - # - ########################################################################### - - endpoint = 'http://' + api_ip - params = { 'results_per_page': '-1' } - response = requests.get('%s/v1/fractal' % endpoint, params=params) - data = json.loads(response.text) - for fractal in data['objects']: - response = requests.get('%s/fractal/%s' % (endpoint, fractal['uuid']), stream=True) - container.upload_object_via_stream(response.iter_content(), object_name=fractal['uuid']) - - for object_data in container.list_objects(): - print(object_data) - - -if __name__ == '__main__': - main() diff --git a/destroy-all-demo-instances.py.bak b/destroy-all-demo-instances.py.bak deleted file mode 100644 index c3b1204..0000000 --- a/destroy-all-demo-instances.py.bak +++ /dev/null @@ -1,97 +0,0 @@ -# import getpass -# import os -# import 
libcloud.security - -import time -from libcloud.compute.providers import get_driver -from libcloud.compute.types import Provider - -# reqs: -# services: nova, glance, neutron -# resources: 2 instances (m1.small), 2 floating ips (1 keypair, 2 security groups) - -# Please use 1-29 for X in the following variable to specify your group number. (will be used for the username, -# project etc., as coordinated in the lab sessions) - -group_number = 30 - - -# web service endpoint of the private cloud infrastructure -auth_url = 'https://private-cloud.informatik.hs-fulda.de:5000' -# your username in OpenStack -auth_username = 'CloudComp' + str(group_number) -# your project in OpenStack -project_name = 'CloudComp' + str(group_number) - - -# default region -region_name = 'RegionOne' -# domain to use, "default" for local accounts, "hsfulda" for RZ LDAP, e.g., using fdaiXXXX as auth_username -domain_name = "default" - - -def main(): - ########################################################################### - # - # get credentials - # - ########################################################################### - - # if "OS_PASSWORD" in os.environ: - # auth_password = os.environ["OS_PASSWORD"] - # else: - # auth_password = getpass.getpass("Enter your OpenStack password:") - auth_password = "demo" - - ########################################################################### - # - # create connection - # - ########################################################################### - - # libcloud.security.VERIFY_SSL_CERT = False - - provider = get_driver(Provider.OPENSTACK) - conn = provider(auth_username, - auth_password, - ex_force_auth_url=auth_url, - ex_force_auth_version='3.x_password', - ex_tenant_name=project_name, - ex_force_service_region=region_name, - ex_domain_name=domain_name) - - ########################################################################### - # - # clean up resources from previous demos - # - ########################################################################### - - # destroy running demo instances - for instance in conn.list_nodes(): - if instance.name in ['all-in-one', 'app-worker-1', 'app-worker-2', 'app-worker-3', 'app-controller', - 'app-services', 'app-api-1', 'app-api-2']: - print('Destroying Instance: %s' % instance.name) - conn.destroy_node(instance) - - # wait until all nodes are destroyed to be able to remove depended security groups - nodes_still_running = True - while nodes_still_running: - nodes_still_running = False - time.sleep(3) - instances = conn.list_nodes() - for instance in instances: - # if we see any demo instances still running continue to wait for them to stop - if instance.name in ['all-in-one', 'app-worker-1', 'app-worker-2', 'app-worker-3', 'app-controller', - 'app-services', 'app-api-1', 'app-api-2']: - nodes_still_running = True - print('There are still instances running, waiting for them to be destroyed...') - - # delete security groups - for group in conn.ex_list_security_groups(): - if group.name in ['control', 'worker', 'api', 'services']: - print('Deleting security group: %s' % group.name) - conn.ex_delete_security_group(group) - - -if __name__ == '__main__': - main() diff --git a/faafo/bin/faafo-worker.bak b/faafo/bin/faafo-worker.bak deleted file mode 100644 index 67daf0b..0000000 --- a/faafo/bin/faafo-worker.bak +++ /dev/null @@ -1,52 +0,0 @@ -#!/usr/bin/env python - -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os -import sys - -import kombu -from oslo_config import cfg -from oslo_log import log - -from faafo.worker import service as worker -from faafo import version - -LOG = log.getLogger('faafo.worker') -CONF = cfg.CONF - -# If ../faafo/__init__.py exists, add ../ to Python search path, so that -# it will override what happens to be installed in /usr/(local/)lib/python... -possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), - os.pardir, - os.pardir)) -if os.path.exists(os.path.join(possible_topdir, 'faafo', '__init__.py')): - sys.path.insert(0, possible_topdir) - -if __name__ == '__main__': - log.register_options(CONF) - log.set_defaults() - - CONF(project='worker', prog='faafo-worker', - default_config_files=['/etc/faafo/faafo.conf'], - version=version.version_info.version_string()) - - log.setup(CONF, 'worker', - version=version.version_info.version_string()) - - connection = kombu.Connection(CONF.transport_url) - server = worker.Worker(connection) - try: - server.run() - except KeyboardInterrupt: - LOG.info("Caught keyboard interrupt. Exiting.") diff --git a/faafo/bin/faafo.bak b/faafo/bin/faafo.bak deleted file mode 100644 index a12770d..0000000 --- a/faafo/bin/faafo.bak +++ /dev/null @@ -1,267 +0,0 @@ -#!/usr/bin/env python - -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import copy -import json -import random -import uuid - -from oslo_config import cfg -from oslo_log import log -from prettytable import PrettyTable -import requests - -from faafo import version - - -LOG = log.getLogger('faafo.client') -CONF = cfg.CONF - - -def get_random_task(): - random.seed() - - if CONF.command.width: - width = int(CONF.command.width) - else: - width = random.randint(int(CONF.command.min_width), - int(CONF.command.max_width)) - - if CONF.command.height: - height = int(CONF.command.height) - else: - height = random.randint(int(CONF.command.min_height), - int(CONF.command.max_height)) - - if CONF.command.iterations: - iterations = int(CONF.command.iterations) - else: - iterations = random.randint(int(CONF.command.min_iterations), - int(CONF.command.max_iterations)) - - if CONF.command.xa: - xa = float(CONF.command.xa) - else: - xa = random.uniform(float(CONF.command.min_xa), - float(CONF.command.max_xa)) - - if CONF.command.xb: - xb = float(CONF.command.xb) - else: - xb = random.uniform(float(CONF.command.min_xb), - float(CONF.command.max_xb)) - - if CONF.command.ya: - ya = float(CONF.command.ya) - else: - ya = random.uniform(float(CONF.command.min_ya), - float(CONF.command.max_ya)) - - if CONF.command.yb: - yb = float(CONF.command.yb) - else: - yb = random.uniform(float(CONF.command.min_yb), - float(CONF.command.max_yb)) - - task = { - 'uuid': str(uuid.uuid4()), - 'width': width, - 'height': height, - 'iterations': iterations, 'xa': xa, - 'xb': xb, - 'ya': ya, - 'yb': yb - } - - return task - - -def do_get_fractal(): - LOG.error("command 'download' not yet implemented") - - -def do_show_fractal(): - LOG.info("showing fractal %s" % CONF.command.uuid) - result = requests.get("%s/v1/fractal/%s" % - (CONF.endpoint_url, CONF.command.uuid)) - if result.status_code == 200: - data = json.loads(result.text) - output = PrettyTable(["Parameter", "Value"]) - output.align["Parameter"] = "l" - output.align["Value"] = "l" - output.add_row(["uuid", data['uuid']]) - output.add_row(["duration", "%f seconds" % data['duration']]) - output.add_row(["dimensions", "%d x %d pixels" % - (data['width'], data['height'])]) - output.add_row(["iterations", data['iterations']]) - output.add_row(["xa", data['xa']]) - output.add_row(["xb", data['xb']]) - output.add_row(["ya", data['ya']]) - output.add_row(["yb", data['yb']]) - output.add_row(["size", "%d bytes" % data['size']]) - output.add_row(["checksum", data['checksum']]) - output.add_row(["generated_by", data['generated_by']]) - print(output) - else: - LOG.error("fractal '%s' not found" % CONF.command.uuid) - - -def do_list_fractals(): - LOG.info("listing all fractals") - - fractals = get_fractals() - output = PrettyTable(["UUID", "Dimensions", "Filesize"]) - for fractal in fractals: - output.add_row([ - fractal["uuid"], - "%d x %d pixels" % (fractal["width"], fractal["height"]), - "%d bytes" % (fractal["size"] or 0), - ]) - print(output) - - -def get_fractals(page=1): - result = requests.get("%s/v1/fractal?page=%d" % - (CONF.endpoint_url, page)) - - fractals = [] - if result.status_code == 200: - data = json.loads(result.text) - if page < data['total_pages']: - fractals = data['objects'] + get_fractals(page + 1) - else: - return data['objects'] - - return fractals - - -def do_delete_fractal(): - LOG.info("deleting fractal %s" % CONF.command.uuid) - result = requests.delete("%s/v1/fractal/%s" % - (CONF.endpoint_url, CONF.command.uuid)) - LOG.debug("result: %s" %result) - - -def do_create_fractal(): - random.seed() - if CONF.command.tasks: - 
number = int(CONF.command.tasks) - else: - number = random.randint(int(CONF.command.min_tasks), - int(CONF.command.max_tasks)) - LOG.info("generating %d task(s)" % number) - for i in xrange(0, number): - task = get_random_task() - LOG.debug("created task %s" % task) - # NOTE(berendt): only necessary when using requests < 2.4.2 - headers = {'Content-type': 'application/json', - 'Accept': 'text/plain'} - requests.post("%s/v1/fractal" % CONF.endpoint_url, - json.dumps(task), headers=headers) - - -def add_command_parsers(subparsers): - parser = subparsers.add_parser('create') - parser.set_defaults(func=do_create_fractal) - parser.add_argument("--height", default=None, - help="The height of the generate image.") - parser.add_argument("--min-height", default=256, - help="The minimum height of the generate image.") - parser.add_argument("--max-height", default=1024, - help="The maximum height of the generate image.") - parser.add_argument("--width", default=None, - help="The width of the generated image.") - parser.add_argument("--min-width", default=256, - help="The minimum width of the generated image.") - parser.add_argument("--max-width", default=1024, - help="The maximum width of the generated image.") - parser.add_argument("--iterations", default=None, - help="The number of iterations.") - parser.add_argument("--min-iterations", default=128, - help="The minimum number of iterations.") - parser.add_argument("--max-iterations", default=512, - help="The maximum number of iterations.") - parser.add_argument("--tasks", default=None, - help="The number of generated fractals.") - parser.add_argument("--min-tasks", default=1, - help="The minimum number of generated fractals.") - parser.add_argument("--max-tasks", default=10, - help="The maximum number of generated fractals.") - parser.add_argument("--xa", default=None, - help="The value for the parameter 'xa'.") - parser.add_argument("--min-xa", default=-1.0, - help="The minimum value for the parameter 'xa'.") - parser.add_argument("--max-xa", default=-4.0, - help="The maximum value for the parameter 'xa'.") - parser.add_argument("--xb", default=None, - help="The value for the parameter 'xb'.") - parser.add_argument("--min-xb", default=1.0, - help="The minimum value for the parameter 'xb'.") - parser.add_argument("--max-xb", default=4.0, - help="The maximum value for the parameter 'xb'.") - parser.add_argument("--ya", default=None, - help="The value for the parameter 'ya'.") - parser.add_argument("--min-ya", default=-0.5, - help="The minimum value for the parameter 'ya'.") - parser.add_argument("--max-ya", default=-3, - help="The maximum value for the parameter 'ya'.") - parser.add_argument("--yb", default=None, - help="The value for the parameter 'yb'.") - parser.add_argument("--min-yb", default=0.5, - help="The minimum value for the parameter 'yb'.") - parser.add_argument("--max-yb", default=3, - help="The maximum value for the parameter 'yb'.") - - parser = subparsers.add_parser('delete') - parser.set_defaults(func=do_delete_fractal) - parser.add_argument("uuid", help="Fractal to delete.") - - parser = subparsers.add_parser('show') - parser.set_defaults(func=do_show_fractal) - parser.add_argument("uuid", help="Fractal to show.") - - parser = subparsers.add_parser('get') - parser.set_defaults(func=do_get_fractal) - parser.add_argument("uuid", help="Fractal to download.") - - parser = subparsers.add_parser('list') - parser.set_defaults(func=do_list_fractals) - - -client_commands = cfg.SubCommandOpt('command', title='Commands', - help='Show 
available commands.', - handler=add_command_parsers) - -CONF.register_cli_opts([client_commands]) - -client_cli_opts = [ - cfg.StrOpt('endpoint-url', - default='http://localhost', - help='API connection URL') -] - -CONF.register_cli_opts(client_cli_opts) - - -if __name__ == '__main__': - log.register_options(CONF) - log.set_defaults() - - CONF(project='client', prog='faafo-client', - version=version.version_info.version_string()) - - log.setup(CONF, 'client', - version=version.version_info.version_string()) - - CONF.command.func() diff --git a/faafo/contrib/install.sh b/faafo/contrib/install.sh index 03c9228..e921a32 100644 --- a/faafo/contrib/install.sh +++ b/faafo/contrib/install.sh @@ -83,13 +83,13 @@ if [[ -e /etc/os-release ]]; then if [[ $INSTALL_DATABASE -eq 1 ]]; then if [[ $ID = 'ubuntu' || $ID = 'debian' ]]; then - sudo DEBIAN_FRONTEND=noninteractive apt-get install -y mysql-server python-mysqldb + sudo DEBIAN_FRONTEND=noninteractive apt-get install -y mysql-server python3-mysqldb # HSFD changes for Ubuntu 18.04 sudo sed -i -e "/bind-address/d" /etc/mysql/mysql.conf.d/mysqld.cnf #sudo sed -i -e "/bind-address/d" /etc/mysql/my.cnf sudo service mysql restart elif [[ $ID = 'fedora' ]]; then - sudo dnf install -y mariadb-server python-mysql + sudo dnf install -y mariadb-server python3-mysql printf "[mysqld]\nbind-address = 127.0.0.1\n" | sudo tee /etc/my.cnf.d/faafo.conf sudo systemctl enable mariadb sudo systemctl start mariadb @@ -117,7 +117,7 @@ if [[ -e /etc/os-release ]]; then if [[ $INSTALL_FAAFO -eq 1 ]]; then if [[ $ID = 'ubuntu' || $ID = 'debian' ]]; then - sudo apt-get install -y python-dev python-pip supervisor git zlib1g-dev libmysqlclient-dev python-mysqldb + sudo apt-get install -y python3-dev python3-pip supervisor git zlib1g-dev libmysqlclient-dev python3-mysqldb # Following is needed because of # https://bugs.launchpad.net/ubuntu/+source/supervisor/+bug/1594740 if [ $(lsb_release --short --codename) = xenial ]; then @@ -131,7 +131,7 @@ if [[ -e /etc/os-release ]]; then fi fi elif [[ $ID = 'fedora' ]]; then - sudo dnf install -y python-devel python-pip supervisor git zlib-devel mariadb-devel gcc which python-mysql + sudo dnf install -y python3-devel python3-pip supervisor git zlib-devel mariadb-devel gcc which python3-mysql sudo systemctl enable supervisord sudo systemctl start supervisord #elif [[ $ID = 'opensuse' || $ID = 'sles' ]]; then diff --git a/faafo/contrib/test_api.py.bak b/faafo/contrib/test_api.py.bak deleted file mode 100644 index 559ac3d..0000000 --- a/faafo/contrib/test_api.py.bak +++ /dev/null @@ -1,56 +0,0 @@ -#!/usr/bin/env python - -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import json -import requests - -url = 'http://127.0.0.1/api/fractal' -headers = {'Content-Type': 'application/json'} - -uuid = '13bf15a8-9f6c-4d59-956f-7d20f7484687' -data = { - 'uuid': uuid, - 'width': 100, - 'height': 100, - 'iterations': 10, - 'xa': 1.0, - 'xb': -1.0, - 'ya': 1.0, - 'yb': -1.0, -} -response = requests.post(url, data=json.dumps(data), headers=headers) -assert response.status_code == 201 - -response = requests.get(url, headers=headers) -assert response.status_code == 200 -print(response.json()) - -response = requests.get(url + '/' + uuid, headers=headers) -assert response.status_code == 200 -print(response.json()) - -data = { - 'checksum': 'c6fef4ef13a577066c2281b53c82ce2c7e94e', - 'duration': 10.12 -} -response = requests.put(url + '/' + uuid, data=json.dumps(data), - headers=headers) -assert response.status_code == 200 - -response = requests.get(url + '/' + uuid, headers=headers) -assert response.status_code == 200 -print(response.json()) - -response = requests.delete(url + '/' + uuid, headers=headers) -assert response.status_code == 204 diff --git a/demo2-instance-with-init-script.py.bak b/faafo/demo2-instance-with-init-script.py similarity index 92% rename from demo2-instance-with-init-script.py.bak rename to faafo/demo2-instance-with-init-script.py index 25142dc..d7f7cc9 100644 --- a/demo2-instance-with-init-script.py.bak +++ b/faafo/demo2-instance-with-init-script.py @@ -104,7 +104,7 @@ def main(): keypair_exists = True if keypair_exists: - print('Keypair ' + keypair_name + ' already exists. Skipping import.') + print(('Keypair ' + keypair_name + ' already exists. Skipping import.')) else: print('adding keypair...') conn.import_key_pair_from_file(keypair_name, pub_key_file) @@ -128,7 +128,7 @@ def main(): security_group_exists = True if security_group_exists: - print('Security Group ' + all_in_one_security_group.name + ' already exists. Skipping creation.') + print(('Security Group ' + all_in_one_security_group.name + ' already exists. Skipping creation.')) else: all_in_one_security_group = conn.ex_create_security_group(security_group_name, 'network access for all-in-one application.') @@ -159,7 +159,7 @@ def main(): instance_exists = True if instance_exists: - print('Instance ' + testing_instance.name + ' already exists. Skipping creation.') + print(('Instance ' + testing_instance.name + ' already exists. Skipping creation.')) exit() else: print('Starting new all-in-one instance and wait until it is running...') @@ -181,12 +181,12 @@ def main(): private_ip = None if len(testing_instance.private_ips): private_ip = testing_instance.private_ips[0] - print('Private IP found: {}'.format(private_ip)) + print(('Private IP found: {}'.format(private_ip))) public_ip = None if len(testing_instance.public_ips): public_ip = testing_instance.public_ips[0] - print('Public IP found: {}'.format(public_ip)) + print(('Public IP found: {}'.format(public_ip))) print('Checking for unused Floating IP...') unused_floating_ip = None @@ -197,11 +197,11 @@ def main(): if not unused_floating_ip and len(conn.ex_list_floating_ip_pools()): pool = conn.ex_list_floating_ip_pools()[0] - print('Allocating new Floating IP from pool: {}'.format(pool)) + print(('Allocating new Floating IP from pool: {}'.format(pool))) unused_floating_ip = pool.create_floating_ip() if public_ip: - print('Instance ' + testing_instance.name + ' already has a public ip. Skipping attachment.') + print(('Instance ' + testing_instance.name + ' already has a public ip. 
Skipping attachment.')) elif unused_floating_ip: conn.ex_attach_floating_ip_to_node(testing_instance, unused_floating_ip) @@ -214,7 +214,7 @@ def main(): actual_ip_address = private_ip print('\n') - print('The Fractals app will be deployed to http://{}\n'.format(actual_ip_address)) + print(('The Fractals app will be deployed to http://{}\n'.format(actual_ip_address))) print('You can use ssh to login to the instance using your private key. Default user name for official Ubuntu\n' 'Cloud Images is: ubuntu, so you can use, e.g.: "ssh -i ~/.ssh/id_rsa ubuntu@" if your private\n' diff --git a/faafo/faafo/__init__.py.bak b/faafo/faafo/__init__.py.bak deleted file mode 100644 index e69de29..0000000 diff --git a/faafo/faafo/api/__init__.py.bak b/faafo/faafo/api/__init__.py.bak deleted file mode 100644 index e69de29..0000000 diff --git a/faafo/faafo/api/service.py.bak b/faafo/faafo/api/service.py.bak deleted file mode 100644 index a3093c7..0000000 --- a/faafo/faafo/api/service.py.bak +++ /dev/null @@ -1,146 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import base64 -import copy -import cStringIO -from pkg_resources import resource_filename - -import flask -from flask_restless import APIManager -from flask_sqlalchemy import SQLAlchemy -from flask_bootstrap import Bootstrap -from kombu import Connection -from kombu.pools import producers -from oslo_config import cfg -from oslo_log import log -from PIL import Image -from sqlalchemy.dialects import mysql - -from faafo import queues -from faafo import version - -LOG = log.getLogger('faafo.api') -CONF = cfg.CONF - -api_opts = [ - cfg.StrOpt('listen-address', - default='0.0.0.0', - help='Listen address.'), - cfg.IntOpt('bind-port', - default='80', - help='Bind port.'), - cfg.StrOpt('database-url', - default='sqlite:////tmp/sqlite.db', - help='Database connection URL.') -] - -CONF.register_opts(api_opts) - -log.register_options(CONF) -log.set_defaults() - -CONF(project='api', prog='faafo-api', - default_config_files=['/etc/faafo/faafo.conf'], - version=version.version_info.version_string()) - -log.setup(CONF, 'api', - version=version.version_info.version_string()) - -template_path = resource_filename(__name__, "templates") -app = flask.Flask('faafo.api', template_folder=template_path) -app.config['DEBUG'] = CONF.debug -app.config['SQLALCHEMY_DATABASE_URI'] = CONF.database_url -db = SQLAlchemy(app) -Bootstrap(app) - - -def list_opts(): - """Entry point for oslo-config-generator.""" - return [(None, copy.deepcopy(api_opts))] - - -class Fractal(db.Model): - uuid = db.Column(db.String(36), primary_key=True) - checksum = db.Column(db.String(256), unique=True) - url = db.Column(db.String(256), nullable=True) - duration = db.Column(db.Float) - size = db.Column(db.Integer, nullable=True) - width = db.Column(db.Integer, nullable=False) - height = db.Column(db.Integer, nullable=False) - iterations = db.Column(db.Integer, nullable=False) - xa = db.Column(db.Float, nullable=False) - xb = db.Column(db.Float, nullable=False) - ya = db.Column(db.Float, 
nullable=False) - yb = db.Column(db.Float, nullable=False) - - if CONF.database_url.startswith('mysql'): - LOG.debug('Using MySQL database backend') - image = db.Column(mysql.MEDIUMBLOB, nullable=True) - else: - image = db.Column(db.LargeBinary, nullable=True) - - generated_by = db.Column(db.String(256), nullable=True) - - def __repr__(self): - return '' % self.uuid - - -db.create_all() -manager = APIManager(app, flask_sqlalchemy_db=db) -connection = Connection(CONF.transport_url) - - -@app.route('/', methods=['GET']) -@app.route('/index', methods=['GET']) -@app.route('/index/', methods=['GET']) -def index(page=1): - fractals = Fractal.query.filter( - (Fractal.checksum != None) & (Fractal.size != None)).paginate( # noqa - page, 5, error_out=False) - return flask.render_template('index.html', fractals=fractals) - - -@app.route('/fractal/', methods=['GET']) -def get_fractal(fractalid): - fractal = Fractal.query.filter_by(uuid=fractalid).first() - if not fractal: - response = flask.jsonify({'code': 404, - 'message': 'Fracal not found'}) - response.status_code = 404 - else: - image_data = base64.b64decode(fractal.image) - image = Image.open(cStringIO.StringIO(image_data)) - output = cStringIO.StringIO() - image.save(output, "PNG") - image.seek(0) - response = flask.make_response(output.getvalue()) - response.content_type = "image/png" - - return response - - -def generate_fractal(**kwargs): - with producers[connection].acquire(block=True) as producer: - producer.publish(kwargs['result'], - serializer='json', - exchange=queues.task_exchange, - declare=[queues.task_exchange], - routing_key='normal') - - -def main(): - manager.create_api(Fractal, methods=['GET', 'POST', 'DELETE', 'PUT'], - postprocessors={'POST': [generate_fractal]}, - exclude_columns=['image'], - url_prefix='/v1') - app.run(host=CONF.listen_address, port=CONF.bind_port) diff --git a/faafo/faafo/queues.py.bak b/faafo/faafo/queues.py.bak deleted file mode 100644 index 4e5a6fd..0000000 --- a/faafo/faafo/queues.py.bak +++ /dev/null @@ -1,32 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import copy - -import kombu -from oslo_config import cfg - -task_exchange = kombu.Exchange('tasks', type='direct') -task_queue = kombu.Queue('normal', task_exchange, routing_key='normal') - -queues_opts = [ - cfg.StrOpt('transport-url', - default='amqp://guest:guest@localhost:5672//', - help='AMQP connection URL.') -] - -cfg.CONF.register_opts(queues_opts) - - -def list_opts(): - """Entry point for oslo-config-generator.""" - return [(None, copy.deepcopy(queues_opts))] diff --git a/faafo/faafo/version.py.bak b/faafo/faafo/version.py.bak deleted file mode 100644 index 7a68690..0000000 --- a/faafo/faafo/version.py.bak +++ /dev/null @@ -1,15 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import pbr.version - -version_info = pbr.version.VersionInfo('faafo') diff --git a/faafo/faafo/worker/__init__.py.bak b/faafo/faafo/worker/__init__.py.bak deleted file mode 100644 index e69de29..0000000 diff --git a/faafo/setup.py.bak b/faafo/setup.py.bak deleted file mode 100644 index ee06f22..0000000 --- a/faafo/setup.py.bak +++ /dev/null @@ -1,29 +0,0 @@ -#!/usr/bin/env python -# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import setuptools - -# In python < 2.7.4, a lazy loading of package `pbr` will break -# setuptools if some other modules registered functions in `atexit`. -# solution from: http://bugs.python.org/issue15881#msg170215 -try: - import multiprocessing # noqa -except ImportError: - pass - -setuptools.setup( - setup_requires=['pbr'], - pbr=True) -- 2.34.1 From 4bfb746cad0d4a3eb4c11c81dfbe8a0db950c7bd Mon Sep 17 00:00:00 2001 From: Sebastian Rieger Date: Mon, 25 Sep 2023 13:23:22 +0200 Subject: [PATCH 07/14] changed Ubuntu to 20.04, made python3 the default while installing faafo, added comment to show use of different faafo branches for cloud-init --- demo1-getting-started.py | 4 ++-- demo2-instance-with-init-script.py | 10 +++++++--- demo3-microservice.py | 12 ++++++++---- demo4-scale-out-add-worker.py | 13 +++++++++---- demo4-scale-out.py | 16 ++++++++++------ faafo/contrib/install-aws.sh | 7 +++++++ faafo/contrib/install.sh | 13 ++++++++++--- 7 files changed, 53 insertions(+), 22 deletions(-) diff --git a/demo1-getting-started.py b/demo1-getting-started.py index d7ef9cb..c07183f 100644 --- a/demo1-getting-started.py +++ b/demo1-getting-started.py @@ -32,8 +32,8 @@ project_name = 'CloudComp' + str(group_number) project_network = 'CloudComp' + str(group_number) + '-net' # The image to look for and use for the started instance -ubuntu_image_name = "Ubuntu 18.04 - Bionic Beaver - 64-bit - Cloud Based Image" -# TODO: Ubuntu >18.04 would require major updates to faafo example again/better option: complete rewrite of example? 
+#ubuntu_image_name = "Ubuntu 18.04 - Bionic Beaver - 64-bit - Cloud Based Image" +ubuntu_image_name = "Ubuntu 20.04 - Focal Fossa - 64-bit - Cloud Based Image" # default region region_name = 'RegionOne' diff --git a/demo2-instance-with-init-script.py b/demo2-instance-with-init-script.py index d7f7cc9..ae27c11 100644 --- a/demo2-instance-with-init-script.py +++ b/demo2-instance-with-init-script.py @@ -20,8 +20,8 @@ project_name = 'CloudComp' + str(group_number) project_network = 'CloudComp' + str(group_number) + '-net' # The image to look for and use for the started instance -ubuntu_image_name = "Ubuntu 18.04 - Bionic Beaver - 64-bit - Cloud Based Image" -# TODO: Ubuntu >18.04 would require major updates to faafo example again/better option: complete rewrite of example? +#ubuntu_image_name = "Ubuntu 18.04 - Bionic Beaver - 64-bit - Cloud Based Image" +ubuntu_image_name = "Ubuntu 20.04 - Focal Fossa - 64-bit - Cloud Based Image" # The public key to be used for SSH connection, please make sure, that you have the corresponding private key # @@ -144,8 +144,12 @@ def main(): # ########################################################################### + #hsfd_faafo_cloud_init_script = 'https://gogs.informatik.hs-fulda.de/srieger/cloud-computing-msc-ai-examples/raw/master/faafo/contrib/install.sh' + # testing / faafo dev branch: + hsfd_faafo_cloud_init_script = 'https://gogs.informatik.hs-fulda.de/srieger/cloud-computing-msc-ai-examples/raw/branch/dev_faafo/faafo/contrib/install.sh' + userdata = '''#!/usr/bin/env bash - curl -L -s https://gogs.informatik.hs-fulda.de/srieger/cloud-computing-msc-ai-examples/raw/master/faafo/contrib/install.sh | bash -s -- \ + curl -L -s ''' + hsfd_faafo_cloud_init_script + ''' | bash -s -- \ -i faafo -i messaging -r api -r worker -r demo ''' diff --git a/demo3-microservice.py b/demo3-microservice.py index d54cda7..9d4a1aa 100644 --- a/demo3-microservice.py +++ b/demo3-microservice.py @@ -24,8 +24,8 @@ project_name = 'CloudComp' + str(group_number) project_network = 'CloudComp' + str(group_number) + '-net' # The image to look for and use for the started instance -ubuntu_image_name = "Ubuntu 18.04 - Bionic Beaver - 64-bit - Cloud Based Image" -# TODO: Ubuntu >18.04 would require major updates to faafo example again/better option: complete rewrite of example? 
+#ubuntu_image_name = "Ubuntu 18.04 - Bionic Beaver - 64-bit - Cloud Based Image" +ubuntu_image_name = "Ubuntu 20.04 - Focal Fossa - 64-bit - Cloud Based Image" # The public key to be used for SSH connection, please make sure, that you have the corresponding private key # @@ -171,8 +171,12 @@ def main(): # # Thanks to Stefan Friedmann for finding this fix ;) + #hsfd_faafo_cloud_init_script = 'https://gogs.informatik.hs-fulda.de/srieger/cloud-computing-msc-ai-examples/raw/master/faafo/contrib/install.sh' + # testing / faafo dev branch: + hsfd_faafo_cloud_init_script = 'https://gogs.informatik.hs-fulda.de/srieger/cloud-computing-msc-ai-examples/raw/branch/dev_faafo/faafo/contrib/install.sh' + userdata = '''#!/usr/bin/env bash - curl -L -s https://gogs.informatik.hs-fulda.de/srieger/cloud-computing-msc-ai-examples/raw/master/faafo/contrib/install.sh | bash -s -- \ + curl -L -s ''' + hsfd_faafo_cloud_init_script + ''' | bash -s -- \ -i messaging -i faafo -r api rabbitmqctl add_user faafo guest rabbitmqctl set_user_tags faafo administrator @@ -231,7 +235,7 @@ def main(): ########################################################################### userdata = '''#!/usr/bin/env bash - curl -L -s https://gogs.informatik.hs-fulda.de/srieger/cloud-computing-msc-ai-examples/raw/master/faafo/contrib/install.sh | bash -s -- \ + curl -L -s ''' + hsfd_faafo_cloud_init_script + ''' | bash -s -- \ -i faafo -r worker -e 'http://%(ip_controller)s' -m 'amqp://faafo:guest@%(ip_controller)s:5672/' ''' % {'ip_controller': ip_controller} diff --git a/demo4-scale-out-add-worker.py b/demo4-scale-out-add-worker.py index d092b59..9add7bd 100644 --- a/demo4-scale-out-add-worker.py +++ b/demo4-scale-out-add-worker.py @@ -26,8 +26,8 @@ project_name = 'CloudComp' + str(group_number) project_network = 'CloudComp' + str(group_number) + '-net' # The image to look for and use for the started instance -ubuntu_image_name = "Ubuntu 18.04 - Bionic Beaver - 64-bit - Cloud Based Image" -# TODO: Ubuntu >18.04 would require major updates to faafo example again/better option: complete rewrite of example? 
+#ubuntu_image_name = "Ubuntu 18.04 - Bionic Beaver - 64-bit - Cloud Based Image" +ubuntu_image_name = "Ubuntu 20.04 - Focal Fossa - 64-bit - Cloud Based Image" # The public key to be used for SSH connection, please make sure, that you have the corresponding private key # @@ -167,13 +167,17 @@ def main(): # ########################################################################### + #hsfd_faafo_cloud_init_script = 'https://gogs.informatik.hs-fulda.de/srieger/cloud-computing-msc-ai-examples/raw/master/faafo/contrib/install.sh' + # testing / faafo dev branch: + hsfd_faafo_cloud_init_script = 'https://gogs.informatik.hs-fulda.de/srieger/cloud-computing-msc-ai-examples/raw/branch/dev_faafo/faafo/contrib/install.sh' + userdata_worker = '''#!/usr/bin/env bash - curl -L -s https://gogs.informatik.hs-fulda.de/srieger/cloud-computing-msc-ai-examples/raw/master/faafo/contrib/install.sh | bash -s -- \ + curl -L -s ''' + hsfd_faafo_cloud_init_script + ''' | bash -s -- \ -i faafo -r worker -e 'http://%(api_1_ip)s' -m 'amqp://faafo:guest@%(services_ip)s:5672/' ''' % {'api_1_ip': api_1_ip, 'services_ip': services_ip} # userdata-api-2 = '''#!/usr/bin/env bash - # curl -L -s https://gogs.informatik.hs-fulda.de/srieger/cloud-computing-msc-ai-examples/raw/master/faafo/contrib/install.sh | bash -s -- \ + # curl -L -s ''' + hsfd_faafo_cloud_init_script + ''' | bash -s -- \ # -i faafo -r worker -e 'http://%(api_2_ip)s' -m 'amqp://faafo:guest@%(services_ip)s:5672/' # ''' % {'api_2_ip': api_2_ip, 'services_ip': services_ip} @@ -185,6 +189,7 @@ def main(): ex_userdata=userdata_worker, ex_security_groups=[worker_security_group]) + print(instance_worker_3) if __name__ == '__main__': main() diff --git a/demo4-scale-out.py b/demo4-scale-out.py index 4e60d1a..56a189d 100644 --- a/demo4-scale-out.py +++ b/demo4-scale-out.py @@ -26,8 +26,8 @@ project_name = 'CloudComp' + str(group_number) project_network = 'CloudComp' + str(group_number) + '-net' # The image to look for and use for the started instance -ubuntu_image_name = "Ubuntu 18.04 - Bionic Beaver - 64-bit - Cloud Based Image" -# TODO: Ubuntu >18.04 would require major updates to faafo example again/better option: complete rewrite of example? 
+#ubuntu_image_name = "Ubuntu 18.04 - Bionic Beaver - 64-bit - Cloud Based Image" +ubuntu_image_name = "Ubuntu 20.04 - Focal Fossa - 64-bit - Cloud Based Image" # The public key to be used for SSH connection, please make sure, that you have the corresponding private key # @@ -232,8 +232,12 @@ def main(): # # Thanks to Stefan Friedmann for finding this fix ;) + #hsfd_faafo_cloud_init_script = 'https://gogs.informatik.hs-fulda.de/srieger/cloud-computing-msc-ai-examples/raw/master/faafo/contrib/install.sh' + # testing / faafo dev branch: + hsfd_faafo_cloud_init_script = 'https://gogs.informatik.hs-fulda.de/srieger/cloud-computing-msc-ai-examples/raw/branch/dev_faafo/faafo/contrib/install.sh' + userdata_service = '''#!/usr/bin/env bash - curl -L -s https://gogs.informatik.hs-fulda.de/srieger/cloud-computing-msc-ai-examples/raw/master/faafo/contrib/install.sh | bash -s -- \ + curl -L -s ''' + hsfd_faafo_cloud_init_script + ''' | bash -s -- \ -i database -i messaging rabbitmqctl add_user faafo guest rabbitmqctl set_user_tags faafo administrator @@ -259,7 +263,7 @@ def main(): ########################################################################### userdata_api = '''#!/usr/bin/env bash - curl -L -s https://gogs.informatik.hs-fulda.de/srieger/cloud-computing-msc-ai-examples/raw/master/faafo/contrib/install.sh | bash -s -- \ + curl -L -s ''' + hsfd_faafo_cloud_init_script + ''' | bash -s -- \ -i faafo -r api -m 'amqp://faafo:guest@%(services_ip)s:5672/' \ -d 'mysql+pymysql://faafo:password@%(services_ip)s:3306/faafo' ''' % {'services_ip': services_ip} @@ -301,12 +305,12 @@ def main(): ########################################################################### userdata_worker = '''#!/usr/bin/env bash - curl -L -s https://gogs.informatik.hs-fulda.de/srieger/cloud-computing-msc-ai-examples/raw/master/faafo/contrib/install.sh | bash -s -- \ + curl -L -s ''' + hsfd_faafo_cloud_init_script + ''' | bash -s -- \ -i faafo -r worker -e 'http://%(api_1_ip)s' -m 'amqp://faafo:guest@%(services_ip)s:5672/' ''' % {'api_1_ip': api_1_ip, 'services_ip': services_ip} # userdata_api-api-2 = '''#!/usr/bin/env bash - # curl -L -s https://gogs.informatik.hs-fulda.de/srieger/cloud-computing-msc-ai-examples/raw/master/faafo/contrib/install.sh | bash -s -- \ + # curl -L -s ''' + hsfd_faafo_cloud_init_script + ''' | bash -s -- \ # -i faafo -r worker -e 'http://%(api_2_ip)s' -m 'amqp://faafo:guest@%(services_ip)s:5672/' # ''' % {'api_2_ip': api_2_ip, 'services_ip': services_ip} diff --git a/faafo/contrib/install-aws.sh b/faafo/contrib/install-aws.sh index 9f402f1..309f916 100644 --- a/faafo/contrib/install-aws.sh +++ b/faafo/contrib/install-aws.sh @@ -78,6 +78,7 @@ if [[ -e /etc/os-release ]]; then if [[ $ID = 'ubuntu' || $ID = 'debian' ]]; then sudo apt-get update elif [[ $ID = 'fedora' ]]; then + # fedora currently not tested nor supported sudo dnf update -y fi @@ -89,6 +90,7 @@ if [[ -e /etc/os-release ]]; then #sudo sed -i -e "/bind-address/d" /etc/mysql/my.cnf sudo service mysql restart elif [[ $ID = 'fedora' ]]; then + # fedora currently not tested nor supported sudo dnf install -y mariadb-server python-mysql printf "[mysqld]\nbind-address = 127.0.0.1\n" | sudo tee /etc/my.cnf.d/faafo.conf sudo systemctl enable mariadb @@ -106,6 +108,7 @@ if [[ -e /etc/os-release ]]; then if [[ $ID = 'ubuntu' || $ID = 'debian' ]]; then sudo apt-get install -y rabbitmq-server elif [[ $ID = 'fedora' ]]; then + # fedora currently not tested nor supported sudo dnf install -y rabbitmq-server sudo systemctl enable rabbitmq-server sudo 
systemctl start rabbitmq-server @@ -117,6 +120,7 @@ if [[ -e /etc/os-release ]]; then if [[ $INSTALL_FAAFO -eq 1 ]]; then if [[ $ID = 'ubuntu' || $ID = 'debian' ]]; then + # TODO: needs to be updated for Ubuntu >= 20.04 sudo apt-get install -y python-dev python-pip supervisor git zlib1g-dev libmysqlclient-dev python-mysqldb # Following is needed because of # https://bugs.launchpad.net/ubuntu/+source/supervisor/+bug/1594740 @@ -131,6 +135,7 @@ if [[ -e /etc/os-release ]]; then fi fi elif [[ $ID = 'fedora' ]]; then + # fedora currently not tested nor supported sudo dnf install -y python-devel python-pip supervisor git zlib-devel mariadb-devel gcc which python-mysql sudo systemctl enable supervisord sudo systemctl start supervisord @@ -171,6 +176,7 @@ startretries=0" if [[ $ID = 'ubuntu' || $ID = 'debian' ]]; then echo "$faafo_api" | sudo tee -a /etc/supervisor/conf.d/faafo.conf elif [[ $ID = 'fedora' ]]; then + # fedora currently not tested nor supported echo "$faafo_api" | sudo tee -a /etc/supervisord.d/faafo.ini else echo "error: distribution $ID not supported" @@ -188,6 +194,7 @@ startretries=0" if [[ $ID = 'ubuntu' || $ID = 'debian' ]]; then echo "$faafo_worker" | sudo tee -a /etc/supervisor/conf.d/faafo.conf elif [[ $ID = 'fedora' ]]; then + # fedora currently not tested nor supported echo "$faafo_worker" | sudo tee -a /etc/supervisord.d/faafo.ini else echo "error: distribution $ID not supported" diff --git a/faafo/contrib/install.sh b/faafo/contrib/install.sh index e921a32..0ac9f8c 100644 --- a/faafo/contrib/install.sh +++ b/faafo/contrib/install.sh @@ -78,6 +78,7 @@ if [[ -e /etc/os-release ]]; then if [[ $ID = 'ubuntu' || $ID = 'debian' ]]; then sudo apt-get update elif [[ $ID = 'fedora' ]]; then + # fedora currently not tested nor supported sudo dnf update -y fi @@ -89,6 +90,7 @@ if [[ -e /etc/os-release ]]; then #sudo sed -i -e "/bind-address/d" /etc/mysql/my.cnf sudo service mysql restart elif [[ $ID = 'fedora' ]]; then + # fedora currently not tested nor supported sudo dnf install -y mariadb-server python3-mysql printf "[mysqld]\nbind-address = 127.0.0.1\n" | sudo tee /etc/my.cnf.d/faafo.conf sudo systemctl enable mariadb @@ -106,6 +108,7 @@ if [[ -e /etc/os-release ]]; then if [[ $ID = 'ubuntu' || $ID = 'debian' ]]; then sudo apt-get install -y rabbitmq-server elif [[ $ID = 'fedora' ]]; then + # fedora currently not tested nor supported sudo dnf install -y rabbitmq-server sudo systemctl enable rabbitmq-server sudo systemctl start rabbitmq-server @@ -117,7 +120,7 @@ if [[ -e /etc/os-release ]]; then if [[ $INSTALL_FAAFO -eq 1 ]]; then if [[ $ID = 'ubuntu' || $ID = 'debian' ]]; then - sudo apt-get install -y python3-dev python3-pip supervisor git zlib1g-dev libmysqlclient-dev python3-mysqldb + sudo apt-get install -y python3-dev python3-pip supervisor git zlib1g-dev libmysqlclient-dev python3-mysqldb python-is-python3 # Following is needed because of # https://bugs.launchpad.net/ubuntu/+source/supervisor/+bug/1594740 if [ $(lsb_release --short --codename) = xenial ]; then @@ -131,6 +134,7 @@ if [[ -e /etc/os-release ]]; then fi fi elif [[ $ID = 'fedora' ]]; then + # fedora currently not tested nor supported sudo dnf install -y python3-devel python3-pip supervisor git zlib-devel mariadb-devel gcc which python3-mysql sudo systemctl enable supervisord sudo systemctl start supervisord @@ -142,12 +146,13 @@ if [[ -e /etc/os-release ]]; then fi # HSFD changed to local repo - git clone https://gogs.informatik.hs-fulda.de/srieger/cloud-computing-msc-ai-examples + #git clone 
https://gogs.informatik.hs-fulda.de/srieger/cloud-computing-msc-ai-examples + git clone --branch dev_faafo https://gogs.informatik.hs-fulda.de/srieger/cloud-computing-msc-ai-examples cd cloud-computing-msc-ai-examples/faafo # following line required by bug 1636150 sudo pip install --upgrade pbr sudo pip install -r requirements.txt - sudo python setup.py install + sudo python3 setup.py install sudo sed -i -e "s#transport_url = .*#transport_url = $URL_MESSAGING#" /etc/faafo/faafo.conf sudo sed -i -e "s#database_url = .*#database_url = $URL_DATABASE#" /etc/faafo/faafo.conf @@ -165,6 +170,7 @@ startretries=0" if [[ $ID = 'ubuntu' || $ID = 'debian' ]]; then echo "$faafo_api" | sudo tee -a /etc/supervisor/conf.d/faafo.conf elif [[ $ID = 'fedora' ]]; then + # fedora currently not tested nor supported echo "$faafo_api" | sudo tee -a /etc/supervisord.d/faafo.ini else echo "error: distribution $ID not supported" @@ -182,6 +188,7 @@ startretries=0" if [[ $ID = 'ubuntu' || $ID = 'debian' ]]; then echo "$faafo_worker" | sudo tee -a /etc/supervisor/conf.d/faafo.conf elif [[ $ID = 'fedora' ]]; then + # fedora currently not tested nor supported echo "$faafo_worker" | sudo tee -a /etc/supervisord.d/faafo.ini else echo "error: distribution $ID not supported" -- 2.34.1 From a50038ffd5e5f0b2b7840a050c3f6ffc336cc0dc Mon Sep 17 00:00:00 2001 From: Sebastian Rieger Date: Mon, 25 Sep 2023 15:38:22 +0200 Subject: [PATCH 08/14] removed superfluous demo2 startup file, added app_context to prevent flask from crashing, changed group_number to default again, added default pubkey --- demo1-getting-started.py | 2 +- demo2-instance-with-init-script.py | 5 +- demo3-microservice.py | 5 +- demo4-scale-out-add-worker.py | 5 +- demo4-scale-out.py | 5 +- destroy-all-demo-instances.py | 2 +- faafo/demo2-instance-with-init-script.py | 230 ----------------------- faafo/faafo/api/service.py | 18 +- faafo/requirements.txt | 6 +- faafo/setup.cfg | 3 +- 10 files changed, 31 insertions(+), 250 deletions(-) delete mode 100644 faafo/demo2-instance-with-init-script.py diff --git a/demo1-getting-started.py b/demo1-getting-started.py index c07183f..a1c59ef 100644 --- a/demo1-getting-started.py +++ b/demo1-getting-started.py @@ -13,7 +13,7 @@ from libcloud.compute.types import Provider # Please use 1-29 for X in the following variable to specify your group number. (will be used for the username, # project etc., as coordinated in the lab sessions) -group_number = 30 +group_number = X ######################################################################################################################## diff --git a/demo2-instance-with-init-script.py b/demo2-instance-with-init-script.py index ae27c11..6497b44 100644 --- a/demo2-instance-with-init-script.py +++ b/demo2-instance-with-init-script.py @@ -7,7 +7,7 @@ from libcloud.compute.types import Provider # Please use 1-29 for X in the following variable to specify your group number. 
(will be used for the username, # project etc., as coordinated in the lab sessions) -group_number = 30 +group_number = X # web service endpoint of the private cloud infrastructure @@ -28,7 +28,8 @@ ubuntu_image_name = "Ubuntu 20.04 - Focal Fossa - 64-bit - Cloud Based Image" # id_rsa.pub should look like this (standard sshd pubkey format): # ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAw+J...F3w2mleybgT1w== user@HOSTNAME -keypair_name = 'CloudComp30-keypair' +#keypair_name = 'CloudComp30-keypair' +keypair_name = "srieger-pub" pub_key_file = '~/.ssh/id_rsa.pub' flavor_name = 'm1.small' diff --git a/demo3-microservice.py b/demo3-microservice.py index 9d4a1aa..4f2661a 100644 --- a/demo3-microservice.py +++ b/demo3-microservice.py @@ -11,7 +11,7 @@ from libcloud.compute.types import Provider # Please use 1-29 for X in the following variable to specify your group number. (will be used for the username, # project etc., as coordinated in the lab sessions) -group_number = 30 +group_number = X # web service endpoint of the private cloud infrastructure @@ -32,7 +32,8 @@ ubuntu_image_name = "Ubuntu 20.04 - Focal Fossa - 64-bit - Cloud Based Image" # id_rsa.pub should look like this (standard sshd pubkey format): # ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAw+J...F3w2mleybgT1w== user@HOSTNAME -keypair_name = 'srieger-pub' +#keypair_name = 'CloudComp30-keypair' +keypair_name = "srieger-pub" pub_key_file = '~/.ssh/id_rsa.pub' flavor_name = 'm1.small' diff --git a/demo4-scale-out-add-worker.py b/demo4-scale-out-add-worker.py index 9add7bd..bc73745 100644 --- a/demo4-scale-out-add-worker.py +++ b/demo4-scale-out-add-worker.py @@ -13,7 +13,7 @@ from libcloud.compute.types import Provider # Please use 1-29 for X in the following variable to specify your group number. (will be used for the username, # project etc., as coordinated in the lab sessions) -group_number = 30 +group_number = X # web service endpoint of the private cloud infrastructure @@ -34,7 +34,8 @@ ubuntu_image_name = "Ubuntu 20.04 - Focal Fossa - 64-bit - Cloud Based Image" # id_rsa.pub should look like this (standard sshd pubkey format): # ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAw+J...F3w2mleybgT1w== user@HOSTNAME -keypair_name = 'srieger-pub' +#keypair_name = 'CloudComp30-keypair' +keypair_name = "srieger-pub" pub_key_file = '~/.ssh/id_rsa.pub' flavor_name = 'm1.small' diff --git a/demo4-scale-out.py b/demo4-scale-out.py index 56a189d..7ee3f07 100644 --- a/demo4-scale-out.py +++ b/demo4-scale-out.py @@ -13,7 +13,7 @@ from libcloud.compute.types import Provider # Please use 1-29 for X in the following variable to specify your group number. (will be used for the username, # project etc., as coordinated in the lab sessions) -group_number = 30 +group_number = X # web service endpoint of the private cloud infrastructure @@ -34,7 +34,8 @@ ubuntu_image_name = "Ubuntu 20.04 - Focal Fossa - 64-bit - Cloud Based Image" # id_rsa.pub should look like this (standard sshd pubkey format): # ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAw+J...F3w2mleybgT1w== user@HOSTNAME -keypair_name = 'srieger-pub' +#keypair_name = 'CloudComp30-keypair' +keypair_name = "srieger-pub" pub_key_file = '~/.ssh/id_rsa.pub' flavor_name = 'm1.small' diff --git a/destroy-all-demo-instances.py b/destroy-all-demo-instances.py index 8146c3a..fe4aeb9 100644 --- a/destroy-all-demo-instances.py +++ b/destroy-all-demo-instances.py @@ -13,7 +13,7 @@ from libcloud.compute.types import Provider # Please use 1-29 for X in the following variable to specify your group number. 
(will be used for the username, # project etc., as coordinated in the lab sessions) -group_number = 30 +group_number = X # web service endpoint of the private cloud infrastructure diff --git a/faafo/demo2-instance-with-init-script.py b/faafo/demo2-instance-with-init-script.py deleted file mode 100644 index d7f7cc9..0000000 --- a/faafo/demo2-instance-with-init-script.py +++ /dev/null @@ -1,230 +0,0 @@ -# import getpass -# import os - -from libcloud.compute.providers import get_driver -from libcloud.compute.types import Provider - -# Please use 1-29 for X in the following variable to specify your group number. (will be used for the username, -# project etc., as coordinated in the lab sessions) - -group_number = 30 - - -# web service endpoint of the private cloud infrastructure -auth_url = 'https://private-cloud.informatik.hs-fulda.de:5000' -# your username in OpenStack -auth_username = 'CloudComp' + str(group_number) -# your project in OpenStack -project_name = 'CloudComp' + str(group_number) -# A network in the project the started instance will be attached to -project_network = 'CloudComp' + str(group_number) + '-net' - -# The image to look for and use for the started instance -ubuntu_image_name = "Ubuntu 18.04 - Bionic Beaver - 64-bit - Cloud Based Image" -# TODO: Ubuntu >18.04 would require major updates to faafo example again/better option: complete rewrite of example? - -# The public key to be used for SSH connection, please make sure, that you have the corresponding private key -# -# id_rsa.pub should look like this (standard sshd pubkey format): -# ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAw+J...F3w2mleybgT1w== user@HOSTNAME - -keypair_name = 'CloudComp30-keypair' -pub_key_file = '~/.ssh/id_rsa.pub' - -flavor_name = 'm1.small' - - -# default region -region_name = 'RegionOne' -# domain to use, "default" for local accounts, "hsfulda" for RZ LDAP, e.g., using fdaiXXXX as auth_username -# domain_name = "default" - - -def main(): - ########################################################################### - # - # get credentials - # - ########################################################################### - - # if "OS_PASSWORD" in os.environ: - # auth_password = os.environ["OS_PASSWORD"] - # else: - # auth_password = getpass.getpass("Enter your OpenStack password:") - auth_password = "demo" - - ########################################################################### - # - # create connection - # - ########################################################################### - - provider = get_driver(Provider.OPENSTACK) - conn = provider(auth_username, - auth_password, - ex_force_auth_url=auth_url, - ex_force_auth_version='3.x_password', - ex_tenant_name=project_name, - ex_force_service_region=region_name) - # ex_domain_name=domain_name) - - ########################################################################### - # - # get image, flavor, network for instance creation - # - ########################################################################### - images = conn.list_images() - image = '' - for img in images: - if img.name == ubuntu_image_name: - image = img - - flavors = conn.list_sizes() - flavor = '' - for flav in flavors: - if flav.name == flavor_name: - flavor = conn.ex_get_size(flav.id) - - networks = conn.ex_list_networks() - network = '' - for net in networks: - if net.name == project_network: - network = net - - ########################################################################### - # - # create keypair dependency - # - 
########################################################################### - - print('Checking for existing SSH key pair...') - keypair_exists = False - for keypair in conn.list_key_pairs(): - if keypair.name == keypair_name: - keypair_exists = True - - if keypair_exists: - print(('Keypair ' + keypair_name + ' already exists. Skipping import.')) - else: - print('adding keypair...') - conn.import_key_pair_from_file(keypair_name, pub_key_file) - - for keypair in conn.list_key_pairs(): - print(keypair) - - ########################################################################### - # - # create security group dependency - # - ########################################################################### - - print('Checking for existing security group...') - security_group_name = 'all-in-one' - security_group_exists = False - all_in_one_security_group = '' - for security_group in conn.ex_list_security_groups(): - if security_group.name == security_group_name: - all_in_one_security_group = security_group - security_group_exists = True - - if security_group_exists: - print(('Security Group ' + all_in_one_security_group.name + ' already exists. Skipping creation.')) - else: - all_in_one_security_group = conn.ex_create_security_group(security_group_name, - 'network access for all-in-one application.') - conn.ex_create_security_group_rule(all_in_one_security_group, 'TCP', 80, 80) - conn.ex_create_security_group_rule(all_in_one_security_group, 'TCP', 22, 22) - - for security_group in conn.ex_list_security_groups(): - print(security_group) - - ########################################################################### - # - # create all-in-one instance - # - ########################################################################### - - userdata = '''#!/usr/bin/env bash - curl -L -s https://gogs.informatik.hs-fulda.de/srieger/cloud-computing-msc-ai-examples/raw/master/faafo/contrib/install.sh | bash -s -- \ - -i faafo -i messaging -r api -r worker -r demo - ''' - - print('Checking for existing instance...') - instance_name = 'all-in-one' - instance_exists = False - testing_instance = '' - for instance in conn.list_nodes(): - if instance.name == instance_name: - testing_instance = instance - instance_exists = True - - if instance_exists: - print(('Instance ' + testing_instance.name + ' already exists. 
Skipping creation.')) - exit() - else: - print('Starting new all-in-one instance and wait until it is running...') - testing_instance = conn.create_node(name=instance_name, - image=image, - size=flavor, - networks=[network], - ex_keyname=keypair_name, - ex_userdata=userdata, - ex_security_groups=[all_in_one_security_group]) - conn.wait_until_running(nodes=[testing_instance], timeout=120, ssh_interface='private_ips') - - ########################################################################### - # - # assign all-in-one instance floating ip - # - ########################################################################### - - private_ip = None - if len(testing_instance.private_ips): - private_ip = testing_instance.private_ips[0] - print(('Private IP found: {}'.format(private_ip))) - - public_ip = None - if len(testing_instance.public_ips): - public_ip = testing_instance.public_ips[0] - print(('Public IP found: {}'.format(public_ip))) - - print('Checking for unused Floating IP...') - unused_floating_ip = None - for floating_ip in conn.ex_list_floating_ips(): - if not floating_ip.node_id: - unused_floating_ip = floating_ip - break - - if not unused_floating_ip and len(conn.ex_list_floating_ip_pools()): - pool = conn.ex_list_floating_ip_pools()[0] - print(('Allocating new Floating IP from pool: {}'.format(pool))) - unused_floating_ip = pool.create_floating_ip() - - if public_ip: - print(('Instance ' + testing_instance.name + ' already has a public ip. Skipping attachment.')) - elif unused_floating_ip: - conn.ex_attach_floating_ip_to_node(testing_instance, unused_floating_ip) - - actual_ip_address = None - if public_ip: - actual_ip_address = public_ip - elif unused_floating_ip: - actual_ip_address = unused_floating_ip.ip_address - elif private_ip: - actual_ip_address = private_ip - - print('\n') - print(('The Fractals app will be deployed to http://{}\n'.format(actual_ip_address))) - - print('You can use ssh to login to the instance using your private key. Default user name for official Ubuntu\n' - 'Cloud Images is: ubuntu, so you can use, e.g.: "ssh -i ~/.ssh/id_rsa ubuntu@" if your private\n' - 'key is in the default location.\n\n' - 'After login, you can list or "ssh ubuntu@" available fractals using "faafo list". 
To request\n' - 'the generation of new fractals, you can use "faafo create".\n\n' - 'You can also see other options to use the faafo example cloud service using "faafo -h".\n\n' - 'If you cannot start faafo command and/or do not see the webpage, you can check the Instance Console Log of\n' - 'the instance, e.g., in OpenStack web interface.') - - -if __name__ == '__main__': - main() diff --git a/faafo/faafo/api/service.py b/faafo/faafo/api/service.py index 0a96f91..2ce438a 100644 --- a/faafo/faafo/api/service.py +++ b/faafo/faafo/api/service.py @@ -60,7 +60,10 @@ template_path = resource_filename(__name__, "templates") app = flask.Flask('faafo.api', template_folder=template_path) app.config['DEBUG'] = CONF.debug app.config['SQLALCHEMY_DATABASE_URI'] = CONF.database_url -db = SQLAlchemy(app) + +with app.app_context(): + db = SQLAlchemy(app) + Bootstrap(app) @@ -95,7 +98,9 @@ class Fractal(db.Model): return '' % self.uuid -db.create_all() +with app.app_context(): + db.create_all() + manager = APIManager(app, flask_sqlalchemy_db=db) connection = Connection(CONF.transport_url) @@ -139,8 +144,9 @@ def generate_fractal(**kwargs): def main(): - manager.create_api(Fractal, methods=['GET', 'POST', 'DELETE', 'PUT'], - postprocessors={'POST': [generate_fractal]}, - exclude_columns=['image'], - url_prefix='/v1') + with app.app_context(): + manager.create_api(Fractal, methods=['GET', 'POST', 'DELETE', 'PUT'], + postprocessors={'POST': [generate_fractal]}, + exclude_columns=['image'], + url_prefix='/v1') app.run(host=CONF.listen_address, port=CONF.bind_port) diff --git a/faafo/requirements.txt b/faafo/requirements.txt index d2c9d2c..c996f94 100644 --- a/faafo/requirements.txt +++ b/faafo/requirements.txt @@ -4,8 +4,10 @@ positional iso8601 anyjson>=0.3.3 eventlet>=0.17.4 -PyMySQL>=0.6.2,<0.7 # 0.7 design change breaks faafo, MIT License -SQLAlchemy>1.3,<1.4 # 1.4 breaks faafo list +#PyMySQL>=0.6.2,<0.7 # 0.7 design change breaks faafo, MIT License +PyMySQL>=0.6.2 +#SQLAlchemy>1.3,<1.4 # 1.4 breaks faafo list +SQLAlchemy>=2.0.16 Pillow==2.4.0 # MIT requests>=2.5.2 Flask-Bootstrap diff --git a/faafo/setup.cfg b/faafo/setup.cfg index 47e621e..df7cb1b 100644 --- a/faafo/setup.cfg +++ b/faafo/setup.cfg @@ -13,8 +13,7 @@ classifier = License :: OSI Approved :: Apache Software License Operating System :: POSIX :: Linux Programming Language :: Python - Programming Language :: Python :: 2 - Programming Language :: Python :: 2.7 + Programming Language :: Python :: 3 [files] packages = -- 2.34.1 From 0101ae4b97ae42b94bf39238c522eba3ba347f85 Mon Sep 17 00:00:00 2001 From: Sebastian Rieger Date: Wed, 27 Sep 2023 10:53:18 +0200 Subject: [PATCH 09/14] updated deps for flask-restless-ng and SQLAlchemy --- faafo/faafo/api/service.py | 8 ++++---- faafo/requirements.txt | 35 +++++++++++++++++++++++------------ 2 files changed, 27 insertions(+), 16 deletions(-) diff --git a/faafo/faafo/api/service.py b/faafo/faafo/api/service.py index 2ce438a..f0c7178 100644 --- a/faafo/faafo/api/service.py +++ b/faafo/faafo/api/service.py @@ -101,7 +101,7 @@ class Fractal(db.Model): with app.app_context(): db.create_all() -manager = APIManager(app, flask_sqlalchemy_db=db) +manager = APIManager(app=app, session=db.session) connection = Connection(CONF.transport_url) @@ -110,8 +110,8 @@ connection = Connection(CONF.transport_url) @app.route('/index/', methods=['GET']) def index(page=1): fractals = Fractal.query.filter( - (Fractal.checksum != None) & (Fractal.size != None)).paginate( # noqa - page, 5, error_out=False) + (Fractal.checksum != 
None) & (Fractal.size != None)).paginate( + page=page, per_page=5) return flask.render_template('index.html', fractals=fractals) @@ -147,6 +147,6 @@ def main(): with app.app_context(): manager.create_api(Fractal, methods=['GET', 'POST', 'DELETE', 'PUT'], postprocessors={'POST': [generate_fractal]}, - exclude_columns=['image'], + exclude=['image'], url_prefix='/v1') app.run(host=CONF.listen_address, port=CONF.bind_port) diff --git a/faafo/requirements.txt b/faafo/requirements.txt index c996f94..2f9abf2 100644 --- a/faafo/requirements.txt +++ b/faafo/requirements.txt @@ -1,20 +1,31 @@ -pbr>=1.6 +#pbr>=1.6 +pbr pytz positional iso8601 -anyjson>=0.3.3 -eventlet>=0.17.4 +#anyjson>=0.3.3 +anyjson +#eventlet>=0.17.4 +eventlet #PyMySQL>=0.6.2,<0.7 # 0.7 design change breaks faafo, MIT License -PyMySQL>=0.6.2 +#PyMySQL>=0.6.2 +PyMySQL #SQLAlchemy>1.3,<1.4 # 1.4 breaks faafo list -SQLAlchemy>=2.0.16 -Pillow==2.4.0 # MIT -requests>=2.5.2 +#SQLAlchemy>=2.0.16 +SQLAlchemy +#Pillow==2.4.0 # MIT +Pillow +#requests>=2.5.2 +requests Flask-Bootstrap Flask -flask-restless +flask-restless-ng flask-sqlalchemy -oslo.config>=2.3.0 # Apache-2.0 -oslo.log>=1.8.0 # Apache-2.0 -PrettyTable>=0.7,<0.8 -kombu>=3.0.7 +#oslo.config>=2.3.0 # Apache-2.0 +#oslo.log>=1.8.0 # Apache-2.0 +#PrettyTable>=0.7,<0.8 +#kombu>=3.0.7 +oslo.config +oslo.log +PrettyTable +kombu \ No newline at end of file -- 2.34.1 From df64887c19158ef4ab2315044dfb0a845b28220c Mon Sep 17 00:00:00 2001 From: Sebastian Rieger Date: Mon, 18 Mar 2024 11:24:30 +0000 Subject: [PATCH 10/14] fixed api to use recent version of flask-restless-ng --- faafo/bin/faafo | 88 +++++++++++++++++----------- faafo/contrib/install-aws.sh | 4 ++ faafo/contrib/install.sh | 4 ++ faafo/faafo/api/service.py | 28 ++++++--- faafo/faafo/api/templates/index.html | 10 ++++ faafo/faafo/worker/service.py | 31 ++++++---- 6 files changed, 112 insertions(+), 53 deletions(-) mode change 100644 => 100755 faafo/bin/faafo mode change 100644 => 100755 faafo/contrib/install-aws.sh mode change 100644 => 100755 faafo/contrib/install.sh diff --git a/faafo/bin/faafo b/faafo/bin/faafo old mode 100644 new mode 100755 index a90f50f..eceee54 --- a/faafo/bin/faafo +++ b/faafo/bin/faafo @@ -12,7 +12,6 @@ # License for the specific language governing permissions and limitations # under the License. 
-import copy import json import random import uuid @@ -75,13 +74,19 @@ def get_random_task(): float(CONF.command.max_yb)) task = { - 'uuid': str(uuid.uuid4()), - 'width': width, - 'height': height, - 'iterations': iterations, 'xa': xa, - 'xb': xb, - 'ya': ya, - 'yb': yb + 'data': { + 'type': 'fractal', + 'attributes': { + 'uuid': str(uuid.uuid4()), + 'width': width, + 'height': height, + 'iterations': iterations, + 'xa': xa, + 'xb': xb, + 'ya': ya, + 'yb': yb + } + } } return task @@ -93,25 +98,31 @@ def do_get_fractal(): def do_show_fractal(): LOG.info("showing fractal %s" % CONF.command.uuid) + headers = {'Content-Type': 'application/vnd.api+json', + 'Accept': 'application/vnd.api+json'} result = requests.get("%s/v1/fractal/%s" % - (CONF.endpoint_url, CONF.command.uuid)) + (CONF.endpoint_url, CONF.command.uuid), + headers=headers) + LOG.debug("result: %s" % result.text) + if result.status_code == 200: data = json.loads(result.text) + fractal_data = data['data']['attributes'] output = PrettyTable(["Parameter", "Value"]) output.align["Parameter"] = "l" output.align["Value"] = "l" - output.add_row(["uuid", data['uuid']]) - output.add_row(["duration", "%f seconds" % data['duration']]) + output.add_row(["uuid", fractal_data['uuid']]) + output.add_row(["duration", "%f seconds" % fractal_data['duration']]) output.add_row(["dimensions", "%d x %d pixels" % - (data['width'], data['height'])]) - output.add_row(["iterations", data['iterations']]) - output.add_row(["xa", data['xa']]) - output.add_row(["xb", data['xb']]) - output.add_row(["ya", data['ya']]) - output.add_row(["yb", data['yb']]) - output.add_row(["size", "%d bytes" % data['size']]) - output.add_row(["checksum", data['checksum']]) - output.add_row(["generated_by", data['generated_by']]) + (fractal_data['width'], fractal_data['height'])]) + output.add_row(["iterations", fractal_data['iterations']]) + output.add_row(["xa", fractal_data['xa']]) + output.add_row(["xb", fractal_data['xb']]) + output.add_row(["ya", fractal_data['ya']]) + output.add_row(["yb", fractal_data['yb']]) + output.add_row(["size", "%d bytes" % fractal_data['size']]) + output.add_row(["checksum", fractal_data['checksum']]) + output.add_row(["generated_by", fractal_data['generated_by']]) print(output) else: LOG.error("fractal '%s' not found" % CONF.command.uuid) @@ -123,34 +134,43 @@ def do_list_fractals(): fractals = get_fractals() output = PrettyTable(["UUID", "Dimensions", "Filesize"]) for fractal in fractals: + fractal_data = fractal['attributes'] output.add_row([ - fractal["uuid"], - "%d x %d pixels" % (fractal["width"], fractal["height"]), - "%d bytes" % (fractal["size"] or 0), + fractal_data["uuid"], + "%d x %d pixels" % (fractal_data["width"], fractal_data["height"]), + "%d bytes" % (fractal_data["size"] or 0), ]) print(output) def get_fractals(page=1): - result = requests.get("%s/v1/fractal?page=%d" % - (CONF.endpoint_url, page)) + headers = {'Content-Type': 'application/vnd.api+json', + 'Accept': 'application/vnd.api+json'} + result = requests.get("%s/v1/fractal?page=%d&page[size]=10" % + (CONF.endpoint_url, page), + headers=headers) + LOG.debug("result: %s" % result.text) + fractals = [] if result.status_code == 200: data = json.loads(result.text) - if page < data['total_pages']: - fractals = data['objects'] + get_fractals(page + 1) + if (page * 10) < data['meta']['total']: + fractals = data['data'] + get_fractals(page + 1) else: - return data['objects'] + return data['data'] return fractals def do_delete_fractal(): LOG.info("deleting fractal %s" % 
CONF.command.uuid) + headers = {'Content-Type': 'application/vnd.api+json', + 'Accept': 'application/vnd.api+json'} result = requests.delete("%s/v1/fractal/%s" % - (CONF.endpoint_url, CONF.command.uuid)) - LOG.debug("result: %s" %result) + (CONF.endpoint_url, CONF.command.uuid), + headers=headers) + LOG.debug("result: %s" % result.text) def do_create_fractal(): @@ -164,11 +184,11 @@ def do_create_fractal(): for i in range(0, number): task = get_random_task() LOG.debug("created task %s" % task) - # NOTE(berendt): only necessary when using requests < 2.4.2 - headers = {'Content-type': 'application/json', - 'Accept': 'text/plain'} - requests.post("%s/v1/fractal" % CONF.endpoint_url, + headers = {'Content-Type': 'application/vnd.api+json', + 'Accept': 'application/vnd.api+json'} + resp = requests.post("%s/v1/fractal" % CONF.endpoint_url, json.dumps(task), headers=headers) + LOG.debug("resp: %s" % resp.text) def add_command_parsers(subparsers): diff --git a/faafo/contrib/install-aws.sh b/faafo/contrib/install-aws.sh old mode 100644 new mode 100755 index 309f916..b7515a5 --- a/faafo/contrib/install-aws.sh +++ b/faafo/contrib/install-aws.sh @@ -107,6 +107,10 @@ if [[ -e /etc/os-release ]]; then if [[ $INSTALL_MESSAGING -eq 1 ]]; then if [[ $ID = 'ubuntu' || $ID = 'debian' ]]; then sudo apt-get install -y rabbitmq-server + # fixes for rabbitmq setup + sudo rabbitmqctl add_user faafo guest + sudo rabbitmqctl set_user_tags faafo administrator + sudo rabbitmqctl set_permissions -p / faafo ".*" ".*" ".*" elif [[ $ID = 'fedora' ]]; then # fedora currently not tested nor supported sudo dnf install -y rabbitmq-server diff --git a/faafo/contrib/install.sh b/faafo/contrib/install.sh old mode 100644 new mode 100755 index 0ac9f8c..b763407 --- a/faafo/contrib/install.sh +++ b/faafo/contrib/install.sh @@ -107,6 +107,10 @@ if [[ -e /etc/os-release ]]; then if [[ $INSTALL_MESSAGING -eq 1 ]]; then if [[ $ID = 'ubuntu' || $ID = 'debian' ]]; then sudo apt-get install -y rabbitmq-server + # fixes for rabbitmq setup + sudo rabbitmqctl add_user faafo guest + sudo rabbitmqctl set_user_tags faafo administrator + sudo rabbitmqctl set_permissions -p / faafo ".*" ".*" ".*" elif [[ $ID = 'fedora' ]]; then # fedora currently not tested nor supported sudo dnf install -y rabbitmq-server diff --git a/faafo/faafo/api/service.py b/faafo/faafo/api/service.py index f0c7178..00f2e98 100644 --- a/faafo/faafo/api/service.py +++ b/faafo/faafo/api/service.py @@ -13,6 +13,7 @@ import base64 import copy import io +import socket from pkg_resources import resource_filename import flask @@ -109,10 +110,11 @@ connection = Connection(CONF.transport_url) @app.route('/index', methods=['GET']) @app.route('/index/', methods=['GET']) def index(page=1): + hostname = socket.gethostname() fractals = Fractal.query.filter( (Fractal.checksum != None) & (Fractal.size != None)).paginate( page=page, per_page=5) - return flask.render_template('index.html', fractals=fractals) + return flask.render_template('index.html', fractals=fractals, hostname=hostname) @app.route('/fractal/', methods=['GET']) @@ -124,8 +126,8 @@ def get_fractal(fractalid): response.status_code = 404 else: image_data = base64.b64decode(fractal.image) - image = Image.open(io.StringIO(image_data)) - output = io.StringIO() + image = Image.open(io.BytesIO(image_data)) + output = io.BytesIO() image.save(output, "PNG") image.seek(0) response = flask.make_response(output.getvalue()) @@ -135,6 +137,7 @@ def get_fractal(fractalid): def generate_fractal(**kwargs): + print("Postprocessor called!" 
+ str(kwargs)) with producers[connection].acquire(block=True) as producer: producer.publish(kwargs['result'], serializer='json', @@ -142,11 +145,22 @@ def generate_fractal(**kwargs): declare=[queues.task_exchange], routing_key='normal') +def convert_image_to_binary(**kwargs): + print("Preprocessor call: " + str(kwargs)) + if 'image' in kwargs['data']['data']['attributes']: + print("Converting image to binary...") + kwargs['data']['data']['attributes']['image'] = str(kwargs['data']['data']['attributes']['image']).encode("ascii") + #print("Preprocessor called!" + str(kwargs)) + #return kwargs def main(): + print("Starting API server - new...") with app.app_context(): - manager.create_api(Fractal, methods=['GET', 'POST', 'DELETE', 'PUT'], - postprocessors={'POST': [generate_fractal]}, + manager.create_api(Fractal, methods=['GET', 'POST', 'DELETE', 'PATCH'], + postprocessors={'POST_RESOURCE': [generate_fractal]}, + preprocessors={'PATCH_RESOURCE': [convert_image_to_binary]}, exclude=['image'], - url_prefix='/v1') - app.run(host=CONF.listen_address, port=CONF.bind_port) + url_prefix='/v1', + allow_client_generated_ids=True) + app.run(host=CONF.listen_address, port=CONF.bind_port, debug=True) + diff --git a/faafo/faafo/api/templates/index.html b/faafo/faafo/api/templates/index.html index cc4a44a..acc946f 100644 --- a/faafo/faafo/api/templates/index.html +++ b/faafo/faafo/api/templates/index.html @@ -57,4 +57,14 @@ yb = {{ fractal.yb }} {% endfor %} {{render_pagination(fractals)}} + +
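flask-restless-ng speaks JSON:API, which is why the CLI and API code above now exchange documents wrapped in a data/type/attributes envelope and send application/vnd.api+json headers. A short sketch of creating one fractal task against the new API, mirroring get_random_task() and do_create_fractal() from faafo/bin/faafo; the endpoint URL and the parameter values are assumptions for illustration:

    # Sketch only, not part of the patch: POST one fractal task in the JSON:API
    # format expected by flask-restless-ng. Endpoint URL and fractal parameters
    # are assumed values; the headers and envelope match faafo/bin/faafo.
    import json
    import uuid

    import requests

    endpoint_url = 'http://localhost'           # assumed faafo API endpoint

    task = {
        'data': {
            'type': 'fractal',                  # resource name registered via create_api()
            'attributes': {
                'uuid': str(uuid.uuid4()),      # client-side generated uuid, as in get_random_task()
                'width': 256,                   # assumed demo values below
                'height': 256,
                'iterations': 128,
                'xa': -2.0, 'xb': 2.0,
                'ya': -2.0, 'yb': 2.0,
            }
        }
    }

    headers = {'Content-Type': 'application/vnd.api+json',
               'Accept': 'application/vnd.api+json'}
    resp = requests.post('%s/v1/fractal' % endpoint_url,
                         json.dumps(task), headers=headers)
    print(resp.status_code, resp.text)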
+ Rendered by server: {{hostname}} +
{% endblock %} diff --git a/faafo/faafo/worker/service.py b/faafo/faafo/worker/service.py index d1bb193..0ab6ce6 100644 --- a/faafo/faafo/worker/service.py +++ b/faafo/faafo/worker/service.py @@ -110,7 +110,8 @@ class Worker(ConsumerMixin): accept=['json'], callbacks=[self.process])] - def process(self, task, message): + def process(self, task_def, message): + task = task_def['data']['attributes'] LOG.info("processing task %s" % task['uuid']) LOG.debug(task) start_time = time.time() @@ -136,21 +137,27 @@ class Worker(ConsumerMixin): LOG.debug("removed temporary file %s" % filename) result = { - 'uuid': task['uuid'], - 'duration': elapsed_time, - 'image': image, - 'checksum': checksum, - 'size': size, - 'generated_by': socket.gethostname() + 'data': { + 'type': 'fractal', + 'id': task['uuid'], + 'attributes': { + 'uuid': task['uuid'], + 'duration': elapsed_time, + 'image': image.decode("ascii"), + 'checksum': checksum, + 'size': size, + 'generated_by': socket.gethostname() + } + } } - # NOTE(berendt): only necessary when using requests < 2.4.2 - headers = {'Content-type': 'application/json', - 'Accept': 'text/plain'} - - requests.put("%s/v1/fractal/%s" % + headers = {'Content-Type': 'application/vnd.api+json', + 'Accept': 'application/vnd.api+json'} + + resp = requests.patch("%s/v1/fractal/%s" % (CONF.endpoint_url, str(task['uuid'])), json.dumps(result), headers=headers) + LOG.debug("Result: %s" % resp.text) message.ack() return result -- 2.34.1 From 05d363ca67322070453f1d63aa6ee75787ec9f24 Mon Sep 17 00:00:00 2001 From: Sebastian Rieger Date: Mon, 18 Mar 2024 14:38:45 +0100 Subject: [PATCH 11/14] removed anyjson, refactoring based on pylint recommendations --- faafo/faafo/api/service.py | 16 ++++++++-------- faafo/faafo/worker/service.py | 13 ++++++++----- faafo/requirements.txt | 2 +- 3 files changed, 17 insertions(+), 14 deletions(-) diff --git a/faafo/faafo/api/service.py b/faafo/faafo/api/service.py index 00f2e98..8843999 100644 --- a/faafo/faafo/api/service.py +++ b/faafo/faafo/api/service.py @@ -112,7 +112,7 @@ connection = Connection(CONF.transport_url) def index(page=1): hostname = socket.gethostname() fractals = Fractal.query.filter( - (Fractal.checksum != None) & (Fractal.size != None)).paginate( + (Fractal.checksum is not None) & (Fractal.size is not None)).paginate( page=page, per_page=5) return flask.render_template('index.html', fractals=fractals, hostname=hostname) @@ -137,7 +137,7 @@ def get_fractal(fractalid): def generate_fractal(**kwargs): - print("Postprocessor called!" + str(kwargs)) + LOG.debug("Postprocessor called!" + str(kwargs)) with producers[connection].acquire(block=True) as producer: producer.publish(kwargs['result'], serializer='json', @@ -145,13 +145,14 @@ def generate_fractal(**kwargs): declare=[queues.task_exchange], routing_key='normal') + def convert_image_to_binary(**kwargs): - print("Preprocessor call: " + str(kwargs)) + LOG.debug("Preprocessor call: " + str(kwargs)) if 'image' in kwargs['data']['data']['attributes']: - print("Converting image to binary...") - kwargs['data']['data']['attributes']['image'] = str(kwargs['data']['data']['attributes']['image']).encode("ascii") - #print("Preprocessor called!" 
+ str(kwargs)) - #return kwargs + LOG.debug("Converting image to binary...") + kwargs['data']['data']['attributes']['image'] = \ + str(kwargs['data']['data']['attributes']['image']).encode("ascii") + def main(): print("Starting API server - new...") @@ -163,4 +164,3 @@ def main(): url_prefix='/v1', allow_client_generated_ids=True) app.run(host=CONF.listen_address, port=CONF.bind_port, debug=True) - diff --git a/faafo/faafo/worker/service.py b/faafo/faafo/worker/service.py index 0ab6ce6..edd4784 100644 --- a/faafo/faafo/worker/service.py +++ b/faafo/faafo/worker/service.py @@ -19,12 +19,13 @@ import copy import hashlib import json import os -from PIL import Image import random import socket import tempfile import time +from PIL import Image + from kombu.mixins import ConsumerMixin from oslo_config import cfg from oslo_log import log @@ -152,11 +153,13 @@ class Worker(ConsumerMixin): } headers = {'Content-Type': 'application/vnd.api+json', - 'Accept': 'application/vnd.api+json'} - + 'Accept': 'application/vnd.api+json'} + resp = requests.patch("%s/v1/fractal/%s" % - (CONF.endpoint_url, str(task['uuid'])), - json.dumps(result), headers=headers) + (CONF.endpoint_url, str(task['uuid'])), + json.dumps(result), + headers=headers, + timeout=30) LOG.debug("Result: %s" % resp.text) message.ack() diff --git a/faafo/requirements.txt b/faafo/requirements.txt index 2f9abf2..fd17007 100644 --- a/faafo/requirements.txt +++ b/faafo/requirements.txt @@ -4,7 +4,7 @@ pytz positional iso8601 #anyjson>=0.3.3 -anyjson +#anyjson #eventlet>=0.17.4 eventlet #PyMySQL>=0.6.2,<0.7 # 0.7 design change breaks faafo, MIT License -- 2.34.1 From f8a675fbd128abf5e9db80b29710f0e98e5b44d0 Mon Sep 17 00:00:00 2001 From: Sebastian Rieger Date: Tue, 19 Mar 2024 14:08:53 +0100 Subject: [PATCH 12/14] changed from mysql to mariadb to fix privilege probs --- faafo/contrib/install.sh | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/faafo/contrib/install.sh b/faafo/contrib/install.sh index b763407..2951daa 100755 --- a/faafo/contrib/install.sh +++ b/faafo/contrib/install.sh @@ -84,11 +84,11 @@ if [[ -e /etc/os-release ]]; then if [[ $INSTALL_DATABASE -eq 1 ]]; then if [[ $ID = 'ubuntu' || $ID = 'debian' ]]; then - sudo DEBIAN_FRONTEND=noninteractive apt-get install -y mysql-server python3-mysqldb + sudo DEBIAN_FRONTEND=noninteractive apt-get install -y mariadb-server python3-mysqldb # HSFD changes for Ubuntu 18.04 - sudo sed -i -e "/bind-address/d" /etc/mysql/mysql.conf.d/mysqld.cnf - #sudo sed -i -e "/bind-address/d" /etc/mysql/my.cnf - sudo service mysql restart + #sudo sed -i -e "/bind-address/d" /etc/mysql/mysql.conf.d/mysqld.cnf + ##sudo sed -i -e "/bind-address/d" /etc/mysql/my.cnf + sudo systemctl restart mariadb elif [[ $ID = 'fedora' ]]; then # fedora currently not tested nor supported sudo dnf install -y mariadb-server python3-mysql @@ -99,7 +99,7 @@ if [[ -e /etc/os-release ]]; then echo "error: distribution $ID not supported" exit 1 fi - sudo mysqladmin password password + sudo mariadb-admin password password sudo mysql -uroot -ppassword mysql -e "CREATE DATABASE IF NOT EXISTS faafo; GRANT ALL PRIVILEGES ON faafo.* TO 'faafo'@'%' IDENTIFIED BY 'password';" URL_DATABASE='mysql://root:password@localhost/faafo' fi -- 2.34.1 From eeb9bbf43ce2a2a5bd77c4f9296be316e0b6496e Mon Sep 17 00:00:00 2001 From: Sebastian Rieger Date: Tue, 19 Mar 2024 14:41:26 +0100 Subject: [PATCH 13/14] bumped default distro in demos, bound mariadb to all ips, added template explaination --- demo1-getting-started.py | 4 ++-- 
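On the worker side, the patches above switch the result upload from PUT with plain JSON to PATCH with a JSON:API document and add a request timeout. A compact sketch of that exchange, mirroring faafo/faafo/worker/service.py; the endpoint, uuid and result values are placeholder assumptions:

    # Sketch only, not part of the patches: upload a finished fractal as a JSON:API
    # document via HTTP PATCH, as the reworked worker does. Endpoint, uuid and the
    # result values are assumed placeholders.
    import json
    import socket

    import requests

    endpoint_url = 'http://localhost'                      # assumed API endpoint
    task_uuid = '4dfc28c1-6b7c-4e6f-9a6f-2dcd59c4f6b2'     # assumed task uuid

    result = {
        'data': {
            'type': 'fractal',
            'id': task_uuid,                 # resource id matches the task's uuid
            'attributes': {
                'uuid': task_uuid,
                'duration': 1.23,            # assumed: elapsed generation time in seconds
                'image': 'aGVsbG8=',         # assumed: base64-encoded PNG, ascii-decoded
                'checksum': 'deadbeef',      # assumed: sha256 hex digest of the file
                'size': 5,                   # assumed: file size in bytes
                'generated_by': socket.gethostname(),
            }
        }
    }

    headers = {'Content-Type': 'application/vnd.api+json',
               'Accept': 'application/vnd.api+json'}
    resp = requests.patch('%s/v1/fractal/%s' % (endpoint_url, task_uuid),
                          json.dumps(result),
                          headers=headers,
                          timeout=30)        # timeout added by the pylint refactoring patch
    print(resp.status_code)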
demo2-instance-with-init-script.py | 4 ++-- demo3-microservice.py | 10 ++-------- demo4-scale-out-add-worker.py | 4 ++-- demo4-scale-out.py | 13 ++++--------- faafo/contrib/install.sh | 4 +++- faafo/faafo/api/templates/index.html | 4 ++-- 7 files changed, 17 insertions(+), 26 deletions(-) diff --git a/demo1-getting-started.py b/demo1-getting-started.py index a1c59ef..86d11f9 100644 --- a/demo1-getting-started.py +++ b/demo1-getting-started.py @@ -32,8 +32,8 @@ project_name = 'CloudComp' + str(group_number) project_network = 'CloudComp' + str(group_number) + '-net' # The image to look for and use for the started instance -#ubuntu_image_name = "Ubuntu 18.04 - Bionic Beaver - 64-bit - Cloud Based Image" -ubuntu_image_name = "Ubuntu 20.04 - Focal Fossa - 64-bit - Cloud Based Image" +#ubuntu_image_name = "Ubuntu 20.04 - Focal Fossa - 64-bit - Cloud Based Image" +ubuntu_image_name = "Ubuntu 22.04 - Jammy Jellyfish - 64-bit - Cloud Based Image" # default region region_name = 'RegionOne' diff --git a/demo2-instance-with-init-script.py b/demo2-instance-with-init-script.py index 6497b44..dc5130c 100644 --- a/demo2-instance-with-init-script.py +++ b/demo2-instance-with-init-script.py @@ -20,8 +20,8 @@ project_name = 'CloudComp' + str(group_number) project_network = 'CloudComp' + str(group_number) + '-net' # The image to look for and use for the started instance -#ubuntu_image_name = "Ubuntu 18.04 - Bionic Beaver - 64-bit - Cloud Based Image" -ubuntu_image_name = "Ubuntu 20.04 - Focal Fossa - 64-bit - Cloud Based Image" +#ubuntu_image_name = "Ubuntu 20.04 - Focal Fossa - 64-bit - Cloud Based Image" +ubuntu_image_name = "Ubuntu 22.04 - Jammy Jellyfish - 64-bit - Cloud Based Image" # The public key to be used for SSH connection, please make sure, that you have the corresponding private key # diff --git a/demo3-microservice.py b/demo3-microservice.py index 4f2661a..f5e616a 100644 --- a/demo3-microservice.py +++ b/demo3-microservice.py @@ -24,8 +24,8 @@ project_name = 'CloudComp' + str(group_number) project_network = 'CloudComp' + str(group_number) + '-net' # The image to look for and use for the started instance -#ubuntu_image_name = "Ubuntu 18.04 - Bionic Beaver - 64-bit - Cloud Based Image" -ubuntu_image_name = "Ubuntu 20.04 - Focal Fossa - 64-bit - Cloud Based Image" +#ubuntu_image_name = "Ubuntu 20.04 - Focal Fossa - 64-bit - Cloud Based Image" +ubuntu_image_name = "Ubuntu 22.04 - Jammy Jellyfish - 64-bit - Cloud Based Image" # The public key to be used for SSH connection, please make sure, that you have the corresponding private key # @@ -166,12 +166,6 @@ def main(): # ########################################################################### - # https://git.openstack.org/cgit/openstack/faafo/plain/contrib/install.sh - # is currently broken, hence the "rabbitctl" lines were added in the example - # below, see also https://bugs.launchpad.net/faafo/+bug/1679710 - # - # Thanks to Stefan Friedmann for finding this fix ;) - #hsfd_faafo_cloud_init_script = 'https://gogs.informatik.hs-fulda.de/srieger/cloud-computing-msc-ai-examples/raw/master/faafo/contrib/install.sh' # testing / faafo dev branch: hsfd_faafo_cloud_init_script = 'https://gogs.informatik.hs-fulda.de/srieger/cloud-computing-msc-ai-examples/raw/branch/dev_faafo/faafo/contrib/install.sh' diff --git a/demo4-scale-out-add-worker.py b/demo4-scale-out-add-worker.py index bc73745..6d3630c 100644 --- a/demo4-scale-out-add-worker.py +++ b/demo4-scale-out-add-worker.py @@ -26,8 +26,8 @@ project_name = 'CloudComp' + str(group_number) project_network = 
'CloudComp' + str(group_number) + '-net' # The image to look for and use for the started instance -#ubuntu_image_name = "Ubuntu 18.04 - Bionic Beaver - 64-bit - Cloud Based Image" -ubuntu_image_name = "Ubuntu 20.04 - Focal Fossa - 64-bit - Cloud Based Image" +#ubuntu_image_name = "Ubuntu 20.04 - Focal Fossa - 64-bit - Cloud Based Image" +ubuntu_image_name = "Ubuntu 22.04 - Jammy Jellyfish - 64-bit - Cloud Based Image" # The public key to be used for SSH connection, please make sure, that you have the corresponding private key # diff --git a/demo4-scale-out.py b/demo4-scale-out.py index 7ee3f07..4154226 100644 --- a/demo4-scale-out.py +++ b/demo4-scale-out.py @@ -13,7 +13,7 @@ from libcloud.compute.types import Provider # Please use 1-29 for X in the following variable to specify your group number. (will be used for the username, # project etc., as coordinated in the lab sessions) -group_number = X +group_number = 2 # web service endpoint of the private cloud infrastructure @@ -26,8 +26,9 @@ project_name = 'CloudComp' + str(group_number) project_network = 'CloudComp' + str(group_number) + '-net' # The image to look for and use for the started instance -#ubuntu_image_name = "Ubuntu 18.04 - Bionic Beaver - 64-bit - Cloud Based Image" -ubuntu_image_name = "Ubuntu 20.04 - Focal Fossa - 64-bit - Cloud Based Image" +#ubuntu_image_name = "Ubuntu 20.04 - Focal Fossa - 64-bit - Cloud Based Image" +ubuntu_image_name = "Ubuntu 22.04 - Jammy Jellyfish - 64-bit - Cloud Based Image" + # The public key to be used for SSH connection, please make sure, that you have the corresponding private key # @@ -227,12 +228,6 @@ def main(): # ########################################################################### - # https://git.openstack.org/cgit/openstack/faafo/plain/contrib/install.sh - # is currently broken, hence the "rabbitctl" lines were added in the example - # below, see also https://bugs.launchpad.net/faafo/+bug/1679710 - # - # Thanks to Stefan Friedmann for finding this fix ;) - #hsfd_faafo_cloud_init_script = 'https://gogs.informatik.hs-fulda.de/srieger/cloud-computing-msc-ai-examples/raw/master/faafo/contrib/install.sh' # testing / faafo dev branch: hsfd_faafo_cloud_init_script = 'https://gogs.informatik.hs-fulda.de/srieger/cloud-computing-msc-ai-examples/raw/branch/dev_faafo/faafo/contrib/install.sh' diff --git a/faafo/contrib/install.sh b/faafo/contrib/install.sh index 2951daa..7db1df5 100755 --- a/faafo/contrib/install.sh +++ b/faafo/contrib/install.sh @@ -88,18 +88,20 @@ if [[ -e /etc/os-release ]]; then # HSFD changes for Ubuntu 18.04 #sudo sed -i -e "/bind-address/d" /etc/mysql/mysql.conf.d/mysqld.cnf ##sudo sed -i -e "/bind-address/d" /etc/mysql/my.cnf + sudo sed -i -e "s/127.0.0.1/0.0.0.0/g" /etc/mysql/mariadb.conf.d/50-server.cnf + sudo mysqmladmin password password sudo systemctl restart mariadb elif [[ $ID = 'fedora' ]]; then # fedora currently not tested nor supported sudo dnf install -y mariadb-server python3-mysql printf "[mysqld]\nbind-address = 127.0.0.1\n" | sudo tee /etc/my.cnf.d/faafo.conf + sudo mysqmladmin password password sudo systemctl enable mariadb sudo systemctl start mariadb else echo "error: distribution $ID not supported" exit 1 fi - sudo mariadb-admin password password sudo mysql -uroot -ppassword mysql -e "CREATE DATABASE IF NOT EXISTS faafo; GRANT ALL PRIVILEGES ON faafo.* TO 'faafo'@'%' IDENTIFIED BY 'password';" URL_DATABASE='mysql://root:password@localhost/faafo' fi diff --git a/faafo/faafo/api/templates/index.html b/faafo/faafo/api/templates/index.html index 
acc946f..8a7a142 100644 --- a/faafo/faafo/api/templates/index.html +++ b/faafo/faafo/api/templates/index.html @@ -57,9 +57,9 @@ yb = {{ fractal.yb }} {% endfor %} {{render_pagination(fractals)}} -
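The install.sh changes in the last two patches replace mysql-server with mariadb-server, bind the server to 0.0.0.0 and keep the 'faafo'@'%' grant, so API instances can reach the database over the project network. A small sketch for checking that connectivity from Python with PyMySQL (already listed in requirements.txt); the host is an assumed placeholder for the services instance, and the credentials are the demo defaults that install.sh creates, not suitable for production:

    # Sketch only, not part of the patches: verify that the MariaDB instance set up
    # by install.sh accepts remote connections for the 'faafo' user. The host below
    # is an assumed placeholder; 'faafo'/'password' are the demo credentials that
    # install.sh grants with GRANT ALL PRIVILEGES ON faafo.*.
    import pymysql

    conn = pymysql.connect(host='192.168.100.20',   # assumed: IP of the services instance
                           user='faafo',
                           password='password',
                           database='faafo')
    try:
        with conn.cursor() as cur:
            cur.execute('SELECT VERSION()')
            print('Connected, server version:', cur.fetchone()[0])
    finally:
        conn.close()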