
"""

Copyright (c) 2019 Nutanix Inc. All rights reserved.

Author: Tarun Mehta <tarun.mehta@nutanix.com>

Description: This class handles the registration of database.


This class initializes register database workflow and executes it.
"""
from abc import ABCMeta, abstractmethod
import traceback
from nutanix_era.common.Logging.era_logger import get_era_common_logger
from ...common.era.ClusteredTimeMachine.ERADatabase import ERADatabase

from ..workflow.workflow import StepGenWorkflow

class RegisterDatabaseOrchestrator(StepGenWorkflow, metaclass=ABCMeta):
    """
    Handles the registration of single and clustered databases on a new or
    existing DB server.

    This is the base class; it executes the workflow defined by derived
    classes.
    """

    def __init__(self, era_op, flattened_work, host_properties, logger,
                 stepgen_manager):
        """
        Initialize the registration workflow state from the flattened work
        attributes.

        :type era_op: EraOperation
        :type flattened_work: dict (flattened work attributes)
        :type host_properties: dict
        :type logger: era_logger
        :type stepgen_manager: StepGenManager
        """
        super(RegisterDatabaseOrchestrator, self).__init__(logger,
                                                           stepgen_manager)
        self.era_op = era_op
        self.inputs = flattened_work
        self.host_properties = host_properties
        self.register_dbserver = False
        self.internal_operation = False
        self.app_info = None
        self.dbservers = []
        if "operationId" in self.inputs:
            self.common_logger = get_era_common_logger(
                operation_id=self.inputs["operationId"])
        else:
            self.common_logger = get_era_common_logger()
        # Flags arrive as strings ("true"/"false"), hence the str/lower dance.
        self.clustered = str(self.inputs.get('clustered')).lower() == "true"
        self.database_name = self.inputs.get('database_name')  # DB name in DB server
        if not self.database_name:
            self.database_name = self.inputs.get('databaseName')  # DB name in ERA
        self.database_id = self.inputs.get('databaseId')
        self.log_driver_nx_cluster_id = self.inputs.get('logDriveNxClusterId')
        self.nx_cluster_id = self.inputs.get('nxClusterId')
        self.db_server_id = self.inputs.get('dbserverId')
        self.is_owner_era_server = str(
            self.inputs.get('isOwnerEraServer')).lower() == "true"
        self.register_dbserver = str(
            self.inputs.get('registerDbserver')).lower() == "true"
        self.internal_operation = str(
            self.inputs.get('internalOperation')).lower() == "true"
        self.skip_rollback = str(
            self.inputs.get('skipRollback')).lower() == "true"
        self.category = str(self.inputs.get('category')).lower()
        self.database_group = self.category in ("db_group",
                                                "db_group_dbserver")
        self.operation_owner_id = self.inputs.get('ownerId')
        self.era_database = None
        self.register_database_result = None
        self.db_server_metadata = None
        self.database_group_name = ""
        self.database_group_id = ""
        self.database_create_new_group = False
        self.auto_register_database = False
        self.register_all_database = False
        self.database_group_db_list = []
        self.database_group_db_id_list = []
        self.overall_database_group_db_list = []
        self.timeMachineId = self.inputs.get("timeMachineId", "")
        self.is_fci = False
        if self.database_group:
            self.update_database_group_info()
        if (self.inputs.get("cluster_resource_type") == "FCI" or
                str(self.inputs.get("isFci")).lower() == "true"):
            # Normalize the FCI markers so downstream steps only need "isFci".
            self.inputs["isFci"] = "true"
            self.is_fci = True
            self.inputs["sql_network_name"] = self.inputs.get(
                "cluster_resource_name")

def __enter__(self):
# Pre workflow steps
# Add this function in derived classes for derive class cleanup
self.logger.DEBUG("Enter RegisterDatabaseOrchestrator")
return self

def __exit__(self, exc_type, exc_val, exc_tb):


# Post workflow steps/cleanup
# Add this function in derived classes for derive class cleanup
self.logger.DEBUG("Exit RegisterDatabaseOrchestrator")

def __str__(self):
return str(self.__dict__)

def update_database_group_info(self):
# get databaseGroupInfo information :
database_group_info = {}
if self.inputs.get("payload"):
database_group_info =
self.inputs.get("payload").get("databaseGroupingInfo")
instance_names = []
recovery_models = []
if not database_group_info:
return
self.database_group_name =
str(database_group_info.get("newDatabaseGroupName"))
self.database_create_new_group = database_group_info.get("createNewGroup",
False)
self.database_group_id = database_group_info.get("databaseGroupId")
self.auto_register_database =
database_group_info.get("autoRegisterDatabases")
self.register_all_database =
database_group_info.get("registerAllDatabases", False)
self.database_name = self.database_group_name

databases = database_group_info.get("databases")
if not databases:
return
for database in databases:
self.database_group_db_id_list.append((database.get("id")))

action_arguments = database.get("actionArguments")
if action_arguments:
for prop in action_arguments:
if str(prop.get("name")).lower() == "instance_name":
instance_name = prop.get("value")
if instance_name not in instance_names:
instance_names.append(instance_name)
if instance_names.__len__() > 1:
raise Exception("Group databases cannot be formed
between instances")
if str(prop.get("name")).lower() == "recovery_model":
recovery_model = prop.get("value")
if recovery_model not in recovery_models:
recovery_models.append(prop.get("value"))
if recovery_models.__len__() > 1:
raise Exception("Databases in the group cannot have
different recovery models")
if str(prop.get("name")).lower() == "database_name":
self.database_group_db_list.append(prop.get("value"))

self.overall_database_group_db_list.append(prop.get("value"))

if self.database_group_id:
self.era_database = ERADatabase(id=self.database_group_id,
type="advanced", is_clone_app=False,

log_file_path=self.inputs['LOG_DIRECTORY_PATH'],

log_file_name=self.inputs['LOG_FILE_NAME'], logger=self.logger,
fetch_info=True)
self.database_group_name = self.era_database.databaseName
self.database_name = self.database_group_name
for database in self.era_database.databases:
if database.get("databaseName") not in
self.overall_database_group_db_list:

self.overall_database_group_db_list.append(database.get("databaseName"))
self.logger.INFO(
"database_group_name {0}, database_create_new_group {1},
database_group_id {2}, database_group_list {3}, "
"overall_list {4}".format(self.database_group_name,
str(self.database_create_new_group),
self.database_group_id,
str(self.database_group_db_list),
str(self.overall_database_group_db_list)))

@abstractmethod
def initialize(self):
pass
def submit_sub_operation_for_database_registration(self, properties=None,
primary_dbserver=None):
#
# activate_database_interface
#
self.logger.INFO("start
{}:submit_sub_operation_for_database_registration".format(self.__class__.__name__))
nodes = []
for dbserver in self.dbservers:
node = {
"host_ip": dbserver.ip_address,
"dbserverId": dbserver.get_dbserver_id()
}
nodes.append(node)

parent_operation = {
"id": self.inputs["operationId"],
"stepIndex": self.step_index
}

from ...common.era.ClusteredTimeMachine.ERADatabase import ERADatabase


self.era_database = ERADatabase(id=self.inputs['databaseId'],
type="advanced", is_clone_app=False,

log_file_path=self.inputs['LOG_DIRECTORY_PATH'],
log_file_name=self.inputs['LOG_FILE_NAME'],
logger=self.logger, fetch_info=True)
self.logger.INFO("associating database to logical cluster")
if properties:
self.era_database.set_properties(properties)
if str(self.inputs.get("cluster_resource_type")).lower() == "fci":
self.submit_sub_operation_for_fci_registration(parent_operation,
primary_dbserver)
return
self.register_database_result =
self.era_database.submit_database_activate_suboperation(nodes, self.inputs[
'databaseId'], parent_operation, clustered=self.clustered,
operation_owner_id=self.operation_owner_id)

def submit_sub_operation_for_fci_registration(self, parent_operation,


primary_dbserver):
#
# fci db registration
#
self.logger.INFO("start
{}:submit_sub_operation_for_fci_registration".format(self.__class__.__name__))
# In case of FCI we first register the FCI active node and later use the
properties from the active node as the
# properties for the passive node. hence the submit sub operation is split
into two different calls
nodes = []
node = {
"host_ip": primary_dbserver.ip_address,
"dbserverId": primary_dbserver.get_dbserver_id()
}
nodes.append(node)
self.logger.INFO("register fci active")
self.register_database_result =
self.era_database.submit_database_activate_suboperation(nodes, self.inputs[
'databaseId'], parent_operation, clustered=self.clustered,
operation_owner_id=self.operation_owner_id,

set_ready_state=False)

if not self.register_database_result["overallStatus"]:
error_message = self.register_database_result.get("errorMessage",
"Failed to register database")
raise Exception(error_message)

nodes = []
self.logger.INFO("register fci passive nodes")
for dbserver in self.dbservers:
if dbserver == primary_dbserver:
self.logger.INFO("skipping active")
continue
node = {
"host_ip": dbserver.ip_address,
"dbserverId": dbserver.get_dbserver_id()
}
nodes.append(node)

# register database result is being overwritten, as it is not being used in


the rollback, the merge of register
# results has not been done. if need be, merging or register result can be
done
self.register_database_result =
self.era_database.submit_database_activate_suboperation(nodes, self.inputs[
'databaseId'], parent_operation, clustered=self.clustered,
operation_owner_id=self.operation_owner_id,

is_pd_shared=True)

def activate_time_machine(self):
#
# Activate time machine of the database
#
self.logger.INFO("start
{}:activate_time_machine".format(self.__class__.__name__))
# Do not activate the time machine, if database_group_id is passed.
if self.database_group_id:
return
if not self.era_database:
self.era_database = ERADatabase(id=self.inputs['databaseId'],
type="advanced", is_clone_app=False,

log_file_path=self.inputs['LOG_DIRECTORY_PATH'],

log_file_name=self.inputs['LOG_FILE_NAME'], logger=self.logger,
fetch_info=True)

application_info = {
"id": self.inputs['databaseId'],
"name": self.database_name
}

log_drive_mount_point = "/home/era/era_base/log_drive/winlogdrive_" + str(

RegisterDatabaseOrchestrator.convert_app_name_to_valid_dir_path(self.database_name)
)

if self.is_owner_era_server:
self.fetch_database_app_info()

basic_info = self.app_info["basic_info"]
db_size = basic_info["SIZE"]
db_size_unit = basic_info.get("SIZE_UNIT", "GB")

era_server_info = {
"mount_point": log_drive_mount_point,
"disk_size": self.inputs["log_disk_size"],
"db_size": db_size,
"db_size_unit": db_size_unit,
"cloud_id_list": self.inputs.get("pitrEnabledClusterIds"),
"log_drive_fs_type": "ntfs"
}

parent_operation = {
"id": self.inputs["operationId"],
"stepIndex": self.step_index
}

status = self.era_database.time_machine_activate(self.is_owner_era_server,
parent_operation, application_info,
era_server_info,
operation_owner_id=self.operation_owner_id)

if not status:
raise Exception("Failed to activate the time machine of database.")

#
# Associating the database to all the DB servers so that clone into source
can directly be done
#
if not (self.database_group or str(self.inputs.get("isFci")).lower() ==
"true"):
self.associate_time_machine_with_dbservers()

#
# Save database properties
#
self.save_database_properties()

#
# Updating auto_register flag on the dbserver
#
self.update_auto_register_flag()

def update_auto_register_flag(self):
self.logger.INFO("Start update_auto_register_flag")
if self.auto_register_database:
self.logger.INFO("Set db_group_auto_registration as True")
# set dbserver property with auto-register = True.
from
nutanix_era.era_drivers.common.era.ClusteredTimeMachine.ERADbServer import
ERADbServer
dbserver = ERADbServer(id=self.db_server_id,
type="basic",
logger=self.logger,
fetch_info=True)
properties = []
prop = {}
prop["name"] = "db_group_auto_registration"
prop["value"] = True
properties.append(prop)
dbserver.set_properties(properties)

def associate_time_machine_with_dbservers(self):

self.logger.INFO("associating time machine with dbservers")


self.logger.INFO(self.dbservers)
time_machine_id = self.era_database.get_time_machine_object().id
import nutanix_era.common.mgmt_server.TimeMachineUtil as TimeMachineUtil
time_machine_util = TimeMachineUtil.TimeMachineUtil(None)
dbserver_id_list = []
for dbserver in self.dbservers:
self.logger.INFO("associating time machine " + str(time_machine_id) +
" with dbserver " + str(dbserver.ip_address))
dbserver_id_list.append(dbserver.get_dbserver_id())

# in case of register operation picked directly on dbserver, dbserver


object is not created
if not self.dbservers:
dbserver_id_list.append(self.db_server_id)
self.logger.INFO("associating time machine " + str(time_machine_id) +
" with dbserver " + str(self.db_server_id))

self.logger.INFO("dbserver ID list:" + str(dbserver_id_list))

try:
status = time_machine_util.associate_dbserver_with_time_machine(
id=time_machine_id, is_name=False,
dbserver_id_list=dbserver_id_list)
if not status:
return 0
except Exception as e:
self.logger.INFO(str(str(e)))
self.logger.INFO(traceback.format_exc())
self.logger.INFO("failed to associate time machine " +
str(time_machine_id) + " with dbservers " +
str(dbserver_id_list))
return 1

def save_database_properties(self):
#
# Save database properties
#
try:
self.set_database_info()
self.logger.INFO("Saving database properties...")
basic_info = self.app_info["basic_info"]
properties = []
for key in basic_info:
properties.append({"name": key, "value": basic_info[key]})

if self.inputs.get("recovery_model"):
properties.append({"name": "recovery_model", "value":
self.inputs["recovery_model"]})

if self.inputs.get("era_manage_log"):
properties.append({"name": "era_manage_log", "value":
self.inputs["era_manage_log"]})

properties.append({"name": "isFci", "value":


str(self.inputs.get("cluster_resource_type")).lower() == "fci"})

self.logger.INFO(properties)

self.era_database.set_properties(properties)
self.logger.INFO("Updating database metrics...")
metrics_object = {"storage": {"size": basic_info['SIZE'], "usedSize":
basic_info['used_size'], "unit": basic_info['SIZE_UNIT']}}
self.era_database.update_database_metrics(metrics_object)
except Exception as ex:
self.logger.INFO(traceback.format_exc())
self.logger.INFO("Failed to save database properties. Error: " +
str(str(ex)))

def set_database_info(self):
#
# Setting basic info to database info column.
# TODO - Need to revisit if we are okay to use database properties instead
of info. Keeping the code for
# future use
#
self.logger.INFO("Start set_database_info")
basic_info = self.app_info["basic_info"]
self.era_database.set_info(info=basic_info, secure_info=None)

def fetch_database_app_info(self):
#
# Fetch the app info json from ERA server. As part of Activate call app
info is loaded to ERA server.
#
self.logger.INFO("start fetch_database_app_info")
database_nodes = self.era_database.get_All_databaseNode()
first_database_node_id = database_nodes[0]["id"] # Get first node TODO get
MAX size of database for cluster db
self.logger.INFO("database node id : {}".format(first_database_node_id))
from ...common.era.ClusteredTimeMachine.ERADatabaseNode import
ERADatabaseNode
database_node = ERADatabaseNode(id=first_database_node_id,
database_id=self.era_database.id,
type="advanced", fetch_info=False,
logger=self.logger)
self.app_info = database_node.get_app_info_json()['info']

@staticmethod
def convert_app_name_to_valid_dir_path(app_name):
import re
app_name = re.sub(r'[^a-zA-Z0-9._-]', "_", app_name)
return app_name
