# Register Database Orchestrator (Python)
def __enter__(self):
# Pre workflow steps
# Add this function in derived classes for derive class cleanup
self.logger.DEBUG("Enter RegisterDatabaseOrchestrator")
return self
def __str__(self):
return str(self.__dict__)
def update_database_group_info(self):
    """Populate database-group attributes from the request payload.

    Reads ``payload.databaseGroupingInfo`` from ``self.inputs`` and fills in
    group name/id/flags plus the per-database id and name lists. If a
    ``databaseGroupId`` is supplied, the existing ERA database group is
    fetched and its member database names are merged into
    ``self.overall_database_group_db_list``.

    Raises:
        Exception: if the grouped databases span more than one instance, or
            use more than one recovery model.
    """
    database_group_info = {}
    if self.inputs.get("payload"):
        database_group_info = self.inputs.get("payload").get("databaseGroupingInfo")
    instance_names = []
    recovery_models = []
    # Nothing to do when the request carries no grouping information.
    if not database_group_info:
        return
    self.database_group_name = str(database_group_info.get("newDatabaseGroupName"))
    self.database_create_new_group = database_group_info.get("createNewGroup", False)
    self.database_group_id = database_group_info.get("databaseGroupId")
    self.auto_register_database = database_group_info.get("autoRegisterDatabases")
    self.register_all_database = database_group_info.get("registerAllDatabases", False)
    # The group name doubles as the logical database name.
    self.database_name = self.database_group_name
    databases = database_group_info.get("databases")
    if not databases:
        return
    for database in databases:
        self.database_group_db_id_list.append(database.get("id"))
        action_arguments = database.get("actionArguments")
        if action_arguments:
            for prop in action_arguments:
                prop_name = str(prop.get("name")).lower()
                if prop_name == "instance_name":
                    instance_name = prop.get("value")
                    if instance_name not in instance_names:
                        instance_names.append(instance_name)
                    # A group must not span SQL instances.
                    if len(instance_names) > 1:
                        raise Exception("Group databases cannot be formed between instances")
                if prop_name == "recovery_model":
                    recovery_model = prop.get("value")
                    if recovery_model not in recovery_models:
                        recovery_models.append(recovery_model)
                    # All members must share one recovery model.
                    if len(recovery_models) > 1:
                        raise Exception("Databases in the group cannot have different recovery models")
                if prop_name == "database_name":
                    self.database_group_db_list.append(prop.get("value"))
                    self.overall_database_group_db_list.append(prop.get("value"))
    if self.database_group_id:
        # Existing group: fetch it from ERA and merge its member names.
        self.era_database = ERADatabase(id=self.database_group_id,
                                        type="advanced", is_clone_app=False,
                                        log_file_path=self.inputs['LOG_DIRECTORY_PATH'],
                                        log_file_name=self.inputs['LOG_FILE_NAME'],
                                        logger=self.logger,
                                        fetch_info=True)
        self.database_group_name = self.era_database.databaseName
        self.database_name = self.database_group_name
        for database in self.era_database.databases:
            if database.get("databaseName") not in self.overall_database_group_db_list:
                self.overall_database_group_db_list.append(database.get("databaseName"))
    self.logger.INFO(
        "database_group_name {0}, database_create_new_group {1}, database_group_id {2}, database_group_list {3}, "
        "overall_list {4}".format(self.database_group_name,
                                  str(self.database_create_new_group),
                                  self.database_group_id,
                                  str(self.database_group_db_list),
                                  str(self.overall_database_group_db_list)))
@abstractmethod
def initialize(self):
    # Abstract hook: each concrete orchestrator supplies its own
    # initialization logic before the registration workflow runs.
    pass
def submit_sub_operation_for_database_registration(self, properties=None,
primary_dbserver=None):
#
# activate_database_interface
#
# NOTE(review): this method appears truncated by whatever produced this file.
# Several statements are missing and only their trailing keyword arguments
# survive ("fetch_info=True)", "set_ready_state=False)", "is_pd_shared=True)"),
# so the text below is not syntactically valid as-is. Recover the original
# source before making functional changes.
self.logger.INFO("start
{}:submit_sub_operation_for_database_registration".format(self.__class__.__name__))
# Build one node entry (host IP + dbserver id) per registered DB server.
nodes = []
for dbserver in self.dbservers:
node = {
"host_ip": dbserver.ip_address,
"dbserverId": dbserver.get_dbserver_id()
}
nodes.append(node)
parent_operation = {
"id": self.inputs["operationId"],
"stepIndex": self.step_index
}
# NOTE(review): the call whose arguments follow is missing — presumably an
# ERADatabase(...) construction (compare lines used in activate_time_machine).
log_file_path=self.inputs['LOG_DIRECTORY_PATH'],
log_file_name=self.inputs['LOG_FILE_NAME'],
logger=self.logger, fetch_info=True)
self.logger.INFO("associating database to logical cluster")
if properties:
self.era_database.set_properties(properties)
# FCI clusters take a dedicated registration path and return early.
if str(self.inputs.get("cluster_resource_type")).lower() == "fci":
self.submit_sub_operation_for_fci_registration(parent_operation,
primary_dbserver)
return
self.register_database_result =
self.era_database.submit_database_activate_suboperation(nodes, self.inputs[
'databaseId'], parent_operation, clustered=self.clustered,
operation_owner_id=self.operation_owner_id)
# NOTE(review): dangling fragment of another lost call.
set_ready_state=False)
if not self.register_database_result["overallStatus"]:
error_message = self.register_database_result.get("errorMessage",
"Failed to register database")
raise Exception(error_message)
# NOTE(review): the code below ("register fci passive nodes") looks like it
# belongs to a separate method (submit_sub_operation_for_fci_registration?)
# whose def line was lost — TODO confirm against the original source.
nodes = []
self.logger.INFO("register fci passive nodes")
for dbserver in self.dbservers:
if dbserver == primary_dbserver:
self.logger.INFO("skipping active")
continue
node = {
"host_ip": dbserver.ip_address,
"dbserverId": dbserver.get_dbserver_id()
}
nodes.append(node)
# NOTE(review): dangling fragment of another lost call.
is_pd_shared=True)
def activate_time_machine(self):
#
# Activate time machine of the database
#
# NOTE(review): this method appears partially lost in transcription — see the
# dangling ")" after the convert_app_name_to_valid_dir_path(...) line below;
# the assignment that should define log_drive_mount_point (used in
# era_server_info) is missing. Recover the original source before editing.
self.logger.INFO("start
{}:activate_time_machine".format(self.__class__.__name__))
# Do not activate the time machine, if database_group_id is passed.
if self.database_group_id:
return
# Lazily fetch the ERA database object when no earlier step created it.
if not self.era_database:
self.era_database = ERADatabase(id=self.inputs['databaseId'],
type="advanced", is_clone_app=False,
log_file_path=self.inputs['LOG_DIRECTORY_PATH'],
log_file_name=self.inputs['LOG_FILE_NAME'], logger=self.logger,
fetch_info=True)
application_info = {
"id": self.inputs['databaseId'],
"name": self.database_name
}
# NOTE(review): fragment of a lost statement — presumably computing the log
# drive mount point from the sanitized app name. TODO confirm.
RegisterDatabaseOrchestrator.convert_app_name_to_valid_dir_path(self.database_name)
)
if self.is_owner_era_server:
self.fetch_database_app_info()
basic_info = self.app_info["basic_info"]
db_size = basic_info["SIZE"]
db_size_unit = basic_info.get("SIZE_UNIT", "GB")
era_server_info = {
"mount_point": log_drive_mount_point,
"disk_size": self.inputs["log_disk_size"],
"db_size": db_size,
"db_size_unit": db_size_unit,
"cloud_id_list": self.inputs.get("pitrEnabledClusterIds"),
"log_drive_fs_type": "ntfs"
}
parent_operation = {
"id": self.inputs["operationId"],
"stepIndex": self.step_index
}
status = self.era_database.time_machine_activate(self.is_owner_era_server,
parent_operation, application_info,
era_server_info,
operation_owner_id=self.operation_owner_id)
if not status:
raise Exception("Failed to activate the time machine of database.")
#
# Associating the database to all the DB servers so that clone into source
can directly be done
#
if not (self.database_group or str(self.inputs.get("isFci")).lower() ==
"true"):
self.associate_time_machine_with_dbservers()
#
# Save database properties
#
self.save_database_properties()
#
# Updating auto_register flag on the dbserver
#
self.update_auto_register_flag()
def update_auto_register_flag(self):
    """Persist the ``db_group_auto_registration`` property on the DB server
    when auto-registration of group databases was requested; otherwise a
    no-op."""
    self.logger.INFO("Start update_auto_register_flag")
    if not self.auto_register_database:
        return
    self.logger.INFO("Set db_group_auto_registration as True")
    # set dbserver property with auto-register = True.
    from nutanix_era.era_drivers.common.era.ClusteredTimeMachine.ERADbServer import ERADbServer
    dbserver = ERADbServer(id=self.db_server_id,
                           type="basic",
                           logger=self.logger,
                           fetch_info=True)
    dbserver.set_properties([{"name": "db_group_auto_registration",
                              "value": True}])
def associate_time_machine_with_dbservers(self):
# NOTE(review): time_machine_id, dbserver_id_list and time_machine_util are
# never defined in the visible text — the assignments that should precede the
# try block appear to have been lost in transcription. As written this would
# raise NameError. Recover the original source before editing.
# NOTE(review): return convention looks inverted/odd — 0 on association
# failure, 1 on exception, implicit None on success. TODO confirm intent.
try:
status = time_machine_util.associate_dbserver_with_time_machine(
id=time_machine_id, is_name=False,
dbserver_id_list=dbserver_id_list)
if not status:
return 0
except Exception as e:
# Best-effort: log the failure, never propagate to the caller.
self.logger.INFO(str(str(e)))
self.logger.INFO(traceback.format_exc())
self.logger.INFO("failed to associate time machine " +
str(time_machine_id) + " with dbservers " +
str(dbserver_id_list))
return 1
def save_database_properties(self):
    """Best-effort persistence of database properties and storage metrics.

    Copies every ``basic_info`` entry (plus optional ``recovery_model`` /
    ``era_manage_log`` inputs) into ERA database properties, then pushes a
    storage-metrics object. Any failure is logged and swallowed — this step
    must never fail the registration workflow.
    """
    try:
        self.set_database_info()
        self.logger.INFO("Saving database properties...")
        basic_info = self.app_info["basic_info"]
        properties = [{"name": key, "value": basic_info[key]} for key in basic_info]
        # Optional request-level inputs are appended only when supplied.
        for optional_key in ("recovery_model", "era_manage_log"):
            if self.inputs.get(optional_key):
                properties.append({"name": optional_key,
                                   "value": self.inputs[optional_key]})
        self.logger.INFO(properties)
        self.era_database.set_properties(properties)
        self.logger.INFO("Updating database metrics...")
        metrics_object = {"storage": {"size": basic_info['SIZE'],
                                      "usedSize": basic_info['used_size'],
                                      "unit": basic_info['SIZE_UNIT']}}
        self.era_database.update_database_metrics(metrics_object)
    except Exception as ex:
        # Deliberate swallow: log and continue.
        self.logger.INFO(traceback.format_exc())
        self.logger.INFO("Failed to save database properties. Error: " + str(ex))
def set_database_info(self):
    """Write the app ``basic_info`` into the database's info column.

    TODO (original note): revisit whether database properties should be used
    instead of the info column; kept for future use.
    """
    self.logger.INFO("Start set_database_info")
    self.era_database.set_info(info=self.app_info["basic_info"],
                               secure_info=None)
def fetch_database_app_info(self):
    """Load the application-info JSON from the ERA server into ``self.app_info``.

    The info is uploaded to the ERA server as part of the Activate call; here
    it is fetched back via the first database node.
    """
    self.logger.INFO("start fetch_database_app_info")
    # TODO (original note): for clustered DBs, pick the node holding the
    # MAX-size database instead of blindly taking the first node.
    node_id = self.era_database.get_All_databaseNode()[0]["id"]
    self.logger.INFO("database node id : {}".format(node_id))
    from ...common.era.ClusteredTimeMachine.ERADatabaseNode import ERADatabaseNode
    app_node = ERADatabaseNode(id=node_id,
                               database_id=self.era_database.id,
                               type="advanced", fetch_info=False,
                               logger=self.logger)
    self.app_info = app_node.get_app_info_json()['info']
@staticmethod
def convert_app_name_to_valid_dir_path(app_name):
    """Sanitize *app_name* for use as a directory path component.

    Every character outside ``[a-zA-Z0-9._-]`` is replaced by an underscore.
    """
    import re
    invalid_chars = re.compile(r'[^a-zA-Z0-9._-]')
    return invalid_chars.sub("_", app_name)