Alter Index


-- Alter Index:

---------------
ALTER INDEX <INDEXNAME> REBUILD;

-- Alter Sequence:
-------------------
select "QBEXSQL"."BillingDocNumbers".nextval
from dummy;

ALTER SEQUENCE "QBEXSQL"."BillingDocNumbers"


RESTART WITH 6;

-- Any other parameters can be changed

-- Alter Table:
---------------
CREATE COLUMN TABLE "QBEXSQL"."EMPtab4ALTER"
(
"EMPID" INT ,
"EMPNAME" VARCHAR(20),
"DEPTNO" VARCHAR(5)
);

-- Add New Column:


------------------
ALTER TABLE "QBEXSQL"."EMPtab4ALTER"
ADD ("DEPTNAME" VARCHAR(20));

-- Drop Column
---------------
ALTER TABLE "QBEXSQL"."EMPtab4ALTER"
DROP ( "DEPTNAME" );

-- Alter Column Definition


--------------------------
ALTER TABLE "QBEXSQL"."EMPtab4ALTER"
ALTER ( "DEPTNO" VARCHAR(10));

ALTER TABLE "QBEXSQL"."EMPtab4ALTER"


ALTER ( "EMPID" BIGINT);

-- Add Constraint
-----------------
ALTER TABLE "QBEXSQL"."EMPtab4ALTER"
ADD CONSTRAINT PK PRIMARY KEY ("EMPID");

-- Drop Primary key


--------------------

ALTER TABLE "QBEXSQL"."EMPtab4ALTER"


DROP PRIMARY KEY;

-- Preload Clause
-----------------

ALTER TABLE "QBEXSQL"."EMPtab4ALTER"


PRELOAD ("EMPNAME", "DEPTNO");

ALTER TABLE "QBEXSQL"."EMPtab4ALTER"


PRELOAD ALL;

-- Alter Table Type


--------------------
ALTER TABLE "QBEXSQL"."EMPtab4ALTER"
ALTER TYPE ROW;

-- Add Partition
-----------------
ALTER TABLE "QBEXSQL"."RANGEPARTMONTH"
ADD PARTITION VALUE = '2015-08' ;

-- Drop Partition:
-------------------

ALTER TABLE "QBEXSQL"."RANGEPARTMONTH"


DROP PARTITION VALUE = '2015-08' ;

-- Auto Merge
-------------
ALTER TABLE "QBEXSQL"."RANGEPARTMONTH"
DISABLE AUTOMERGE ;

ALTER TABLE "QBEXSQL"."RANGEPARTMONTH"


ENABLE AUTOMERGE ;

-- Unload Priority
------------------
ALTER TABLE "QBEXSQL"."RANGEPARTMONTH"
UNLOAD PRIORITY 6 ;

-- Auto Merge (handled by the "merge dog" background process); the table below is created with auto merge disabled

CREATE COLUMN TABLE "QBEXSQL"."EMPMERGE"


(
"EMPID" INT NOT NULL PRIMARY KEY,
"EMPNAME" VARCHAR(20)
) NO AUTO MERGE;

-- Create DB Table:
-------------------
--1. Create Simple COLUMN Table

CREATE COLUMN TABLE "QBEXSQL"."EMP"


(
"EMPID" INT NOT NULL PRIMARY KEY,
"EMPNAME" VARCHAR(20)
);

--2. Create Simple ROW Table

CREATE ROW TABLE "QBEXSQL"."EMPROW"


(
"EMPID" INT NOT NULL PRIMARY KEY,
"EMPNAME" VARCHAR(20)
);

-- Comments:
------------
CREATE COLUMN TABLE "QBEXSQL"."EMPCOMMENTS"
(
"EMPID" INT NOT NULL PRIMARY KEY,
"EMPNAME" VARCHAR(20)
);

COMMENT ON TABLE "QBEXSQL"."EMPCOMMENTS" IS 'Employee Details Table';

COMMENT ON COLUMN "QBEXSQL"."EMPCOMMENTS"."EMPID" IS 'Employee Number';

COMMENT ON COLUMN "QBEXSQL"."EMPCOMMENTS"."EMPNAME" IS 'Employee Name';

-- Create Table with Like or Subquery:


---------------------------------------
-- Base Table:
----------------
CREATE COLUMN TABLE "QBEXSQL"."EMPBASE"
(
"EMPID" INT NOT NULL PRIMARY KEY,
"EMPNAME" VARCHAR(20)
);

insert into "QBEXSQL"."EMPBASE" values (1,'Pratap');


insert into "QBEXSQL"."EMPBASE" values (2,'Pratap');
insert into "QBEXSQL"."EMPBASE" values (3,'Pratap');

CREATE COLUMN TABLE "QBEXSQL"."EMPWITHDATA" LIKE "QBEXSQL"."EMPBASE" WITH DATA;

CREATE COLUMN TABLE "QBEXSQL"."EMPWITHNODATA" LIKE "QBEXSQL"."EMPBASE" WITH NO DATA;

CREATE COLUMN TABLE "QBEXSQL"."EMPWITHDATASUB" AS


(
SELECT * FROM "QBEXSQL"."EMPBASE"
);

CREATE COLUMN TABLE "QBEXSQL"."EMPWITHNODATASUB" AS


(
SELECT * FROM "QBEXSQL"."EMPBASE"
) WITH NO DATA;

-- Triggers [max 1024 on one DB table]


--------------------------------------
CREATE COLUMN TABLE "QBEXSQL"."AUDIT_TABLE"
(
"USERNAME" VARCHAR(20) NOT NULL,
"TIMING" TIMESTAMP NOT NULL,
PRIMARY KEY ("USERNAME","TIMING")
);

CREATE COLUMN TABLE "QBEXSQL"."EMPTRIGGER"


(
"EMPID" INT NOT NULL PRIMARY KEY,
"EMPNAME" VARCHAR(20)
);

CREATE TRIGGER "QBEXSQL"."InsertLogs"


AFTER INSERT ON "QBEXSQL"."EMPTRIGGER" FOR EACH ROW
BEGIN
INSERT INTO "QBEXSQL"."AUDIT_TABLE" VALUES (CURRENT_USER,CURRENT_TIMESTAMP);
END;

INSERT INTO "QBEXSQL"."EMPTRIGGER" VALUES (1,'Pratap');

INSERT INTO "QBEXSQL"."EMPTRIGGER" VALUES (2,'Pratap');

INSERT INTO "QBEXSQL"."EMPTRIGGER" VALUES (3,'Pratap');

INSERT INTO "QBEXSQL"."EMPTRIGGER" VALUES (4,'Pratap');

SELECT * FROM "QBEXSQL"."AUDIT_TABLE";

-- Create & Drop Extended Storage:


----------------------------------
The CREATE EXTENDED STORAGE statement defines an external Sybase IQ database connection for the SAP HANA database.
The Sybase IQ database will automatically be used to transparently archive and query data stored in extended tables.
Only database users having the system privilege CREATE SOURCE or DATA ADMIN are allowed to create extended storage.
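A minimal sketch of the statements, assuming an extended storage (Sybase IQ / dynamic tiering) host is available; the host name and size are placeholders and the exact clauses depend on the HANA release:

-- Hedged sketch: '<iq_hostname>' and the size are placeholders, not values from these notes.
CREATE EXTENDED STORAGE AT '<iq_hostname>' SIZE 500 MB;

-- Extended storage can be removed again with:
DROP EXTENDED STORAGE;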

--3. Create Global temporary Table


-----------------------------------
CREATE GLOBAL TEMPORARY COLUMN TABLE "QBEXSQL"."EMPTMPCOLUMN"
(
"EMPID" INT NOT NULL,
"EMPNAME" VARCHAR(20)
);

INSERT INTO "QBEXSQL"."EMPTMPCOLUMN"


VALUES (1,'Pratap');

INSERT INTO "QBEXSQL"."EMPTMPCOLUMN"


VALUES (2,'Pratap');

INSERT INTO "QBEXSQL"."EMPTMPCOLUMN"


VALUES (3,'Pratap');

INSERT INTO "QBEXSQL"."EMPTMPCOLUMN"


VALUES (4,'Pratap');

select * from "QBEXSQL"."EMPTMPCOLUMN";

-- Table Partitioning
----------------------

-- Hash, Range, Round Robin

-- Hash

CREATE COLUMN TABLE "QBEXSQL"."EMPHASH"


(
"EMPID" INT PRIMARY KEY,
"EMPNAME" VARCHAR(20)
) PARTITION BY HASH ("EMPID") PARTITIONS 4;

INSERT INTO "QBEXSQL"."EMPHASH"


VALUES (1,'Pratap');
INSERT INTO "QBEXSQL"."EMPHASH"
VALUES (2,'Pratap');
INSERT INTO "QBEXSQL"."EMPHASH"
VALUES (3,'Pratap');
INSERT INTO "QBEXSQL"."EMPHASH"
VALUES (4,'Pratap');

-- Create Index
----------------
-- The CREATE INDEX statement creates an index on a table with the selected column(s).
-- When column data types are character string types, binary string types, decimal types,
-- or when the constraint is a composite key, or a non-unique constraint, the default index type is CPBTREE.
-- In other cases BTREE will be used. If neither BTREE nor CPBTREE keyword is specified,
-- then the SAP HANA database chooses the appropriate index type.
---------------------------------------------------------------------------------------------------------------------------------
--INDEXES : Indexes currently defined on tables.
--INDEX_COLUMNS : Index column information.

CREATE COLUMN TABLE "QBEXSQL"."EMPSIMPLEIND"


(
"EMPID" INT NOT NULL PRIMARY KEY,
"EMPNAME" VARCHAR(20)
);

CREATE INDEX "MYSIMPLENDEX" ON "QBEXSQL"."EMPSIMPLEIND" ("EMPNAME");

CREATE COLUMN TABLE "QBEXSQL"."EMPIND"


(
"EMPID" INT NOT NULL PRIMARY KEY,
"EMPNAME" VARCHAR(20)
);

CREATE UNIQUE CPBTREE INDEX "MYFIRSTINDEX" ON "QBEXSQL"."EMPIND" ("EMPNAME");

CREATE COLUMN TABLE "QBEXSQL"."EMPINDEX"


(
"EMPID" INT NOT NULL PRIMARY KEY,
"EMPNAME" VARCHAR(20)
);

CREATE UNIQUE INDEX "MYSECONDINDEX" ON "QBEXSQL"."EMPINDEX" ("EMPNAME");

--4. Create local temporary Table


-----------------------------------
CREATE LOCAL TEMPORARY COLUMN TABLE "QBEXSQL"."#EMPTMPLOCALCOLUMN"
(
"EMPID" INT,
"EMPNAME" VARCHAR(20)
);

INSERT INTO "QBEXSQL"."#EMPTMPLOCALCOLUMN"


VALUES (1,'Pratap');

INSERT INTO "QBEXSQL"."#EMPTMPLOCALCOLUMN"


VALUES (2,'Pratap');

INSERT INTO "QBEXSQL"."#EMPTMPLOCALCOLUMN"


VALUES (3,'Pratap');

INSERT INTO "QBEXSQL"."#EMPTMPLOCALCOLUMN"


VALUES (4,'Pratap');
select * from "QBEXSQL"."#EMPTMPLOCALCOLUMN";

-- Logging and NO Logging


-------------------------
-- Default Logging
-- no logging
--Specifies the retention time in seconds of the column table created by NO LOGGING.
--After the specified retention period has elapsed, the table will be dropped if the used physical memory of the host is above 80%.

CREATE COLUMN TABLE "QBEXSQL"."EMPNOLOGGINGRET"


(
"EMPID" INT,
"EMPNAME" VARCHAR(20)
) NO LOGGING RETENTION 300;

INSERT INTO "QBEXSQL"."EMPNOLOGGINGRET"


VALUES (1,'Pratap');

INSERT INTO "QBEXSQL"."EMPNOLOGGINGRET"


VALUES (2,'Pratap');

INSERT INTO "QBEXSQL"."EMPNOLOGGINGRET"


VALUES (3,'Pratap');

INSERT INTO "QBEXSQL"."EMPNOLOGGINGRET"


VALUES (4,'Pratap');

select * from "QBEXSQL"."EMPNOLOGGINGRET";

-- Primary Key & Foreign Key:


-----------------------------
CREATE COLUMN TABLE "QBEXSQL"."DEPARTMENT"
(
"DEPTNO" INT NOT NULL PRIMARY KEY,
"DEPTNAME" VARCHAR(20)
);

CREATE COLUMN TABLE "QBEXSQL"."EMPLOYEES"


(
"EMPID" INT NOT NULL PRIMARY KEY,
"EMPNAME" VARCHAR(20),
"DEPTNO" INT,
FOREIGN KEY ("DEPTNO") REFERENCES "QBEXSQL"."DEPARTMENT"("DEPTNO") ON DELETE CASCADE
);

CREATE COLUMN TABLE "QBEXSQL"."EMPLOYEES"


(
"EMPID" INT NOT NULL PRIMARY KEY,
"EMPNAME" VARCHAR(20),
"DEPTNO" INT,
FOREIGN KEY ("DEPTNO") REFERENCES "QBEXSQL"."DEPARTMENT"("DEPTNO") ON DELETE CASCADE
);

-- Range Partitioning
---------------------

CREATE COLUMN TABLE "QBEXSQL"."RANGEPART"


(
"ORDERID" INT NOT NULL,
"ORDERDATE" DATE NOT NULL,
"NETSALES" DECIMAL(10,2),
PRIMARY KEY ("ORDERID","ORDERDATE")
) PARTITION BY RANGE ("ORDERDATE")

(
PARTITION '2015-01-01' <= VALUES < '2015-06-01',
PARTITION VALUE = '2015-07-15', PARTITION OTHERS
);

INSERT INTO "QBEXSQL"."RANGEPART"


VALUES (1,'2015-01-15',15000.00);

INSERT INTO "QBEXSQL"."RANGEPART"


VALUES (2,'2015-03-15',15000.00);
INSERT INTO "QBEXSQL"."RANGEPART"
VALUES (3,'2015-07-15',15000.00);

INSERT INTO "QBEXSQL"."RANGEPART"


VALUES (4,'2016-07-15',15000.00);
INSERT INTO "QBEXSQL"."RANGEPART"
VALUES (5,'2016-07-15',15000.00);
INSERT INTO "QBEXSQL"."RANGEPART"
VALUES (6,'2016-07-15',15000.00);
-- Month

CREATE COLUMN TABLE "QBEXSQL"."RANGEPARTMONTH"


(
"ORDERID" INT NOT NULL,
"ORDERDATE" DATE NOT NULL,
"NETSALES" DECIMAL(10,2),
PRIMARY KEY ("ORDERID","ORDERDATE")
) PARTITION BY RANGE (MONTH("ORDERDATE"))

(
PARTITION '2015-01' <= VALUES < '2015-06',
PARTITION VALUE = '2015-07', PARTITION OTHERS
);
INSERT INTO "QBEXSQL"."RANGEPARTMONTH"
VALUES (1,'2015-01-15',15000.00);

INSERT INTO "QBEXSQL"."RANGEPARTMONTH"


VALUES (2,'2015-03-15',15000.00);
INSERT INTO "QBEXSQL"."RANGEPARTMONTH"
VALUES (3,'2015-07-15',15000.00);

INSERT INTO "QBEXSQL"."RANGEPARTMONTH"


VALUES (4,'2016-07-15',15000.00);
INSERT INTO "QBEXSQL"."RANGEPARTMONTH"
VALUES (5,'2016-07-15',15000.00);
INSERT INTO "QBEXSQL"."RANGEPARTMONTH"
VALUES (6,'2016-07-15',15000.00);

-- Others:
-----------

1. Rename Column:
-----------------
rename COLUMN "QBEXSQL"."EMPtab4ALTER"."EMPID" TO "EMPNO";

2. Rename Table:
-----------------
RENAME TABLE "QBEXSQL"."EMPtab4ALTER1" TO "FORALTERS";

3. Rename Index:
-----------------
RENAME INDEX <OLDNAME> TO <NEWNAME>;

-- Round Robin:
---------------

-- No Primary key

CREATE COLUMN TABLE "QBEXSQL"."ROUNDROBIN"


(
"ORDERID" INT ,
"ORDERDATE" DATE,
"NETSALES" DECIMAL(10,2)
) PARTITION BY ROUNDROBIN PARTITIONS 4;

INSERT INTO "QBEXSQL"."ROUNDROBIN"


VALUES (1,'2015-01-15',15000.00);

INSERT INTO "QBEXSQL"."ROUNDROBIN"


VALUES (2,'2015-03-15',15000.00);
INSERT INTO "QBEXSQL"."ROUNDROBIN"
VALUES (3,'2015-07-15',15000.00);

INSERT INTO "QBEXSQL"."ROUNDROBIN"


VALUES (4,'2016-07-15',15000.00);
INSERT INTO "QBEXSQL"."ROUNDROBIN"
VALUES (5,'2016-07-15',15000.00);
INSERT INTO "QBEXSQL"."ROUNDROBIN"
VALUES (6,'2016-07-15',15000.00);

-- Schema
----------
Create Schema "QBEXSQL";

Create Schema "QBEXSQL" OWNED BY "<USERNAME>"

-- Create Sequence
------------------
CREATE SEQUENCE "QBEXSQL"."BillingDocNumbers"

START WITH 1
INCREMENT BY 1
MINVALUE 1
MAXVALUE 100
CYCLE
CACHE 5
RESET BY
( SELECT MAX("EMPID") FROM "QBEXSQL"."EMPSIMPLEIND");

CREATE TABLE "QBEXSQL"."EMPNAMES"


(
"EMPANMES" VARCHAR(20)
);

INSERT INTO "QBEXSQL"."EMPNAMES" VALUES ('Pratap');


INSERT INTO "QBEXSQL"."EMPNAMES" VALUES ('Qbex Pratap');
INSERT INTO "QBEXSQL"."EMPNAMES" VALUES ('Chand');
INSERT INTO "QBEXSQL"."EMPNAMES" VALUES ('Pavan');
INSERT INTO "QBEXSQL"."EMPNAMES" VALUES ('Ganga');
INSERT INTO "QBEXSQL"."EMPNAMES" VALUES ('Raja');

CREATE COLUMN TABLE "QBEXSQL"."EMP_IDS"


(
"EMPID" INT NOT NULL PRIMARY KEY,
"EMPNAME" VARCHAR(20)
);

INSERT INTO "QBEXSQL"."EMP_IDS"


( SELECT "QBEXSQL"."BillingDocNumbers".NEXTVAL, "EMPANMES" FROM "QBEXSQL"."EMPNAMES");

-- Unload Priority
-------------------
-- 0 --- 9

CREATE COLUMN TABLE "QBEXSQL"."EMPUNLOAD"


(
"EMPID" INT NOT NULL PRIMARY KEY,
"EMPNAME" VARCHAR(20)
) UNLOAD PRIORITY 0;

-- Data Manipulation Statements


--------------------------------

1. Delete
2. Explain Plan
3. Insert
4. Load
5. Merge Delta
6. Replace / Upsert
7. Select
8. Truncate Table
9. Unload
10. Update

-- Delete:
----------
DELETE FROM "QBEXSQL"."DemoUpdate2";

DELETE FROM "QBEXSQL"."DemoUpdate"


WHERE "KEY" = 1;

-- Delta Merge:
---------------
- Main Store / Delta Store

- Types of Merging:
--------------------
- Auto Merge - triggered by the merge dog (background process) - decision function
- Smart Merge - decision function - triggered via SQL statement (hint)
- Memory Merge - merging in main memory only - no changes to data persistency
- Hard Merge - SQL statement - no decision function - merge token - triggered manually
- Forced Merge - SQL statement - merge token - triggered manually, even when the server is busy
- Critical Merge - triggered by the system when the delta store exceeds a threshold value

CREATE COLUMN TABLE "QBEXSQL"."DemoMerging"


(
"ORDERID" INT NOT NULL PRIMARY KEY,
"ORDERDATE" DATE,
"NETSALES" DECIMAL(10,2),
"REGION" VARCHAR(10)
) NO AUTO MERGE;
INSERT INTO "QBEXSQL"."DemoMerging" VALUES (1,'2015-01-01',15000.00,'SOUTH');

INSERT INTO "QBEXSQL"."DemoMerging" ("ORDERID","NETSALES") VALUES (2,20000.00);

--Hard Merge
MERGE DELTA OF "QBEXSQL"."DemoMerging";

DROP TABLE "QBEXSQL"."DemoMerging";

-- Smart Merge
MERGE DELTA OF "QBEXSQL"."DemoMerging"
WITH PARAMETERS ('SMART_MERGE' = 'ON');

DROP TABLE "QBEXSQL"."DemoMerging";

-- Memory Merge
MERGE DELTA OF "QBEXSQL"."DemoMerging"
WITH PARAMETERS ('SMART_MERGE' = 'ON', 'MEMORY_MERGE' = 'ON');

-- Forced Merge
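A hedged sketch of the corresponding statement (the FORCED_MERGE parameter follows the MERGE DELTA ... WITH PARAMETERS pattern shown above; it is not part of the original notes):

MERGE DELTA OF "QBEXSQL"."DemoMerging"
WITH PARAMETERS ('FORCED_MERGE' = 'ON');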

-- Auto Merge

-- Critical Merge

-- Explain Plan:
----------------
-- Use Tables "product", "customer", "fctsales"

SELECT T2."COMPANYNAME",
T3."PRODUCTID",
SUM(T1."NETSALES")
FROM "QBEXSQL"."FCTSALES" T1
INNER JOIN "QBEXSQL"."CUSTOMER" T2
ON T1."CUSTOMERID" = T2."CUSTOMERID"
INNER JOIN "QBEXSQL"."PRODUCT" T3
ON T3."PRODUCTID" = T1."PRODUCTID"
GROUP BY T2."COMPANYNAME",T3."PRODUCTID";
EXPLAIN PLAN SET STATEMENT_NAME = 'MyfirstQuery' for
SELECT T2."COMPANYNAME",
T3."PRODUCTID",
SUM(T1."NETSALES")
FROM "QBEXSQL"."FCTSALES" T1
INNER JOIN "QBEXSQL"."CUSTOMER" T2
ON T1."CUSTOMERID" = T2."CUSTOMERID"
INNER JOIN "QBEXSQL"."PRODUCT" T3
ON T3."PRODUCTID" = T1."PRODUCTID"
GROUP BY T2."COMPANYNAME",T3."PRODUCTID";

select * from explain_plan_table;

-- Insert:
----------
-- The INSERT statement adds a record to a table.

CREATE COLUMN TABLE "QBEXSQL"."DemoInsert"


(
"ORDERID" INT NOT NULL PRIMARY KEY,
"ORDERDATE" DATE,
"NETSALES" DECIMAL(10,2),
"REGION" VARCHAR(10)
);

INSERT INTO "QBEXSQL"."DemoInsert" VALUES (1,'2015-01-01',15000.00,'SOUTH');

INSERT INTO "QBEXSQL"."DemoInsert" ("ORDERID","NETSALES") VALUES (2,20000.00);

CREATE COLUMN TABLE "QBEXSQL"."DemoForData"


(
"ORDERID" INT NOT NULL PRIMARY KEY,
"ORDERDATE" DATE,
"NETSALES" DECIMAL(10,2),
"REGION" VARCHAR(10)
);

INSERT INTO "QBEXSQL"."DemoForData" VALUES (5,'2015-01-01',15000.00,'SOUTH');


INSERT INTO "QBEXSQL"."DemoForData" VALUES (6,'2016-01-01',15000.00,'NORTH');
INSERT INTO "QBEXSQL"."DemoForData" VALUES (7,'2017-01-01',15000.00,'SOUTH');
INSERT INTO "QBEXSQL"."DemoForData" VALUES (8,'2018-01-01',15000.00,'NORTH');

INSERT INTO "QBEXSQL"."DemoForData"


(
SELECT * FROM "QBEXSQL"."DemoForData"
);

-- LOAD
-------
--The LOAD statement explicitly loads column store table data into memory, rather than waiting for the first access.

CREATE COLUMN TABLE "QBEXSQL"."DemoLoad"


(
"ORDERID" INT NOT NULL PRIMARY KEY,
"ORDERDATE" DATE,
"NETSALES" DECIMAL(10,2),
"REGION" VARCHAR(10)
);
INSERT INTO "QBEXSQL"."DemoLoad" VALUES (1,'2015-01-01',15000.00,'SOUTH');

INSERT INTO "QBEXSQL"."DemoLoad" ("ORDERID","NETSALES") VALUES (2,20000.00);

LOAD "QBEXSQL"."DemoLoad" DELTA;

select loaded from m_cs_tables


where table_name = 'DemoLoad'
and schema_name = 'QBEXSQL';

LOAD "QBEXSQL"."DemoLoad" ALL;

select loaded from m_cs_tables


where table_name = 'DemoLoad'
and schema_name = 'QBEXSQL';

LOAD "QBEXSQL"."DemoLoad" ("ORDERID", "NETSALES");

select loaded from m_cs_tables


where table_name = 'DemoLoad'
and schema_name = 'QBEXSQL';

-- Select
---------

-- SELECT <Select_clause>
-- <from_clause>
-- <where_clause>
--<groupby_clause>
--<having_clause>
--<set Operator>
--<order by Clause>
--<limit>

-- "CUSTOMR", "PRODUCT", "FCTSALES"


-- Examples:
-------------

-- SELECT
----------
SET SCHEMA "QBEXSQL";

SELECT *
FROM "CUSTOMER";

SELECT "COUNTRYNAME"
FROM "CUSTOMER";

SELECT "COUNTRYNAME" AS "Country Name"


FROM "CUSTOMER";

SELECT DISTINCT "COUNTRYNAME" AS "Country Name"


FROM "CUSTOMER";

SELECT "CUSTOMERID", "COMPANYNAME"


FROM "CUSTOMER";

SELECT "CUSTOMERID", "COMPANYNAME"


FROM "CUSTOMER";

SELECT "CUSTOMERID", "COMPANYNAME"


FROM "CUSTOMER"
WHERE "COUNTRYNAME" = 'USA';

SELECT "CUSTOMERID", "COMPANYNAME"


FROM "CUSTOMER"
WHERE NOT "COUNTRYNAME" = 'USA';

SELECT "CUSTOMERID", "COMPANYNAME"


FROM "CUSTOMER"
WHERE "COUNTRYNAME" = 'USA'
AND "CITYNAME" = 'New York';

SELECT "CUSTOMERID", "COMPANYNAME"


FROM "CUSTOMER"
WHERE "COUNTRYNAME" = 'USA'
OR "COUNTRYNAME" = 'UK';

SELECT "CUSTOMERID", "COMPANYNAME"


FROM "CUSTOMER"
WHERE "COUNTRYNAME" IN ( 'USA', 'UK' );

SELECT "CUSTOMERID", "COMPANYNAME"


FROM "CUSTOMER"
WHERE "COUNTRYNAME" LIKE 'U%';

SELECT "CUSTOMERID", "COMPANYNAME"


FROM "CUSTOMER"
WHERE "COUNTRYNAME" LIKE 'U_';

SELECT "CUSTOMERID", "COMPANYNAME"


FROM "CUSTOMER"
WHERE ( "COUNTRYNAME" = 'USA' OR "COUNTRYNAME" = 'UK' )
AND "REGIONNAME" = 'North America';

SELECT *
FROM "CUSTOMER"
WHERE "CUSTOMERID" = 1;

SELECT *
FROM "CUSTOMER"
WHERE "CUSTOMERID" < 5;

SELECT *
FROM "CUSTOMER"
WHERE "CUSTOMERID" > 110;

SELECT *
FROM "CUSTOMER"
WHERE "CUSTOMERID" != 1;

SELECT *
FROM "CUSTOMER"
WHERE "CUSTOMERID" <> 1;

SELECT *
FROM "CUSTOMER"
WHERE "CUSTOMERID" >= 110;

SELECT *
FROM "CUSTOMER"
WHERE "CUSTOMERID" BETWEEN 1 AND 5;

SELECT *
FROM "CUSTOMER"
WHERE "CUSTOMERID" NOT BETWEEN 1 AND 5;

SELECT *
FROM "DemoMerging1"
WHERE "ORDERDATE" IS NOT NULL;

SELECT *
FROM "DemoMerging1"
WHERE "ORDERDATE" IS NULL;

SELECT *
FROM "CUSTOMER"
WHERE CONTAINS ("COMPANYNAME", 'Zap Corp.');

SELECT *, Score()
FROM "CUSTOMER"
WHERE CONTAINS ("COMPANYNAME", 'Zap Corp.', FUZZY(0.5));

SELECT SUM("NETSALES") AS "Total Sales"


FROM "FCTSALES";
SELECT COUNT(*)
FROM "FCTSALES";

SELECT MIN("NETSALES")
FROM "FCTSALES";

SELECT M("NETSALES")
FROM "FCTSALES";

SELECT SUM("NETSALES") AS "Total Sales", "CUSTOMERID"


FROM "FCTSALES"
GROUP BY "CUSTOMERID";

SELECT SUM("NETSALES") AS "Total Sales",


"CUSTOMERID","PRODUCTID"
FROM "FCTSALES"
GROUP BY "CUSTOMERID","PRODUCTID";

SELECT SUM("NETSALES") AS "Total Sales", "CUSTOMERID","PRODUCTID"


FROM "FCTSALES"
GROUP BY "CUSTOMERID","PRODUCTID
HAVING SUM("NETSALES") < 50000;

SELECT SUM("NETSALES") AS "Total Sales", "CUSTOMERID","PRODUCTID"


FROM "FCTSALES
GROUP BY "CUSTOMERID","PRODUCTID"
HAVING SUM("NETSALES") < 50000
ORDER BY "CUSTOMERID" ;

SELECT SUM("NETSALES") AS "Total Sales","CUSTOMERID","PRODUCTID"


FROM "FCTSALES"
GROUP BY "CUSTOMERID","PRODUCTID"
HAVING SUM("NETSALES") < 50000
ORDER BY "CUSTOMERID" DESC;

SELECT SUM("NETSALES") AS "Total Sales", "CUSTOMERID","PRODUCTID"


FROM "FCTSALES"
GROUP BY "CUSTOMERID","PRODUCTID"
HAVING SUM("NETSALES") < 50000
ORDER BY "CUSTOMERID" DESC
LIMIT 5;

SELECT SUM("NETSALES") AS "Total Sales", "CUSTOMERID","PRODUCTID"


FROM "FCTSALES"
GROUP BY "CUSTOMERID","PRODUCTID"
HAVING SUM("NETSALES") < 50000
ORDER BY "CUSTOMERID" DESC
LIMIT 5 OFFSET 10;

-- Sub Queries / Sub Select


----------------------------

SELECT SUM("NETSALES") AS "Total Sales","CUSTOMERID","PRODUCTID"


FROM "FCTSALES"
WHERE "CUSTOMERID
( SELECT "CUSTOMERID" FROM "CUSTOMER" WHERE "COUNTRYNAME" = 'USA'
GROUP BY "CUSTOMERID","PRODUCTID"
HAVING SUM("NETSALES") < 50000
ORDER BY "CUSTOMERID" DESC
LIMIT 5 OFFSET 10;

-- Set Operator

( select "CUSTOMERID" FROM "CUSTOMER" WHERE "COUNTRYNAME" = 'USA' )


UNION
( select "CUSTOMERID" FROM "CUSTOMER" WHERE "COUNTRYNAME" = 'UK' )

( select "CUSTOMERID" FROM "CUSTOMER" WHERE "COMPANYNAME" LIKE 'T%' )


INTERSECT
( select "CUSTOMERID" FROM "CUSTOMER" WHERE "COMPANYNAME" LIKE '%p' )

( select "CUSTOMERID" FROM "CUSTOMER" WHERE "COMPANYNAME" LIKE 'T%' )


EXCEPT
( select "CUSTOMERID" FROM "CUSTOMER" WHERE "COMPANYNAME" LIKE '%p' )

-- join
--------
SELECT T2."COMPANYNAME", SUM(T1."NETSALES")
FROM "FCTSALES" T1
INNER JOIN "CUSTOMER" T2
ON T1."CUSTOMERID" = T2."CUSTOMERID"
GROUP BY T2."COMPANYNAME";

SELECT T2."COMPANYNAME", SUM(T1."NETSALES")


FROM "FCTSALES" T1
LEFT OUTER JOIN "CUSTOMER" T2
ON T1."CUSTOMERID" = T2."CUSTOMERID"
GROUP BY T2."COMPANYNAME";

SELECT T2."COMPANYNAME", SUM(T1."NETSALES")


FROM "FCTSALES" T1
RIGHT OUTER JOIN "CUSTOMER" T2
ON T1."CUSTOMERID" = T2."CUSTOMERID"
GROUP BY T2."COMPANYNAME";

SELECT T2."COMPANYNAME", SUM(T1."NETSALES")


FROM "FCTSALES" T1
FULL OUTER JOIN "CUSTOMER" T2
ON T1."CUSTOMERID" = T2."CUSTOMERID"
GROUP BY T2."COMPANYNAME";

SELECT T2."COMPANYNAME", T3."PRODUCTNAME",SUM(T1."NETSALES")


FROM "FCTSALES" T1
INNER JOIN "CUSTOMER" T2
ON T1."CUSTOMERID" = T2."CUSTOMERID"
INNER JOIN "PRODUCT" T3
ON T1."PRODUCTID" = T3."PRODUCTID"
GROUP BY T2."COMPANYNAME", T3."PRODUCTNAME"
HAVING SUM(T1."NETSALES") < 50000
ORDER BY T2."COMPANYNAME" DESC;

SELECT TOP 10 T2."COMPANYNAME", SUM(T1."NETSALES")


FROM "FCTSALES" T1
INNER JOIN "CUSTOMER" T2
ON T1."CUSTOMERID" = T2."CUSTOMERID"
GROUP BY T2."COMPANYNAME"
ORDER BY SUM(T1."NETSALES") DESC;

create column table t1 ( id int primary key, customer varchar(5), year int, product varchar(5), sales int );
insert into t1 values(1, 'C1', 2009, 'P1', 100);
insert into t1 values(2, 'C1', 2009, 'P2', 200);
insert into t1 values(3, 'C1', 2010, 'P1', 50);
insert into t1 values(4, 'C1', 2010, 'P2', 150);
insert into t1 values(5, 'C2', 2009, 'P1', 200);
insert into t1 values(6, 'C2', 2009, 'P2', 300);
insert into t1 values(7, 'C2', 2010, 'P1', 100);

select customer, year, product, sum(sales)


from t1
group by GROUPING SETS ( (customer, year), (customer, product) );

-- Truncate:
------------
--Deletes all rows from a table. TRUNCATE is faster than DELETE FROM when deleting all records from a table,
--but TRUNCATE cannot be rolled back.
--To be able to rollback from record deletion, DELETE should be used.

-- Use "QBEXSQL"."DemoInsert"

select * from "QBEXSQL"."DemoInsert";


TRUNCATE TABLE "QBEXSQL"."DemoInsert";
-- Truncate & Delete difference:
--------------------------------
SET SCHEMA "QBEXSQL";

CREATE TABLE T (KEY INT PRIMARY KEY, VAL INT);

INSERT INTO T VALUES (1, 1);


INSERT INTO T VALUES (2, 2);
INSERT INTO T VALUES (3, 3);

COMMIT;

DELETE FROM T WHERE KEY = 1;

ROLLBACK;

SELECT * FROM T;
DROP TABLE T;

CREATE TABLE T (KEY INT PRIMARY KEY, VAL INT);

INSERT INTO T VALUES (1, 1);


INSERT INTO T VALUES (2, 2);
INSERT INTO T VALUES (3, 3);
COMMIT;

TRUNCATE TABLE T;

ROLLBACK;

SELECT * FROM T;

-- Unload:
----------
--The UNLOAD statement unloads the column store table from memory. This can be done to free up memory.
--The table will be loaded again on next access.

UNLOAD "QBEXSQL"."DemoLoad";

select loaded from m_cs_tables


where table_name = 'DemoLoad'
and schema_name = 'QBEXSQL';

-- Update
----------
--use Table "QBEXSQL"."DemoForData", "QBEXSQL"."DemoInsert"

SELECT * FROM "QBEXSQL"."DemoInsert" WHERE "ORDERID" = 1;

UPDATE "QBEXSQL"."DemoInsert"
SET "REGION" = 'EAST'
WHERE "ORDERID" = 1;

SELECT * FROM "QBEXSQL"."DemoInsert" WHERE "ORDERID" = 2;

UPDATE "QBEXSQL"."DemoInsert"
SET "REGION" = 'EAST',
"ORDERDATE" = '9999-01-01'
WHERE "ORDERID" = 2;

CREATE TABLE "QBEXSQL"."DemoUpdate"


(
"KEY" INT PRIMARY KEY,
"VAL" INT
);

INSERT INTO "QBEXSQL"."DemoUpdate" ("KEY") VALUES (1);


INSERT INTO "QBEXSQL"."DemoUpdate" ("KEY") VALUES (2);
INSERT INTO "QBEXSQL"."DemoUpdate" ("KEY") VALUES (3);

UPDATE "QBEXSQL"."DemoUpdate" VAL = KEY + 10;

CREATE TABLE "QBEXSQL"."DemoUpdate2"


(
"KEY" INT PRIMARY KEY,
"VAL" INT
);

INSERT INTO "QBEXSQL"."DemoUpdate2" ("KEY") VALUES (1);


INSERT INTO "QBEXSQL"."DemoUpdate2" ("KEY") VALUES (2);
INSERT INTO "QBEXSQL"."DemoUpdate2" ("KEY") VALUES (3);

UPDATE "QBEXSQL"."DemoUpdate2" VAL = "QBEXSQL"."DemoUpdate"."VAL"


FROM "QBEXSQL"."DemoUpdate", "QBEXSQL"."DemoUpdate2"
WHERE "QBEXSQL"."DemoUpdate2"."KEY" = "QBEXSQL"."DemoUpdate"."KEY";

-- Upsert / replace:
---------------------
-- Use Table "QBEXSQL"."DemoUpdate", "QBEXSQL"."DemoForData", "QBEXSQL"."DemoInsert",
"QBEXSQL"."DemoUpdate2"

UPSERT "QBEXSQL"."DemoUpdate2"
VALUES (1,99)
WHERE "KEY" = 1;

UPSERT "QBEXSQL"."DemoUpdate2"
VALUES (9,99)
WHERE "KEY" = 9;

UPSERT "QBEXSQL"."DemoUpdate2"
VALUES (88,99)
WITH PRIMARY KEY;

UPSERT <TABLENAME>
<SUB QUERY>;
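A concrete sketch of the subquery form, reusing the two demo tables above (assumes the column order of the subquery matches the target table):

UPSERT "QBEXSQL"."DemoUpdate2"
SELECT "KEY", "VAL" FROM "QBEXSQL"."DemoUpdate";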

-- Functions:
-------------

CREATE FUNCTION "QBEX"."ADD"


(V_A INT, V_B INT)
RETURNS V_C INT
AS
BEGIN
V_C := :V_A + :V_B;
END;

SELECT "QBEX"."ADD"(10,20).V_C FROM DUMMY;


-- Difference between Procedure & Function:


----------------------------------------------

1. A procedure can return zero or n values, whereas a function must return exactly one value.

2. Procedures can have input and output parameters, whereas functions can have only input parameters.

3. A procedure allows SELECT as well as DML statements in it, whereas a function allows only SELECT statements.

4. Functions can be called from a procedure, whereas procedures cannot be called from a function.

5. Exceptions can be handled by a try-catch block in a procedure, whereas a try-catch block cannot be used in a function.

6. We can use transaction management in a procedure, whereas we cannot in a function.

7. Procedures cannot be utilized in a SELECT statement, whereas a function can be embedded in a SELECT statement (see the sketch after this list).
8. UDFs can be used anywhere in the WHERE/HAVING/SELECT sections of SQL statements, whereas
stored procedures cannot be.

9. UDFs that return tables can be treated as another rowset. This can be used in JOINs with other tables.
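A minimal sketch illustrating points 3, 7 and 8: the scalar function "QBEX"."ADD" defined above can be embedded in a SELECT, while a procedure may contain DML and is invoked with CALL (the procedure name DemoProc and its body are illustrative, not from the original notes):

-- Scalar UDF used inside a SELECT:
SELECT "QBEX"."ADD"(10,20).V_C FROM DUMMY;

-- Procedure containing DML, invoked with CALL (illustrative):
CREATE PROCEDURE "QBEXSQL"."DemoProc" (IN iv_key INT, IN iv_val INT)
LANGUAGE SQLSCRIPT AS
BEGIN
INSERT INTO "QBEXSQL"."DemoUpdate" VALUES (:iv_key, :iv_val);
END;

CALL "QBEXSQL"."DemoProc" (10, 100);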

-- Difference between Procedure & Function:


----------------------------------------------

1. Procedure can return zero or n values whereas function can return one value which is mandatory.

2. Procedures can have input/output parameters for it whereas functions can have only input parameters.

3. Procedure allows select as well as DML statement in it whereas function allows only select statement in it.

4. Functions can be called from procedure whereas procedures cannot be called from function.

5. Exception can be handled by try-catch block in a procedure whereas try-catch block cannot be used in a function.

6. We can go for transaction management in procedure whereas we can’t go in function.

7. Procedures can not be utilized in a select statement whereas function can be embedded in a select statement.

8. UDF can be used in the SQL statements anywhere in the WHERE/HAVING/SELECT section where as
Storedprocedures cannot be.

9. UDFs that return tables can be treated as another rowset. This can be used in JOINs with other tables.

-- Returns table
-----------------

CREATE FUNCTION "QBEXSQL"."ReturnTable"


(lv_custid INT)
RETURNS TABLE
( "CUSTOMERID" TINYINT ,
"CITYID" TINYINT ,
"COUNTRYID" TINYINT,
"REGIONID" TINYINT,
"COMPANYNAME" NVARCHAR(20),
"CITYNAME" NVARCHAR(10),
"COUNTRYNAME" NVARCHAR(15),
"REGIONNAME" NVARCHAR(13))
AS
BEGIN
RETURN SELECT * FROM "QBEXSQL"."CUSTOMER" WHERE "CUSTOMERID" < 20;

END;

There are two different kinds of user-defined function (UDF):
Table User Defined Function and Scalar User Defined Function, referred to in the following as Table UDF and Scalar UDF.
They differ by input/output parameters, the functions supported in the body, and the way they are consumed in SQL statements.
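For example, the table UDF "QBEXSQL"."ReturnTable" defined above is consumed like a table in the FROM clause, while a scalar UDF such as "QBEX"."ADD" is used in the select list (a sketch based on the objects created earlier in these notes):

-- Table UDF: consumed in the FROM clause
SELECT * FROM "QBEXSQL"."ReturnTable"(20);

-- Scalar UDF: consumed in the select list
SELECT "QBEX"."ADD"(10,20).V_C FROM DUMMY;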

-- DataType Conversion Function


--------------------------------
-- CAST - To convert the expression to a specific DataType

SELECT CAST (10.5 AS INTEGER) "cast" FROM DUMMY;

-- TO_DATE - Converts a date string to Date Data Type

SELECT TO_DATE('12-10-2010', 'DD-MM-YYYY') "to date" FROM DUMMY;

SELECT TO_DATE('2010-10-12', 'YYYY-MM-DD') "to date" FROM DUMMY;

SELECT TO_DATE('20101012', 'YYYYMMDD') "to date" FROM DUMMY;

SELECT TO_DATE('10201012', 'DDYYYYMM') "to date" FROM DUMMY;

-- TO_DATS -- Converts a date string to sap Date Data Type

SELECT TO_DATS ('2010-01-12') "abap date" FROM DUMMY;

-- TO_TIMESTAMP

SELECT TO_TIMESTAMP ('2010-01-11 13:30:00', 'YYYY-MM-DD HH24:MI:SS') "to timestamp" FROM DUMMY;

SELECT TO_TIMESTAMP ('2010-01-11 13:30:00', 'YYYY-MM-DD HH:MI:SS') "to timestamp" FROM DUMMY;

-- Date Time Functions:


------------------------

-- Add Days

SELECT ADD_DAYS (TO_DATE ('2009-12-05', 'YYYY-MM-DD'), 30) "add days" FROM DUMMY;

-- Current Date

SELECT CURRENT_DATE "current date" FROM DUMMY;

-- Current Time

SELECT CURRENT_TIME "current time" FROM DUMMY;

-- Current Timestamp

SELECT CURRENT_TIMESTAMP "current timestamp" FROM DUMMY;


-- DayNAme

SELECT DAYNAME ('2011-05-30') "dayname" FROM DUMMY;

-- Days Between

SELECT DAYS_BETWEEN (TO_DATE ('2009-12-05', 'YYYY-MM-DD'), TO_DATE('2010-01-05', 'YYYY-MM-DD'))


"days between" FROM DUMMY;

-- Extract

EXTRACT ({YEAR | MONTH | DAY | HOUR | MINUTE | SECOND} FROM d)

SELECT EXTRACT (YEAR FROM TO_DATE ('2010-01-04', 'YYYY-MM-DD')) "extract" FROM DUMMY;

-- MONTH

SELECT MONTH ('2011-05-30') "month" FROM DUMMY;

-- Month NAme

SELECT MONTHNAME ('2011-05-30') "monthname" FROM DUMMY;

-- Now

SELECT NOW () "now" FROM DUMMY;

-- Week

SELECT WEEK (TO_DATE('2011-05-30', 'YYYY-MM-DD')) "week" FROM DUMMY;

-- WeekDay

SELECT WEEKDAY (TO_DATE ('2010-12-31', 'YYYY-MM-DD')) "week day" FROM DUMMY;

-- Number Functions:
--------------------

-- ABS

SELECT ABS (-1) "absolute" FROM DUMMY;

-- CEIL - Returns the first integer that is greater or equal to the value n.

SELECT CEIL (14.5) "ceiling" FROM DUMMY;

-- Floor - Returns the largest integer not greater than the numeric argument n.

SELECT FLOOR (14.5) "floor" FROM DUMMY;


-- Mod - Returns the remainder of a number

SELECT MOD (15, 4) "modulus" FROM DUMMY;

-- POWER

SELECT POWER (2, 10) "power" FROM DUMMY;

-- Round

SELECT ROUND (16.16, 1) "round" FROM DUMMY;

-- sign - Returns Sign

SELECT SIGN (-15) "sign" FROM DUMMY;

-- sqrt

SELECT SQRT (4) "sqrt" FROM DUMMY;

-- UMINUS - Returns the negated value of the numeric argument n.

SELECT UMINUS(-765) "uminus" FROM DUMMY;

-- SQL Functions:
-----------------
-- Data Type Conversion Functions
-- DateTime Functions
-- Fulltext Functions
-- Number Functions
-- String Functions
-- Window Functions
-- Miscellaneous Functions

-- String Functions :
--------------------
-- CHAR - Converts an ASCII code (number) to the corresponding character

SELECT CHAR (65) || CHAR (110) "character" FROM DUMMY;

-- CONCAT

SELECT CONCAT ('C', 'at') "concat" FROM DUMMY;

-- LCASE

SELECT LCASE ('TesT') "lcase" FROM DUMMY;

-- LEFT
SELECT LEFT ('Hello', 3) "left" FROM DUMMY;

-- LENGTH

SELECT LENGTH ('length in char') "length" FROM DUMMY;

-- LOCATE

SELECT LOCATE ('length in char', 'char') "locate" FROM DUMMY;


-- REPLACE

SELECT REPLACE ('DOWNGRADE DOWNWARD','DOWN', 'UP') "replace" FROM DUMMY;

-- RIGHT

SELECT RIGHT('HI0123456789', 3) "right" FROM DUMMY;

-- RPAD

SELECT RPAD ('end', 15, '12345') "right padded" FROM DUMMY;

-- Substring After

SELECT SUBSTR_AFTER ('Hello My Friend','My ') "substr after" FROM DUMMY;

-- Substring Before

SELECT SUBSTR_BEFORE ('Hello My Friend','My') "substr before" FROM DUMMY;

-- Sub String

SELECT SUBSTRING ('1234567890',4,2) "substring" FROM DUMMY;

-- Aggregate Functions:
-----------------------
select class, val, offset,
COUNT(*) over (partition by class) as c1,
COUNT(offset) over (partition by class) as c2,
COUNT(*) over (partition by class order by val) as c3,
COUNT(offset) over (partition by class order by val) as c4,
MAX(val) over (partition by class) as m1,
MAX(val) over (partition by class order by val) as m2
from T;

-- First Value, NTILE, Last Value, Nth Value:


----------------------------------------------

select class, val,


NTILE(3) over (partition by class order by val) as nt,
FIRST_VALUE(val) over (partition by class order by val) as first,
LAST_VALUE(val) over (partition by class order by val) as last,
NTH_VALUE(val, 4) over (partition by class order by val) as nth
from T;

-- Lead and LAG functions:


--------------------------
select class, val, offset, LEAD(val) over (partition by class order by val) as lead,
LEAD(val,offset,-val) over (partition by class order by val) as lead2,
LAG(val) over (partition by class order by val) as lag,
LAG(val,offset,-val) over (partition by class order by val) as lag2
from T;

-- Window Functions:
---------------------
RANK() - Returns rank of a row within a partition, starting from 1.
Returns duplicate values in the ranking sequence when there are ties between values.

DENSE_RANK() - Returns ranking values without gaps.

ROW_NUMBER() - Returns unique row number within a partition.

NTILE ( <number_of_buckets> ) - Distributes rows into the specified number of buckets
and assigns the bucket number, starting from 1, to each row in the bucket.

FIRST_VALUE ( <expression> ) - Returns the value of <expression> evaluated at the first row of the window frame.

LAST_VALUE ( <expression> ) - Returns the value of <expression> evaluated at the last row of the window frame.

NTH_VALUE ( <expression> , <n> ) - Returns the value of <expression> evaluated at the <n>-th row
from the first row of the window frame.

<window_aggregate_func> ::= COUNT(*) | <agg_name> ( <expression> )


<agg_name> ::= COUNT | MIN | MAX | SUM |
AVG | STDDEV | VAR

<lead_func> ::= LEAD ( <expression> [, <offset> [, <default_expr>]] )

Returns the value of the row <offset> rows after the current row. The <offset> should be non-negative; the default is 1.
If the <offset> crosses the boundaries of the partition, the <default_expr> value is returned.
If <default_expr> is not specified, a null value is returned. The <offset> and <default_expr> are evaluated at the current row.
The output of the LEAD function can be non-deterministic among tie values.

<lag_func> ::= LAG ( <expression> [, <offset> [, <default_expr>]] )


Returns the value of the row <offset> rows before the current row. The <offset> should be non-negative; the default is 1.
If the <offset> crosses the boundaries of the partition, the <default_expr> value is returned.
If <default_expr> is not specified, a null value is returned. The <offset> and <default_expr> are evaluated at the current row.
The output of the LAG function can be non-deterministic among tie values.

-- Ranking Functions:
---------------------

SET SCHEMA <Schema Name>;

create table T (class char(10), val int, offset int);


insert into T values('A', 1, 1);
insert into T values('A', 3, 3);
insert into T values('A', 5, null);
insert into T values('A', 5, 2);
insert into T values('A', 10, 0);
insert into T values('B', 1, 3);
insert into T values('B', 1, 1);
insert into T values('B', 7, 1);

select class, val,


ROW_NUMBER() over (partition by class order by val) as row_num,
RANK() over (partition by class order by val) as rank,
DENSE_RANK() over (partition by class order by val) as dense_rank
from T;

-- Window Functions
--------------------

<window_function> ::= <window_function_type>
OVER (
[ <window_partition_by_clause> ]
[ <window_order_by_clause> ]
)

======================================================
<window_function_type> ::=
<rank_func> | <dense_rank_func> | <row_number_func> |
<percent_rank_func> | <cume_dist_func> | <ntile_func> |
<lead_func> | <lag_func> | <first_value_func> | <last_value_func> |
<nth_value_func> | <window_aggregate_func>

=========================================================

<window_partition_by_clause> ::= PARTITION BY <expression> [ { , <expression> } ... ]

==========================================================
<window_order_by_clause> ::= ORDER BY {<window_order_by_expression>}
<window_order_by_expression> ::= <expression> [ ASC | DESC ] [ NULLS FIRST | NULLS LAST ]

Load balancing in a distributed system

Individual partitions can be distributed across multiple hosts.


This means that a query on a table is not processed by a single server but by all the servers that host partitions.

Overcoming the size limitation of column-store tables

A non-partitioned table cannot store more than 2 billion rows.


It is possible to overcome this limit by distributing the rows across several partitions.
Each partition must not contain more than 2 billion rows.

Parallelization

Partitioning allows operations to be parallelized by using several execution threads for each table.

Partition pruning

Queries are analyzed to determine whether or not they match the given partitioning specification of a table.
If a match is found, it is possible to determine the actual partitions that hold the data being queried.
Using this method, the overall load on the system can be reduced, thus improving the response time.
For example, if a table is partitioned by year, a query restricted to the data of one year is executed
only on the partition with data for this year.
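As a sketch, for the "QBEXSQL"."RANGEPARTMONTH" table created earlier (partitioned by MONTH("ORDERDATE")), the optimizer can restrict a query limited to one month to the matching partition:

-- Only the partition holding month 2015-03 needs to be read for this query.
SELECT SUM("NETSALES")
FROM "QBEXSQL"."RANGEPARTMONTH"
WHERE "ORDERDATE" >= '2015-03-01' AND "ORDERDATE" < '2015-04-01';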

Improved performance of the delta merge operation

The performance of the delta merge operation depends on the size of the main index.
If data is only being modified on some partitions, fewer partitions will need to be delta merged
and therefore performance will be better.

Explicit partition handling

Applications may actively control partitions, for example, by adding partitions to store the data for an upcoming
month.

Analytical privileges are like analysis authorizations in BW.

There are two types of Analytical privileges


- Static
- Dynamic

Ex: Restrict the user to see only the data of country Mexico.

How to create:
Click on Catalogue - Models - Analytical Privileges
Name : < >

In the Models tab add a CV or AV as per the requirement.

Input the associated columns <Country>
and the values they are restricted to < India USA >

How to test:

Create a user (give access to _SYS_BI, _SYS_BIC, _SYS_REPO, and give access to the models in the current schema).
Options include edit, maintain, activate.

The same screen has tabs for Granted Roles / System Privileges / Object Privileges / Analytical Privileges.

Analytical Privileges - provide the analytical privilege name; System Privileges - grant Execute.

Execute the report and you can see only the data for the authorized privilege.

Dynamic Authorization
--------------------
The authorization should check a table, and based on the entry maintained in the table the data is retrieved.

Create table :authtable


-----------
"user" - US
"hana" - MX
"System" - JP

Create table type


----------------
Create type "qbex"."ttcountry " as table (country varchar(6));

create procedure " "." "(out itabcountry "qbex"."ttcountry")


Language SQLScript
SQL Securitydefiner
Reads SQLdata
AS
BEGIN
ITABCOUNTRY= select "country" from "qbex"."authtable" where user = session_user;
END;

There are two types of procedures you can create:
1. Catalogue procedure
2. Repository procedure

Go to the analytical privilege,
go to Assign Restrictions,
type: Catalogue Procedure <in> <procedure_created>

Analytical View
Properties
------------
Analytical views can be created without attribute views
Attribute views can be consumed in Analytical views.
Measures can only be selected from single fact table.
Multiple fact tables can be used.
Define Central entity for fact tables.
Formulate star join using fact tables and attribute views.

Analytical View = Semantics - Star Join - Data Foundation.


In the Shared tab you will see all fields from the attribute views.

Data Category
- cube.

Hierarchies cannot be used.


currency & Unit Conversion = possible.
Default Schema for Currency Conversion - SAPABAP1

Cache Validation:
- Hourly / Daily.

If complex join conditions are used, two column views will be created:
- OLAP catalogue column view.
- Calculation catalogue column view.

Calculated Column:
------------------
- Calculated Columns (Hidden / Calculate Before Aggregation - MTD with ex rates of EOM)
what type of calculated column.
- Measure
- Attribute
- Counter.
Calculated columns from attribute views can be seen in Analytical views.

Restricted Columns.
-------------------
Display the data measures for the required period.
- Ex: Gross Amount for 2012; the rest will be ???? (similar to conditions in BEx).
Filters
------
Possible in Data foundation.

Currency Conversion
-------------------

Go to Semantics - Select measure - and click on assign semantics.


- Source / Target / EX.rt type/ Conv Date/ Schema / Session client etc.
- Upon Failure - Fail / NULL / IGNORE

Variable -
--------
Applies on the data after the whole data is fetched.
Goto Semantics - Variables / input parameters - Create Variable.
Selection type
- Single
- Interval
- Range
- Multiple Entries.
Default Value - Constant / Expression.
Input Parameters
----------------
Applies on the data like a where clause.
Parameter types
- Direct
- Column
- Derived from table.
- Static list.
Default Value - Constant / Expression.

IP as place holder - $$IPREGION$$

Cascading Prompts - Region (EMEA) acts as input for Country (DE FR UK)
You can place the input parameter in the filter as a placeholder which receives
the value during runtime.

Mapping of Input Parameters = MAP IPCountry (region) - IPRegion
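A hedged sketch of how such an input parameter is supplied when querying the generated column view (the package, view and column names below are illustrative placeholders, not objects from these notes):

SELECT "REGION", SUM("NETSALES")
FROM "_SYS_BIC"."<package>/<analytic_view>"
('PLACEHOLDER' = ('$$IPREGION$$', 'EMEA'))
GROUP BY "REGION";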

Time Travel Queries


--------------------
Check the Enable History
- Create an input parameter of data type date or timestamp
- Assign the input parameter in the next option - History input parameter.

Additional columns would be created for History enable tables, which can be seen in the table M_CS_ALL_Columns.
- $trex_udit$
- $Row_id$
- $Valid_from$
- $Valid_to$
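A hedged sketch of a time travel query against a history column table (the table name is illustrative and the exact syntax depends on the HANA revision):

-- Illustrative history table and a query against an earlier point in time.
CREATE HISTORY COLUMN TABLE "QBEXSQL"."EMPHIST" ("EMPID" INT PRIMARY KEY, "EMPNAME" VARCHAR(20));

SELECT * FROM "QBEXSQL"."EMPHIST" AS OF UTCTIMESTAMP '2015-01-01 00:00:00';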

Allow relational optimization: when you have complex calculations in your analytical views, which tend to create sub-selects
and one more calculation column view on top of the OLAP catalogue column view,
then in these circumstances, to improve performance, we check "allow relational optimization".

m_transactions for last commit id information.

Attribute View
PORTS
INDEX - 3XX15
XS - 80XX
HANA as Modelling Tool
-----------------------

Packages
Attribute & Measures
Information Views
-Attribute Views
-Analytical Views
-Calculation Views
Analytical Privileges
Stored Procedures
Decision Tables

Attribute - MD in BW or a Dimension
Measure - Measures or Quantified like KF in BW

HANA server View


----------------

-Catalogue - all DB-related objects

- Column views
- EPM models
- EPM query sources
- Functions
- Indexes
- Procedures
- Sequences
- Synonyms
- Triggers
- Views
- Tables
-Content - All modelling objects
-Schema
-Attribute Views
-Analytical Views
-Calculation Views
-Analytical Privileges
-Procedures
-Decision Tables
Attribute Views can be used in Analytical views
Attribute Views can be used in calculation views
Analytical views can be used in calculation views
calculation views can be used in calculation views
Attribute views and Analytical views can be created with calculation views.

No Reporting on Attribute views


Reporting possible on Analytical and Calculation Views
How Migration is done in HANA
-----------------------------
Import the source system metadata of the physical tables - migrate the DB

Data provisioning is done using SLT or BODS

Create information models - AT V, AN V, CV

Deploy column views from _SYS_BIC

Consume the views in BO using BICS or MDX in the IDT of BO

Why not modelling in BO?


--------------------------

The same joins can be used in BO; however, since BO is an application layer,
performing the joins in HANA pushes the logic down into the HANA DB.

Types of Attributes
--------------------
- Simple
- Calculated
- Local (remove reference to behave differently)

Types of Measures
-----------------
- Simple
- Calculated
- Restricted
- Counters
Attribute views - Semantics - Data Foundation

Sys_repo - Activation requests


Sys_demo - Demo content

Text join - Language Specific Join

Attribute Views ( Data Category - Dimension)


---------------
No Measures

Types
- Standard
- Time
- Derived (only desc can be changed) (Ex. Supplier & Biz Partner)
Hierarchies
----------
- Level based. - City / Country etc
- Parent Child based. - Employee / Manager
Other options include
Multiparent
Orphan nodes show under root node.

Time based Attributes


----------------------
Tables are found in the _SYS_BI schema
- M_TIME_DIMENSION
- M_TIME_DIMENSION_MONTH
- M_TIME_DIMENSION_WEEK
- M_FISCAL_CALENDAR

Calendars - Fiscal / Gregorian

Data is filled with the Quick View - "Fill time data"; input granularity Year / Month / Day.
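A quick way to check the generated time data (a sketch; run after the time data has been generated with the Quick View):

SELECT * FROM "_SYS_BI"."M_TIME_DIMENSION" LIMIT 10;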

Time Tables - T009 T009B


Unit Tables - T006, T006 B

Attribute View Properties


--------------------------
Data Category - Dimension
Type - Time/ Standard/ Derived.
Privileges.
- Blank - Cannot Choose.
- Analytical - Row Level Security.
- Sql - Based on Column View from _Sys_Bic schema.

Default Client: data retrieval is based on the client logged in.

Multiple key fields - should come from the same table.

Key field is mandatory.

Enable History - time travel queries.

Cache Validation - Daily / Hourly.

Generate concatenated attributes - in case of multiple keys ($MAT$Plant) a new key is created;
it can be checked in the table M_CS_ALL_COLUMNS.

Filter - can be applied on the choice of your column.

How to do simple reporting - using Analysis for OLAP which integrates with excel.

How to display Label - Go to Semantics and change the label column of the attribute.

Assigning Semantics - Amount with currency code


- Quantity with Unit of Measure
- UOM Date
- Geo Location.

Types of Joins === Max No of Keys are 60

- Text Join
- Referential Join
- Inner Join
- Left outer join
- Right outer Join.

If complex join conditions are used, two column views will be created:
- OLAP catalogue column view.
- Calculation catalogue column view.
Calculation View
Everything that can be done by Attribute views and Analytical views can be done with CALC views.

Multiple fact tables are supported in CV

Two methods of creation


- Graphical based
- SQL Script based

Data Categories
- Cube - Measures / Reporting possible.
- Dimension - No Measures allowed
- Blank - Measures allowed but no reporting, can be used in another CV

Attribute views can only be created with Column store tables

Analytical View.
- Column store tables
- Attribute views

where as Calculation view can be created with

- Attribute View
- Analytical View
- Database table
- Column view
- Table UDF

Nodes in Calculation View

- Join
- Union
- Projection
- Rank
- Aggregation

Data Category - Default output nodes.


Cube - Semantics - Star Join
(without star join) - Semantics - Aggregation
Dimension - Semantics - Projection - Data Foundation
Blank - Semantics - Projection

Calculation View Sub types


- Standard
- Time.

Calculation View Script View

Semantics - SQL script.

Graphical calculation view can be used in Script based view, Script based view can be used in graphical.

Union - Can accept multiple nodes.


Join - 2 inputs and 1 output
Projection - 1 input and 1 output, with filters at row level and column level;
filters cannot be applied on the default (output) node.
Aggregation - 1 input and 1 output,
used for aggregation and also for filters.
Rank - 1 input and 1 output,
performs a partition on the data set and an ORDER BY on the data set.

Joins such as temporal and referential joins can only be used in the star join created with a calculation view of data category
Cube with star join.
In a calculation view of type Dimension, joins such as inner, left outer, right outer and text join can be used.

If there are many joins execute the calculation view in SQL engine.

How Time dimension included


---------------------------

Create an attribute view of type time and use the same in calculation view in the join node and map the date from the output to
the time.
This would bring the hierarchies and other related time objects into your projection.

In the Aggregation node


- select the measures and right click and say add to aggregated column.

Calculation Engine Instantiation Process


---------------------------------------
If there are many joins inside the query and the net output is only a few records, while the backend query reads thousands of
records: if we set the execution property of the CV accordingly, a new query is generated for the data being
extracted that reads only the columns which are actually required.

NULL behaviour (in the SQL engine, operations with NULL are always NULL)
---------------------
1 + NULL = NULL
1 * NULL = NULL
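This can be verified directly in the SQL console:

SELECT 1 + NULL AS "plus_null", 1 * NULL AS "times_null" FROM DUMMY;
-- Both result columns are NULL.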

UNION NODE
-----------

The Union node is used to combine multiple inputs of similar structure,
e.g. sales of India + sales of America.

Union is also used to combine data which was earlier split in BW for scalability.

Ranking
--------

Create a projection and send the output to ranking

Options in Ranking
-----------------
Sorting Direction
- Ascending
- Descending
Threshold - 3, i.e. TOP 3 in India, TOP 3 in America, etc.
The above can be made dynamic by IP

Partition by - Region.
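As a hedged illustration, the Rank node's "top 3 per region" corresponds to the following window-function query over the "QBEXSQL"."DemoInsert" table created earlier (the inner query and alias are illustrative):

SELECT "REGION", "ORDERID", "NETSALES"
FROM (
SELECT "REGION", "ORDERID", "NETSALES",
ROW_NUMBER() OVER (PARTITION BY "REGION" ORDER BY "NETSALES" DESC) AS "RN"
FROM "QBEXSQL"."DemoInsert"
) AS "T"
WHERE "RN" <= 3;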

Calculation Views with Script based.


===================================

The only node you see is script view.

You should create the output columns, which define the structure of VAR_OUT; you can take the help of the tables while
creating the columns.

If the table from which the data is being read has a different column name, then use "as" in the SQL query:

Select "Country" as "countries" from address.

you can use CE functions to read the data.

Ex: ITAB1 = CE_COLUMN_TABLE(TABLE,["COL1","COL2"]);


VAR_OUT = CE_PROJECTION(:ITAB1,["COL1","COL2" AS "COUNTRY"]);

YOU CAN USE INPUT PARAMETERS WHERE EVER REQUIRED WITH PLACEHOLDERS $$REGION$$

INPUT PARAMETERS CAN ONLY BE PASSED IN SELECT AND NOT IN CE FUNCTIONS.

Using an OLAP Catalogue Column View
================================
You can use the column views generated from an analytical view in a script-based calculation view.

If it is an OLAP catalogue column view, use aggregation functions and GROUP BY in the script.

However, use "AS" so that the column names in the query match the columns
mentioned in the VAR_OUT structure.

VAR_OUT = Select sum(" ") as Grossamount, sum(" ") as Netamount from "_sys_bic"." "."<View name>"
(PLACEHOLDER."$$ipreg$$" => 'EMEA'); -- or use an input parameter instead of 'EMEA', i.e. :IPREG

If there is a requirement to run the procedure daily, you can make use of an .xsjob, which can run the procedure in
HANA native development.

Istep 2 scenario in Calculation views (MTD YTD WTD)


---------------------------------------------------

You have a calculation view where you have to input MTD / YTD manually.

Procedure to derive the dates (a sketch with placeholders):

Declare < > varchar(8);
Declare < > date;

If :< > is not null then  -- check whether the inputted date is NULL / NOT NULL
< > = :< >;
Select to_date(:< >,'YYYYMMDD') into < > from dummy;

Select concat(left(:lv_date,6),'01') into < > from dummy;   -- first day of the month (MTD start)

Select concat(left(:lv_date,4),'0101') into < > from dummy; -- first day of the year (YTD start)

var_out = Select < >,< >,< >,< >, SUM(YTD) AS YTD, SUM(MTD) AS MTD FROM "_SYS_BIC"." "
(PLACEHOLDER."$$IP_FYEAR$$" => :VARIABLE1,
PLACEHOLDER."$$IP_FYEAR$$" => :VARIABLE1,
PLACEHOLDER."$$IP_FYEAR$$" => :VARIABLE1)
Group by < >,< >,< >;
Converting Script based CV to Graphical
=========================================
Two Options - Save as - New Graphical Cv gets created.
- Migrate - Existing gets migrated.

During this process the calculation view gets converted to a Table UDF; hence a TUDF is also one of the possible sources of a CV.

However, the created CV cannot be activated from the HANA Modeller; go to HANA native development and activate it.

Hierarchies are only accessible if the underlying attribute views are used in the star join.
What is Keep Flag:
------------------
If you have scenarios where you are doing exception aggregation, which might need reporting at a lower level of aggregation,
and the column defining the granularity is not included in the result set, this can cause an issue; hence all such columns have
to be marked with the keep flag.
This is similar to "calculate before aggregation" in an AV.

Property found in column properties.

Ex: Prod Prodcat QTY Price Amount

If aggregation is done only on Prodcat (e.g. sports), the price would be aggregated and then multiplied, although the price is specific to
each product.

Filtering cannot be applied on the default Aggregation and Projection nodes.

Transparent filter
------------------
If queries contain filters on columns that you have not chosen to project, and you have defined a counter on a CV which you want to
use in another calculation view,
then you need to set the transparent filter flag to true for such columns in all nodes that contain the column, and for the node in
the calculation view that contains the counter.

Ex: count(distinct), e.g. products, product type, sales person counter.

If we have sales persons (S1 S2 S3) and you choose not to include sales person in projection 1, and the same is sent to the
aggregation node on top, this would
give the wrong output. (refer Day 26 26:54)

Dynamic Join:
============
A dynamic join is used when the join is defined on two columns, such as region and country. If you would like to calculate % of
marks, like a percentile, the value can change depending on
whether you are calculating on region or on country; if the dynamic join is not selected, the calculation always happens at the lowest
granularity.

If we set this join, aggregation happens before the join is executed and gives the desired output.

optimize join
============

Join performance is improved if multiple columns are joined and the cardinality is n:1 in a left outer join; by doing so
additional data is removed while making the join.

Propagate to semantics: if you would like to add a field in the bottom node, then instead of adding it to each and every node,
right click and say "propagate to semantics",
which will add it in all nodes.
Input parameters in calculation view
-------------------------------------
DIRECT
COLUMN
DERIVED FROM TABLE
STATIC LIST
DERIVED FROM PROCEDURE
-- create procedure " "." " (out result_country string);
Language SQLscript
SQL securityinvoker
Reads SQL data
AS
BEGIN
declare v_country sring;
declare v_region := "APJ";
Declare cursor cursor_tmp for
Select "country" from <>.<> where region = :v_region;
Open cursor_tmp;
fetch cursor_tmp into v_country;
result_country := v_country;
end;

Provide execute on this procedure to sys_repo


Decision tables are used for:
1. What-if analysis
2. Updates (update scenarios)
3. Transforming the data
4. ETL for business users

Go to Models - click on Decision Table

The nodes include DECISION TABLE - DATA FOUNDATION

The panels on the side include Vocabulary
- Attributes
- Conditions - the columns based on whose values the update condition is evaluated.
- Actions - the action is the column which is being updated.

For example, if you would like to give a bonus to SAP by 20% and QA by 10%:

Select the column against QA and SAP, give "price" * 10, and press ALT+ENTER.

This would create a procedure in the _SYS_BIC schema with a CASE statement.

EX. CASE When "tech" = 'sap' then price = price * 10


When "tech" = 'QA' then price = price * 20
ELSE price
END
How to simulate or do what-if analysis
--------------------------------------
Create a new parameter with the required data type
and select it as the action;
right click, set the dynamic value, and provide the calculation you want to perform.

When activated, this would create a column view in the _SYS_BIC schema.

You can include both views in calculation view projections and create a comparative analysis model.
DXC is used to make use of the ECC extractors and load the data into HANA.

If we have a customer who has ECC with only the BW component and many data sources,
in such scenarios we use DXC.

The customer has ECC and BW; for a few of the data sources he uses DXC, and for a few he would like to
send the data to HANA.

There are some entities where we do not have BW reporting and we would like to send the data into HANA.

You can make your ECC exclusive for DXC or use a mixed approach:

Global - exclusive for DXC
System - only a few clients use DXC
Datasource - only the specified data sources; all others can be used with BW

HOW to make the setting

se38 - sap_rsadmin_maintain

Execute that program and maintain the entry PSA_to_HDB as Datasource.


What is HANA Live?

HANA Live is like the business content in BW, provided by SAP.

All the content provided in HANA Live consists of calculation views:

1. Private views
2. Reuse views
3. Query views
Reuse views = multiple private views
Query views = multiple reuse views

The content can be found in HANA modeller


Content - SAP - HBA - ECC

What is schema mapping:


======================

If the schema available in the R/3 system is, say, the PROD schema where the models are available, and in QTY it is the QTY schema, or the schema used by
HANA Live is different from the schema we have in our system, then do schema mapping so that the objects are created and saved in
the local schema.

The same logic applies for multiple ECC systems consolidated into a single ECC with a BW on top of it.

Visualize Plan:
===============

Right click on SQL query and say visualise the plan, which would give the run time of the selected queries.

Transporting from HANA server 1 to HANA server 2

Database schemas and catalogue objects are not transported; only content is transported.

Only the views are transported; the underlying data is not, so the data in DEV and QTY can be different.

Content packages are the only transportable items in HANA native transport; the database objects can only be exported and imported.

Steps to do transportation
==========================

Quick View - click on Delivery Units.

Create a delivery unit - ProdDU.

Assign the package to the delivery unit, or double-click on the package and assign it to the DU.

When the views are transported to QTY, the underlying tables may refer to a different schema,
hence the schema needs to be replaced.

How to manage:

Go to Quick view - Schema mapping.

How to transport the delivery units
===================================

HTTP://sapqbex800/sap/hana/login/xs/lm

LM is Lifecycle Management.

HOME  PRODUCTS  TRANSPORT  UPLOAD/DOWNLOAD  SETTINGS  MANAGE SYSTEM

The opening page has two options in the settings:

1 Enable native HANA transport.
2 Enable CTS transport (transport requests like in BW).

Go to the Transport tab.

You will find 3 options:
System  Transports  Logs

System:

Register the target system, e.g. QTY.

Transport tab:

Create a transport route between DEV & QTY.

Steps:
Route Name:
Target:
Source:
Type: Full / Delta
Transport: DU / Product transport

<select package>

Once the TR is selected, click on Start Transport.

Logs show the success or failure of the transports.

How is the database exported / imported
========================================

Go to File
Export
- Content
- Catalogue objects

Catalogue objects
- 7qbex - add the tables that you would like to export.

Options: Binary or CSV

Target: server / local, etc.

Go to the target system:
File - Import.
If you would like to change the schema, modify the schema name in the files exported to your local workstation.

Usually with SLT the tables are already created in the target system; however, if you have created some
tables manually and you would like to export them ad hoc, you use export and import.
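
The same thing can also be done with SQL statements; a minimal sketch using the EMP table from earlier, where the path is a placeholder for a directory on the HANA server:

-- On the source system: export the table (CSV is also possible)
EXPORT "QBEXSQL"."EMP" AS BINARY INTO '/tmp/hana_export' WITH REPLACE;

-- On the target system: import it again
IMPORT "QBEXSQL"."EMP" AS BINARY FROM '/tmp/hana_export' WITH REPLACE;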

If there are any errors, the views may not become active.

The views between DEV and QTY are transferred by HANA native transport,
whereas the data in the tables is transferred between the systems using the data
provisioning techniques.

You cannot transport a single view; the entire package has to be transported.

HOW TO TRANSPORT BW OBJECTS ALONG WITH HANA
===========================================

How to connect delivery units to a TR?

For that we need the HANA transport container.

Go to the ABAP perspective.
---------------------------
Create an ABAP project.

Create a package, right-click in it and choose Others - SAP HANA Transport Container.
Attach the delivery unit.

Inner Join

Only the matching values come through; for example, if the Telangana MD table is inner joined with country sales,

we would only get the sales made by the employees of Telangana.

An inner join is recognised by all the engines.

Left Outer

Common values from the inner join plus the unmatched values from the left table.

Right Outer

Common values from the inner join plus the unmatched values from the right table.
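
A minimal SQL sketch of the difference; the tables "EMPLOYEES" and "SALES" and their columns are assumptions used only for illustration:

-- Inner join: only rows that match on both sides
SELECT e."EMPNAME", s."AMOUNT"
FROM "QBEXSQL"."EMPLOYEES" e
INNER JOIN "QBEXSQL"."SALES" s ON s."EMPID" = e."EMPID";

-- Left outer join: all employees, with NULL amounts where no sale exists
SELECT e."EMPNAME", s."AMOUNT"
FROM "QBEXSQL"."EMPLOYEES" e
LEFT OUTER JOIN "QBEXSQL"."SALES" s ON s."EMPID" = e."EMPID";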

Referential joins

Only recognised by the OLAP engine.

If a referential join is used in an attribute view, the join engine cannot recognise it and it works as an inner join.
If the same attribute view is used in an analytical view and data from both tables is requested, an inner join is executed;
however, if you read only from one table, a left outer join is executed.

A referential join is used when there is a guaranteed link between two tables, like header and item.

Text Joins
If texts are maintained for customers / products in multiple languages, this can result in duplicate
records; in such situations we go for a text join, where a language column is specified on the text table.
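
Roughly, a text join corresponds to the following SQL; the product and text tables and the fixed 'EN' language filter are assumptions (in the modeler the language is taken from the session automatically):

-- Text join: restrict the text table to one language to avoid duplicates
SELECT p."PRODUCTID", t."PRODUCT_TEXT"
FROM "QBEXSQL"."PRODUCTS" p
LEFT OUTER JOIN "QBEXSQL"."PRODUCT_TEXTS" t
  ON t."PRODUCTID" = p."PRODUCTID"
 AND t."LANGUAGE" = 'EN';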

Temporal Join

A temporal join can be formed only in a star join.

If you are joining the customer from a history-enabled table, you cannot simply execute an inner join;
in such cases you go for a temporal join.

You can find the temporal properties when you click on a join in the star join node. The options include:
temporal column = ZDATE
temporal condition = Include both / Exclude both / Include to, exclude from / Exclude to, include from
from column = DATEFROM
to column = DATETO

The temporal column should be on the right-hand side, and the date-from and date-to columns should come from the left-hand side.
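
In plain SQL a temporal join is essentially a range condition on the date columns; a sketch with assumed fact and history tables:

-- 'Include both' temporal condition expressed as SQL
SELECT f."ORDERID", d."CUSTOMERGROUP"
FROM "QBEXSQL"."SALES_FACT" f
INNER JOIN "QBEXSQL"."CUSTOMER_HIST" d
  ON d."CUSTOMERID" = f."CUSTOMERID"
 AND f."ZDATE" BETWEEN d."DATEFROM" AND d."DATETO";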

Spatial joins: a join based on the longitude and latitude of GPS-enabled objects is called a spatial join.
Smart Data Access
-----------------
SDA uses various adapters and brings data into HANA.

It consumes various sources and exposes the data as virtual tables.

In HANA:

Select * from adapters;

This lists all the adapters available in HANA; these are the generic ODBC connections
through which we can connect source systems to HANA.

These include:

Oracle
MSSQL
ODBC
BW adapter
HANA adapter (DP Agent)
File adapter (DP Agent)

The Data Provisioning Agent is licensed separately and has to be installed.

Install it and connect it to the HANA DB.

Select * from Agents;

This gives the list of agents registered. Using the adapters we can connect to the source system
and consume its tables as virtual tables in HANA.

EX: similar to a Remote Cube in BW.


How to create
-------------
Go to Provisioning - Remote Sources - right-click and Create.

Source name:
Adapter name: MSSQL ECC Adapter
Source location: Index server

Input all the required database parameters to connect.

Once the connection is established, you can see the source tables under the Provisioning folder in HANA.

How to create a virtual table
-----------------------------
Select the required table from Remote Sources and double-click it; this creates a new virtual table.

If changes are made in the source system, they are reflected in HANA automatically.

This is a real-time view of the data, as we are consuming the tables virtually.
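
The same can be done in SQL; a sketch in which the remote source, database, schema, and table names are placeholders:

-- Create a virtual table that points at a remote object
CREATE VIRTUAL TABLE "QBEXSQL"."VT_SALES"
AT "<REMOTE_SOURCE>"."<DATABASE>"."<SCHEMA>"."<TABLE>";

-- Query it like a normal table; the data is fetched from the source at runtime
SELECT * FROM "QBEXSQL"."VT_SALES";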

Connecting HANA to HANA
-----------------------
Create a new remote source using the HANA ODBC driver.

ECC can also be accessed by connecting to its SQL Server database and reading its tables.
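
A minimal sketch of such a remote source; host, port, user, and password are placeholders:

-- Remote source to another HANA system via the HANA ODBC adapter
CREATE REMOTE SOURCE "HANA2HANA" ADAPTER "hanaodbc"
CONFIGURATION 'ServerNode=<host>:<port>'
WITH CREDENTIAL TYPE 'PASSWORD' USING 'user=<user>;password=<password>';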

How to import flat files into HANA using SDA
--------------------------------------------
HANA already has a File adapter which can be used to extract flat files.

From SPS9 there is a concept called smart data quality and integration, which can run in real time
and in batch mode, where you can also do transformations.

Create a calculation view using the virtual table.

Steps
-----
Get into HANA native development.
Systems - SPS9
- Content
- Schema
- Create a package
- Create a local folder

Create a data flow similar to BODS.
This concept is still in an initial phase and will be further integrated into HANA native development.
The important point about smart data quality and integration is installing the ODBC drivers.

Once SDA is installed, HANA also supports ETL concepts as well; this is still under development.

MAJOR provisioning tools are SLT and BODS
-----------------------------------------

Keep SLT, BODS and SDA in your resume.

Sample questions
----------------
1 Why use both SLT and BODS?

For real time we used SLT and replicated the data.

For other reporting, where real time is not needed, we used the ODP concept in the data sources and reported on that.

Real-time analysis is done, for example, on perishable products and on footfall (the number of passengers entering the malls), etc.

SLT is used where real time is needed, for around 25 tables.

If you are projecting BW on HANA:

Use BW for the ETL part and HANA only for modelling; if you want virtual tables to be consumed, use SDA.

You should be able to explain how BODS works and what is what.

Where did you use SDA
---------------------

A pharma company has BW on HANA and ECC on HANA.

HANA Live was not allowed to be loaded on BW, but it was deployed on ECC; since HANA Live sits on ECC,
they connected the ECC HANA and the BW HANA using SDA.

BODS was used as a source system that BW can project: for DB2, due to a source constraint, BODS was used as the middleware.

All SAP sources - HANA

All non-SAP sources - Hadoop

HANA - SDA - Hadoop

The various types of data provisioning include:

SLT - SAP Landscape Transformation Replication Server
BODS
DXC
SDA - can also make HANA behave in real time
SDA Quality (smart data quality and integration)

HANA is not a real-time database by itself, but by making use of SLT we can make it behave like a real-time one
by bringing the data into HANA in real time.

Transaction systems include CRM & APO, HCM, or a 3rd-party system.

Data provisioning is similar to extraction in BW.

SLT acts as a middleware ABAP server between the source and the target.

From the latest versions SLT can also push the data into BW.

Components of SLT
- Read engine
- Write engine
- Mapping and transformation engine

Installation of SLT
KERNEL 7.0
ABAP 7.2
SLT

SAP ECC and SAP SLT are connected using an RFC connection, whereas HANA is the database and is connected using a DB
connection.

To avoid multiple logins, HANA provides a data provisioning cockpit from which you can operate the replication using SLT;
however, the actual replication is done by SLT. This can also be done directly from SLT.

How does the replication happen
-------------------------------
All the read and write modules are function modules.

Examples of application tables:

- VBAK
- VBAP
- VBUK

REPLICATION STEPS
-----------------
1 All historical data is loaded into HANA first - similar to the init load in BW.
2 Once loading is done, database triggers are created in the source system.
3 Three triggers are created: for insert, update and delete.
4 Log tables are created - they contain the primary key of the table.

Background Mechanism
--------------------
As soon as records are updated in the source tables, the database triggers fire
and the changes are captured in the log tables; the same records are pushed into HANA in real time.

Data can be transformed (filtering, additions, calculations, etc.) in real time using the
SLT mapping and transformation engine.

SLT Configuration: LTR

Source: ECC
Target: HANA
Frequency: real time / batch

For each SLT configuration there will be a schema created in HANA, and all the tables replicated
using that configuration will be saved in that schema in HANA.

The SLT server can also be maintained inside the ECC system, as all the technical configuration needed is preconfigured in ECC.

The configuration can be done from multiple ECC systems to a single HANA, or from multiple ECC systems to multiple HANA systems.

Configuration can be done from source to HANA in a 1:N or N:1 ratio;

from one ECC system data can be sent to a maximum of 4 HANA systems.

If tables with the same structure are sent to HANA and the data is similar, create a new column with the source system ID,
similar to a compounding attribute in BW.

SLT Configuration:

1. Create an RFC connection in SM59: SLT ---> ECC

2. Create the SLT configuration ---> Tcode LTR
Name: SLT2ECC
Source: SAP / non-SAP
Connection to HANA (max 4)
Also configure the number of jobs required to run the SLT.
Replication process: real time / time in seconds (frequency).
From SP5 real-time data can also be pushed to BW.

A new schema would be created in HANA with the same name as the connection.

Go to Quick View - Data Provisioning.

As soon as the configuration is created, 3 tables are replicated into HANA,
which bring all the metadata related to the source system:
DD02T - descriptions of the tables
DD08L - tables and columns
DD02L - list of tables
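
A quick way to check the replicated metadata; the configuration schema name below is a placeholder:

-- List the source tables and their descriptions from the replicated dictionary tables
SELECT "TABNAME", "DDTEXT"
FROM "<SLT_SCHEMA>"."DD02T"
WHERE "DDLANGUAGE" = 'E';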

How to replicate
-----------------
Quick View - Data Provisioning - click on Replicate.
Other options include:
Load - brings only the historical data.
Replicate - brings the historical data and creates a log table with a trigger in the source.
Stop replication - deletes the triggers and logs.
Suspend - triggers and logs stay intact and the changes continue to be captured in the ECC logs.
Resume - resumes processing of the logs.

Go to ECC Tcode LTRC.

The tabs include:
Administration Data - Processing Steps - Table Overview - Data Transfer Monitor - Application Logs - Load Statistics - Expert Functions

In the Table Overview tab you can see the tables that are being replicated.

List triggers
--------------
Tcode: IUUC_Triggers

Replicating directly from SLT
------------------------------
Go to Tcode LTRC,
Table Overview tab,
click on Data Provisioning,
and provide the mass transfer ID: 14.

In LTR, double-click on the connection created.

You will find Jobs & Connections; you can also start and stop the master job from here.

Master Job: triggers all the jobs related to data capture.

Transformations in SLT:
======================
Use cases:
Conversion of data
Filtering
Structural changes - add or remove columns while the data is transferred to HANA
Partitioning of the table in SLT while it is being transferred to HANA

How data is processed
.....................
Data is processed portion by portion, like data packages in BW.
A loop runs through each portion, changes are made to each record, and the result is updated to the target structure.

Tcode for the setup of transformations
......................................
IUUC_REPL_CONTENT or LTRS
Select the schema and the table.

The tabs here include:

1. IUUC REPL TBSTG: table settings; this helps in defining the type of the target table and the number of columns
in that table.
Here you can add an additional column or change the structure of the target structure (click on Edit Table Structure).
Partitioning can also be done here using the partition syntax: [ partition by hash (vbeln) partitions 4 ]

2. IUUC ASS RUL MAP: assignment of rule mappings; if a new column has been added, or if you would like to
filter the data or perform additional transformations, this tab is used.

There are two types of rules:

1 Event based
2 Parameter based

If you would like to implement 5 rules, insert 5 rows; each row is one rule.
list of events.
----------------
E/BOP - Begin of processing
E/BOT - Begin of Transaction
E/BOL - Begin of Loop
E/BOR - Begin of Record

Event based code:
-----------------
Field symbols for the sending objects: <wa_s_tabname>-fieldname (source structure)
                                       <wa_r_tabname>-fieldname (target structure)

EX: <wa_r_ysales>-sourceid = 'ECC'.

You can choose to write a line of code, which should not be more than 72 characters, or else you can use an include program.

Of the 6 columns you see in this tab, 2 are for the event and the remaining 4 are used to pass parameters.

Event based rules are simpler and more flexible.

The export field is the field for which you would like to populate data
------------------------------------------------------------------------

Ex:
If you are inserting the source system as a new column, in the export column mention [sourceid] and the import parameter p1 as ['ECC'].
Now in the code line: e_sourceid = i_p1.

You can also create an include program and use it for the transformation, e.g. an SE38 program.
You can use IF conditions in the programs and can use the built-in macros of SLT, such as skip_record, to filter data.

3. IUUC SPC PROCOPT: used to filter out the data at the source itself; this filter is for delta records only.
A similar filter can be applied here so that it applies directly at the database level.

4. IUUC PERF OPTION: this is the place where the number of jobs required to perform the transformations is specified.

Other settings:
---------------
No_drop: the data in HANA will not be deleted before replication.
ROW_store: a row store table will be created instead of a column store table.
rd_portion_size: the number of records per portion.

Once replication is stopped
===========================
Once replication is stopped, if we replicate again, that brings the historical data once again;
however, you may not want that - like an init delta without data transfer in BW.

How to resume the delta without bringing the historical data again?

How to activate the triggers and recreate the log tables?

There is an RS table: for any action performed for replication, an entry is created in this table, similar to the log tables.
Within HANA there is a schema called SYS_REPL which has the table RS_REPL_COMPONENTS; every step of
the SLT configuration can be seen in this table.

T - Create table without loading
L - Load without replication
R - Start replication
C - Stop replication
S - Suspend
W - resume
M - activate delta recording
Q - Start replication process.

insert records into RS_ORDER first with entry as M and then Q.

Extracting the data from a NON-SAP system
------------------------------------------
1. Establish the connection from SLT --------> NON-SAP server
Tcode - DBCO
Test the connection using ADBC_TEST.
2. Go to LTR and create the configuration:
Source
Target
Number of jobs

HOW TO USE SLT FOR BW
---------------------

Used to replicate:
non-SAP data or custom tables from ECC
and to add delta capabilities where no standard extractor is available.

Scenario 1.
-----------
SAP ECC (or any source) - SLT - BW backed by HANA:
make use of transient providers / virtual providers and report in BW.

Scenario 2 (without HANA, using ODP)
------------------------------------
Pushing real-time data directly into a BW DSO.

The minimum requirement is BW 7.3; to get real-time data into BW there is no need for HANA.

There is a new source system connection type in BW:

ODP-SLT.

Steps
-----
Go to SLT.
Create the configuration:
Source: ECC (RFC)
Target: ECC / HANA (RFC / DB connect)

While configuring the target, choose the scenario:
- BW PSA replication scenario
- ODP replication scenario
- Standard RFC scenario
Give a queue alias name: SLTBPR

Go to BW.
Create a source system connection:
CONN_SLT_S
and provide the SLT server details; once it is connected it shows the ODP replication
scenario name created in SLT.

Click on the connection and create a data source.

ODP source: SLT
ODP provider: Ysales
Datasource name: ODPDS
Source system:
Type: TD (transactional data)

Create a DSO.
Create a transformation between the DSO and the data source.
*** For ODP there is no need for an InfoPackage; data can be extracted using a DTP.
On the first run the DTP is a full load.
Convert this DTP to a real-time DTP to extract the data into BW in real time.

ECC data gets posted - the trigger fires - the log updates - transformations (LTRS) - LTRC replicate / load - pushes the data into BW.

The delta queue for ODP data sources is ODQMON.

When you want to consume one data source into multiple targets, ODP comes into the picture:
the data source is sent to BW and also to BODS, each with its individual delta queue.

New Tcode LTRS
--------------
Advanced Replication Settings
- Performance options
- Rule assignment
- Table settings
- Trigger options

The rules here are event based or field based.

Trigger based coding:

SQL / ABAP / etc.

All the options are the same; only the user interface is different.

In the new version of LTRS you can also use a database view as well.

For that, go to LTRS - Table Settings - give the view name and enter it as the structure template; this creates a new table
using the view template as the structure.

We might encounter an issue while pushing the data into BW using SLT; in such a case a BAdI needs to be switched on / off.

/nLTRC
Go to Expert Functions.
Click on Activate / Deactivate BAdI Implementation.
