Friday, June 11, 2010

DDL and Logon Triggers

/*


DDL Triggers to capture events in the database

drop trigger ddltrigger_altertable on database

drop trigger ddltrigger_drop_table on database

drop table dbo.ddl_table_log

drop table dbo.ddltable_test

*/

------------------------------------------------
-- 1. DDL Trigger to capture alter table events
------------------------------------------------
-- create a table to capture alter table events

create table dbo.ddl_table_log
(
eventtime datetime,
eventowner nvarchar(100),
eventtsql nvarchar(4000)
)
go
--------------------------------------------------
/*
create the ddl trigger for alter table
Note: XML is case sensitive,
so (/EVENT_INSTANCE/TSQLCommand/CommandText)[1]
should be provided exactly as shown.
If you change the case it will not work
*/
create trigger ddltrigger_altertable
on database for alter_table
as
declare @data xml
set @data = EVENTDATA()
insert into dbo.ddl_table_log
values
(
GETDATE(),
CURRENT_USER,
@data.value('(/EVENT_INSTANCE/TSQLCommand/CommandText)[1]','nvarchar(4000)')
)
--------------------------------------------------
-- Now, create a table to test alter table command to log
create table dbo.ddltable_test
(
orgid int identity(1,1) primary key ,
orgname varchar(20) not null
)
go

-- Insert a record
insert into dbo.ddltable_test(orgname)
values ('DivaInfotech')
go

select * from dbo.ddltable_test
go
/*
Now there is a new organization
which requires column width to be increased
increase orgname from varchar(20) to varchar(80)
*/
alter table dbo.ddltable_test
alter column orgname varchar(80) not null
go

-- check that the alter table command is logged into ddl_table_log
select * from dbo.ddl_table_log
go

-- insert orgname having value greater than 20
insert into dbo.ddltable_test(orgname)
values ('Indian Oil and Natural Gas Corporation Limited')
go
-----------------------------------------------
/*
In a business environment, it may be required
that even a system administrator should not drop
or truncate tables by accident.
*/
------------------------------------------------
-- 2. DDL Trigger to prevent drop table
------------------------------------------------
create trigger ddltrigger_drop_table
on database for drop_table
as
print 'You cannot drop or truncate the table'
rollback
------------------------------------------------
-- Let us try to drop the table
drop table dbo.ddl_table_log
go
------------------------------------------------

/*
Logon triggers

Logon triggers can be used to record information
about the user logging in, or to prevent a particular user
from logging on during business hours
*/

create trigger checklogontrigger
on all server for logon
as

declare @currenttime datetime = getdate()
if ORIGINAL_LOGIN() = 'maintuser'
and @currenttime between '09:00' and '18:00'
begin
   print ('Maintenance users cannot log in during business hours')
   rollback
end

-----------------------------------------------

Monday, May 3, 2010

SQL Server T SQL 2008/2005 Enhancements

-----------------------------------------------------------------------------------
-- SQL Server 2008 Enhancements
-----------------------------------------------------------------------------------

/*
Syntax Enhancements
Initialize variables inline as part of the variable declaration statement.
Instead of separate DECLARE and SET statements,
we can combine both into one DECLARE statement.
*/
use tempdb

go
if OBJECT_ID('dbo.portposition','U') is not null
drop table dbo.portposition;
go

create table dbo.portposition
(ason date,snam varchar(7),cusip varchar(13),marketvalue numeric (10,2),
primary key (ason,snam,cusip)
)

insert into dbo.portposition (ason,snam,cusip,marketvalue)
values ('01/31/2009','ax0001','acvbfghi',100.25),
('02/28/2009','ax0001','acvbfghi',200.50),
('03/31/2009','ax0001','acvbfghi',300.75),
('01/31/2009','ay0001','acvbfghi',100.25),
('02/28/2009','ay0001','acvbfghi',200.25),
('01/31/2009','az0001','acvbfghi',100.25);

declare @ason datetime ='12/31/2008' ,@snam varchar(7)='AA1YZY'
select top 100 * from dbo.portposition
where ason = @ason and SNAM = @snam
------------------------------------------------------------------------
/*
Compound Operators:
Compound assignment operators are the same as
in C++ and C#; they perform an operation
and set the original value to the result of the operation.
They help in writing cleaner, more concise code.
(They also work in the SET clause of an UPDATE statement.)

Supported compound operators in SQL Server 2008.
These are applicable to columns and variables wherever assignment is normally used:
 += Add and assign
 -= Subtract and assign
 *= Multiply and assign
 /= Divide and assign
 %= Modulo and assign
 &= Bitwise AND and assign
 ^= Bitwise XOR and assign
 |= Bitwise OR and assign
*/
declare @quantity numeric(10,4) = 10.2584,
@cost numeric(10,2) = 200.80

declare @marketvalue numeric(10,2) = @quantity * @cost,
@totalmarketvalue numeric(10,2) = 100
set @totalmarketvalue += @marketvalue
select @quantity as quantity,
@cost as cost,
@marketvalue as marketvalue,
@totalmarketvalue as totalmarketvalue
go

declare @quantity numeric(10,4) = 10.2584,
@cost numeric(10,2) = 200.80
declare @marketvalue numeric(10,2) = @quantity * @cost,
@totalmarketvalue numeric(10,2) = 100

set @totalmarketvalue *= @marketvalue
select @quantity as quantity,
@cost as cost,
@marketvalue as marketvalue,
@totalmarketvalue as totalmarketvalue
go

declare @quantity numeric(10,4) = 10.2584,
@cost numeric(10,2) = 200.80
declare @marketvalue numeric(10,2) = @quantity * @cost,
@totalmarketvalue numeric(10,2) = 100
set @totalmarketvalue += @marketvalue

select @quantity as quantity,
@cost as cost,
@marketvalue as marketvalue,
@totalmarketvalue as totalmarketvalue
go
------------------------------------------------------------------------
/*
Table value constructor through the VALUES clause
A single VALUES clause can be used to construct a set of rows.
This allows multiple rows to be inserted with a single INSERT statement.
*/
use tempdb
go

if OBJECT_ID('dbo.country','U') is not null
drop table dbo.country;
go

create table dbo.country
( countryid int identity(1,1) primary key, country varchar(80) not null);

insert into dbo.country(country)
values
( 'India'),
('USA'),
('Russia'),
('France');

select * from dbo.country;

insert into dbo.country(country)
values
('New Zealand'),
( 'Switzerland'),
( 'Mozambique'),
( 'Ethiopia'),
(null);

select * from dbo.country;
/*

The INSERT statement runs as an atomic transaction; if the insert
fails for one row, the entire insert fails.
*/

/*
VALUES can also be used for derived tables and CTEs.
This avoids the need for UNION ALL.
The constructor can be used like any table expression in joins, WHERE, GROUP BY, etc.
*/
-- VALUES clause as a derived table
select * from
(VALUES
( 'India'),
('USA'),
('Russia'),
('France')
) AS countrytable(countrycolumn);
------------------------------------------------------------------------
/*
-- New DATE and TIME Data Types
DATE - 3 bytes - 01-01-0001 to 12-31-9999 - accuracy of 1 day
TIME - 3 to 5 bytes - 100 nanoseconds accuracy
DATETIME2 - 6 to 8 bytes - 01-01-0001 to 12-31-9999 - 100 nanoseconds accuracy
DATETIMEOFFSET - 8 to 10 bytes - 01-01-0001 to 12-31-9999 - 100 nanoseconds accuracy
DATE -- Only stores the date portion -- 'YYYY-MM-DD'
TIME -- Only stores the time portion -- 'hh:mm:ss.nnnnnnn'
DATETIME2 -- Combination of the DATE and TIME types
-- 'YYYY-MM-DD hh:mm:ss.nnnnnnn'
DATETIMEOFFSET -- DATETIME2 with a time zone offset
-- 'YYYY-MM-DD hh:mm:ss.nnnnnnn +05:30'
-- TIME, DATETIME2 and DATETIMEOFFSET can be specified with a precision for fractional seconds
*/
declare @dt date = '2010-01-01' ,
@ti time = '22:45:16.1234567',
@dt2 datetime2 = '2010-01-01 22:45:16.1234567',
@dtoffset datetimeoffset = '2010-01-01 22:45:16.1234567 +02:00';
select @dt as dateval,@ti as timeval,@dt2 as datetime2val,
@dtoffset as datetimeoffsetval
declare @dt4 datetime2 = '2010-01-01 22:45:16.1234567'
declare @dt01 date,@tm time
set @dt01 = @dt4 -- truncates time portion
set @tm = @dt4 -- display time portion
select @dt4,@dt01,@tm

declare @tm1 time(7) = '23:44:25.1235567', @tm3 time(3),@tm4 time(4),@tm5 time(5)
select @tm3 = @tm1, @tm5= @tm1, @tm4 = @tm1
select @tm1,@tm3,@tm4,@tm5
------------------------------------------------------------------------
/*
New Datetime functions
SYSDATETIME() - Returns datetime2
SYSUTCDATETIME() - Returns datetime2 in UTC
SYSDATETIMEOFFSET() - Returns DATETIMEOFFSET with the system time zone offset
*/
select
GETDATE() as currentdatetimegetdate,
SYSDATETIME() as currentdatetime,
SYSUTCDATETIME() as currentdatetimeUTC,
SYSDATETIMEOFFSET() as currentdatetimetimezone

select
CAST(SYSDATETIME() as date) as currentdate,
CAST(SYSDATETIME() as time) as currenttime
-- using CAST(datetimecolumn as date) in a predicate, the optimizer can still use an index on the column (see the sketch below)
-- DATEADD, DATEDIFF, DATENAME and DATEPART add support for microseconds and nanoseconds
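
A minimal sketch of the CAST-to-date behaviour; the table, column and index names here are illustrative only, not from the original post.

if OBJECT_ID('dbo.eventlog','U') is not null
drop table dbo.eventlog;
go
create table dbo.eventlog
( eventid int identity(1,1) primary key, eventtime datetime2 not null );
create nonclustered index idx_eventlog_eventtime on dbo.eventlog(eventtime);
go
-- even though eventtime is wrapped in CAST, the optimizer can still
-- translate this predicate into a range seek on idx_eventlog_eventtime
select eventid, eventtime
from dbo.eventlog
where cast(eventtime as date) = '2010-01-01';
go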
------------------------------------------------------------------------
/*
Table Types and Table-Valued Parameters
Table Types -- save a table definition in the database;
it can then be used to declare table variables
Table-Valued Parameters -- enable passing a parameter of a table type
to stored procedures and functions
*/
use tempdb
go

--drop type dbo.pptype
CREATE TYPE dbo.pptype as table
( ason date,snam varchar(7),cusip varchar(13),marketvalue numeric(18,2))
go

declare @tblpp as dbo.pptype
insert into @tblpp(ason,snam,cusip,marketvalue)
values ('01/01/2009','AXYZ2','74834L100',123.45)

select * from @tblpp
select * from sys.table_types -- to get all table type metadata
go

create procedure dbo.usp_list_port_position
(@tblportposition as dbo.pptype READONLY )
as
set nocount on

select * from @tblportposition;
return
go

declare @tblpp as dbo.pptype
insert into @tblpp(ason,snam,cusip,marketvalue)
values ('01/01/2009','AXYZ2','74834L100',123.45)

exec dbo.usp_list_port_position @tblpp -- passing table as parameter
go
-- When passing from .NET pass the data type as SqlDbType.Structured
------------------------------------------------------------------------
/*
MERGE Statement

This is a single statement that combines INSERT, UPDATE and DELETE
actions into a single operation based on conditional logic.
More efficient than individual statements applied separately.
Target table: specified in the MERGE INTO clause
Source table: specified in the USING clause
Cases:

WHEN MATCHED THEN
WHEN NOT MATCHED BY TARGET THEN
WHEN NOT MATCHED BY SOURCE THEN

At least one clause needs to be specified.
Supports the OUTPUT clause.
*/
use tempdb
go
if OBJECT_ID('dbo.portposition','U') is not null
drop table dbo.portposition;
go
create table dbo.portposition
(
ason date,snam varchar(7),cusip varchar(13),marketvalue numeric (10,2),
primary key (ason,snam,cusip) )

insert into dbo.portposition (ason,snam,cusip,marketvalue)
values ('01/31/2009','ax0001','acvbfghi',100.25),
('02/28/2009','ax0001','acvbfghi',200.50),
('03/31/2009','ax0001','acvbfghi',300.75),
('01/31/2009','ay0001','acvbfghi',100.25),
('02/28/2009','ay0001','acvbfghi',200.25),
('01/31/2009','az0001','acvbfghi',100.25);

if OBJECT_ID('dbo.temp_portposition','U') is not null
drop table dbo.temp_portposition;
go
create table dbo.temp_portposition
( ason date,snam varchar(7),cusip varchar(13),marketvalue numeric (10,2),
primary key (ason,snam,cusip) )

insert into dbo.temp_portposition (ason,snam,cusip,marketvalue)
values ('01/31/2009','ax0001','acvbfghi',1000.25),
('02/28/2009','ax0001','acvbfghi',2000.50),
('03/31/2009','ax0001','acvbfghi',3000.75),
('03/31/2009','ay0001','acvbfghi',4000.25);

MERGE INTO dbo.portposition as TGT -- Target
USING dbo.temp_portposition as SRC -- Source
ON TGT.ason = SRC.ason
AND TGT.snam = SRC.snam
AND TGT.cusip = SRC.cusip
WHEN MATCHED THEN
UPDATE SET
TGT.marketvalue = SRC.marketvalue
WHEN NOT MATCHED THEN -- present in source not in target
INSERT (ason,snam,cusip,marketvalue)
VALUES (src.ason,src.snam,src.cusip,src.marketvalue)
WHEN NOT MATCHED BY SOURCE THEN -- present in target not in source
DELETE ;

SELECT * FROM dbo.portposition;
------------------------------------------------------------------------
-- GROUPING SETS
--select * from port_position
select snam, YEAR(ason) as asonyear, month(ason) as asonmonth, SUM(marketvalue) as marketvalue from dbo.portposition
where Ason is not null and SNAM > ''
group by
GROUPING SETS (
(SNAM,YEAR(ason),month(ason)),
(SNAM,YEAR(ason)),
(SNAM)
);

select snam, YEAR(ason) as asonyear, month(ason) as asonmonth, SUM(marketvalue) as marketvalue from dbo.portposition
where Ason is not null and SNAM > ''
group by GROUPING SETS
(
(SNAM,YEAR(ason)),
(SNAM,month(ason))
);

-- ROLLUP and CUBE are enhanced
-- ROLLUP is equivalent to
-- GROUPING SETS ({SNAM,YEAR,MONTH},{SNAM,YEAR},{SNAM},{} )
select snam, YEAR(ason) as asonyear, month(ason) as asonmonth, SUM(marketvalue) as marketvalue from dbo.portposition
where Ason is not null and SNAM > ''
group by ROLLUP( SNAM,YEAR(ason),month(ason) );
------------------------------------------------------------------------
-- SPARSE Columns
/*
Sparse columns are optimized for the storage of NULLs.
When a considerable portion of the rows (> 70%) contain NULL in a column,
it is better to define the column as SPARSE.
Sparse columns do not consume storage when NULL is stored, but storage of non-NULL values
becomes more expensive.
*/

/*
A single XML column can be defined as a column set to hold
all sparse columns.
This is useful when there are a large number of sparse columns
in a table and operating on them individually becomes difficult.
When an XML column set is used, SELECT * and selecting individual columns
return different results (see the sketch below):

SELECT * returns the XML column set
selecting individual columns returns the columns in the relational way
*/
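
A minimal sketch of SPARSE columns with an XML column set; the table and column names are illustrative only.

if OBJECT_ID('dbo.productattributes','U') is not null
drop table dbo.productattributes;
go
create table dbo.productattributes
(
productid int identity(1,1) primary key,
colour varchar(20) sparse null,
weightkg numeric(10,2) sparse null,
allattributes xml column_set for all_sparse_columns
);
go
insert into dbo.productattributes(colour) values ('Red');
insert into dbo.productattributes(weightkg) values (12.50);
go
-- select * returns productid plus the XML column set (allattributes)
select * from dbo.productattributes;
-- selecting individual columns returns them in the relational way
select productid, colour, weightkg from dbo.productattributes;
go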

/*
-- Filtered Indexes and Statistics
Nonclustered indexes with a predicate (WHERE clause) store only the
rows honoring the filter condition in the B-tree.
Manual statistics can also be created on filter conditions.
The optimizer evaluates the appropriate indexes and statistics during query execution.
Well designed filtered indexes improve query performance
and are smaller than ordinary indexes.

Index/statistics maintenance (reindexing, defragmentation,
update statistics) and storage are reduced using filtered indexes.
*/

use tempdb
go

if OBJECT_ID('dbo.portposition','U') is not null
drop table dbo.portposition;
go
create table dbo.portposition
( ason date,snam varchar(7),cusip varchar(13),marketvalue numeric (10,2),
accruedincome numeric(10,2) null, primary key (ason,snam,cusip)
)
insert into dbo.portposition (ason,snam,cusip,marketvalue,accruedincome)
values ('01/31/2009','ax0001','acvbfghi',100.25,null),
('02/28/2009','ax0001','acvbfghi',200.50,28),
('03/31/2009','ax0001','acvbfghi',300.75,null),
('01/31/2009','ay0001','acvbfghi',100.25,null),
('02/28/2009','ay0001','acvbfghi',200.25,null),
('01/31/2009','az0001','acvbfghi',100.25,null);
go

create nonclustered index idx_portposition_accruedincome
on dbo.portposition(accruedincome)
where accruedincome is not null ;
create nonclustered index idx_portposition_ason
on dbo.portposition(ason)
where ason >= '02/01/2009' ;

create nonclustered index idx_portposition_marketvalue
on dbo.portposition(marketvalue)
where marketvalue >= 200;

select * from dbo.portposition where accruedincome is not null;
select * from dbo.portposition where ason between '02/01/2009' and '03/31/2009'
select * from dbo.portposition where marketvalue between 200 and 400;
------------------------------------------------------------------------
/*

Object dependency information is improved and provides accurate results
sys.sql_expression_dependencies -- object dependencies by name
sys.dm_sql_referenced_entities -- DMF that provides all entities that the
input entity references (the entity depending on other entities);
this replaces the older sys.sql_dependencies

sys.dm_sql_referencing_entities -- DMF that provides all entities that reference
the input entity (all entities depending on the input entity)
*/
select
OBJECT_SCHEMA_NAME(referencing_id) as srcschema,
OBJECT_NAME(referencing_id) as srcname,
referencing_minor_id as src_minor_id ,
referenced_schema_name as tgtschema,
referenced_entity_name as tgtname,
referenced_minor_id as tgtminorid
from sys.sql_expression_dependencies
order by srcschema,srcname

-- entity depending on other entities
-- procedure dbo.getOrgUnit depends on

select
referenced_schema_name as objschema,
referenced_entity_name as objname,
referenced_minor_name as minorname,
referenced_class_desc as class
from sys.dm_sql_referenced_entities ('dbo.getOrgUnit','OBJECT')
order by objschema,objname

select
referenced_schema_name as objschema,
referenced_entity_name as objname,
referenced_minor_name as minorname,
referenced_class_desc as class
from sys.dm_sql_referenced_entities (null,'OBJECT')
where referenced_minor_name = 'orgunitid'
order by objschema,objname
-- all entities depending on the given entity

-- entities depending on the table dbo.account

select
referencing_schema_name as objschema,
referencing_entity_name as objname,
referencing_class_desc as class
from sys.dm_sql_referencing_entities ('dbo.account','OBJECT')
order by objschema,objname
-------------------------------------------------------------------
--Convert function enhanced for converting hex values to binary
-- These styles convert a hex string literal to a binary value that contains the same digits
-- when presented in hex form and vice versa.
-- New Style Number is added

-- Style 1 -- with 0x

-- Style 2 -- without 0x

SELECT
CONVERT(VARCHAR(12) , 0x49747A696B , 1) AS [Bin to Char 1],
CONVERT(VARBINARY(5), '0x49747A696B', 1) AS [Char to Bin 1],
CONVERT(VARCHAR(12) , 0x49747A696B , 2) AS [Bin to Char 2],
CONVERT(VARBINARY(5), '49747A696B' , 2) AS [Char to Bin 2];
------------------------------------------------------------------------
Hierarchy
------------------------------------------------------------------------
USE tempdb;
IF OBJECT_ID('dbo.Orgunit', 'U') IS NOT NULL DROP TABLE dbo.Orgunit;
CREATE TABLE dbo.Orgunit
(
OrgunitID INT NOT NULL,
hid HIERARCHYID NOT NULL,
lvl AS hid.GetLevel() PERSISTED, -- Computed column and persisted for performance
OrgUnitName VARCHAR(100) NOT NULL,
OrgTypeID char(2),
CONSTRAINT PK_Orgunit PRIMARY KEY NONCLUSTERED(OrgunitID)
);

/*
1. The GetLevel method of the hid column is used to define the persisted computed column lvl.
2. This method returns the level of the current node in the hierarchy.
3. The HIERARCHYID value: a child's sort value is guaranteed to be higher than the parent's sort value
(topological sorting; a node's sort value is guaranteed to be higher
than that of all of its ancestors).
*/
CREATE CLUSTERED INDEX idx_depth_first ON dbo.Orgunit(hid);
-- The above index can be used to find all descendants of a node
CREATE INDEX idx_breadth_first ON dbo.Orgunit(lvl, hid);

-- The above index on lvl and hid is used to find all nodes at the same level
-- and the direct subordinates of a node
--drop index orgunit.idx_breadth_first
--drop index orgunit.idx_depth_first
/*
Inserting New Nodes
1. To insert a node into the hierarchy, a new HIERARCHYID
value has to be created that represents the correct position in the hierarchy.
2. Use the HIERARCHYID::GetRoot() method to produce the value for the root node.
3. Use the GetDescendant method to produce a value below a given parent.
4. The GetDescendant method accepts two optional HIERARCHYID input
values representing the two nodes between which
the new node should be positioned.
The following procedure adds a new node to the hierarchy:
*/

IF OBJECT_ID('dbo.usp_AddOrgUnit', 'P') IS NOT NULL DROP PROC dbo.usp_AddOrgUnit;
GO

CREATE PROC dbo.usp_AddOrgUnit
@OrgunitID AS INT,
@ParentOrgunitID AS INT = NULL,
@OrgUnitName AS VARCHAR(100),
@OrgTypeID AS char(2)
as
set nocount on
DECLARE
@hid AS HIERARCHYID,
@Parent_hid AS HIERARCHYID,
@last_child_hid AS HIERARCHYID;

IF @ParentOrgunitID IS NULL
SET @hid = HIERARCHYID::GetRoot();
ELSE
BEGIN
-- Find Hierarchy ID of ParentOrguniID
SET @Parent_hid = (SELECT hid FROM dbo.Orgunit WHERE OrgunitID = @ParentOrgunitID);
-- There may be several children for the ParentOrgunitID
-- Find the maximum child hierarchyid
SET @last_child_hid =
(SELECT MAX(hid) FROM dbo.Orgunit
WHERE hid.GetAncestor(1) = @Parent_hid);
-- Create a new child hierarchyid under the parent, positioned after the current last child
SET @hid = @Parent_hid.GetDescendant(@last_child_hid, NULL);
END

INSERT INTO dbo.Orgunit(OrgunitID, hid, OrgUnitName, OrgTypeID)
VALUES(@OrgunitID, @hid, @OrgUnitName, @OrgTypeID);
GO

EXEC dbo.usp_AddOrgUnit @OrgunitID = 0, @ParentOrgunitID = NULL, @OrgUnitName = 'Harrow Partners Ltd' , @OrgTypeID = 'CO';

EXEC dbo.usp_AddOrgUnit @OrgunitID = 1, @ParentOrgunitID = 0, @OrgUnitName = 'HP Division Business Unit' , @OrgTypeID = 'DI';

EXEC dbo.usp_AddOrgUnit @OrgunitID = 2, @ParentOrgunitID = 1, @OrgUnitName = 'HP BD' , @OrgTypeID = 'BD';

EXEC dbo.usp_AddOrgUnit @OrgunitID = 3, @ParentOrgunitID = 2, @OrgUnitName = 'BD ADV Firm1' , @OrgTypeID = 'AF';

EXEC dbo.usp_AddOrgUnit @OrgunitID = 4, @ParentOrgunitID = 0, @OrgUnitName = 'HP RIA Firm1' , @OrgTypeID = 'RI';

EXEC dbo.usp_AddOrgUnit @OrgunitID = 5, @ParentOrgunitID = 3, @OrgUnitName = 'HP Region 1' , @OrgTypeID = 'RG';

EXEC dbo.usp_AddOrgUnit @OrgunitID = 6, @ParentOrgunitID = 1, @OrgUnitName = 'HP Branch 1' , @OrgTypeID = 'BH';

EXEC dbo.usp_AddOrgUnit @OrgunitID = 8, @ParentOrgunitID = 6, @OrgUnitName = 'New Orgunit' , @OrgTypeID = 'CO';

EXEC dbo.usp_AddOrgUnit @OrgunitID = 9, @ParentOrgunitID = 1, @OrgUnitName = 'New Orgunit' , @OrgTypeID = 'CO';
/*
Querying the Hierarchy
The hid value is binary; the ToString method on hid provides the logical string representation of the value, which shows the path with a slash used as a separator between the levels.
*/
SELECT C.OrgunitID, C.OrgUnitName,P.OrgunitID ParentOrgunitID,c.lvl as hierarchylevel,C.hid.ToString() AS path
FROM dbo.Orgunit AS P
JOIN dbo.Orgunit AS C
ON C.hid.GetAncestor(1) = P.hid
order by c.lvl,c.hid

SELECT C.OrgunitID, C.OrgUnitName,P.OrgunitID ParentOrgunitID,c.lvl as hierarchylevel,C.hid.ToString() AS path
FROM dbo.Orgunit AS P
JOIN dbo.Orgunit AS C
ON C.hid.GetAncestor(1) = P.hid
order by c.hid
/*
Graphical depiction of the hierarchy: sort the rows by hid
and produce indentation based on the lvl column, as follows:
*/
SELECT orgunitid,
REPLICATE(' ', lvl) + OrgUnitName ,
hid.ToString() AS path
FROM dbo.Orgunit
ORDER BY hid;
/*
To get all child orgunits of an orgunit (subtree), use the method IsDescendantOf.
This method accepts a node's HIERARCHYID value as input
and returns 1 (true) or 0 (false) depending on whether the queried node is a descendant of the input node.
The following query returns all child orgunits, direct and indirect, of orgunit 1:
*/
SELECT C.OrgunitID, C.OrgUnitName, C.lvl
FROM dbo.Orgunit AS P
JOIN dbo.Orgunit AS C
ON P.OrgunitID = 1
AND C.hid.IsDescendantOf(P.hid) = 1;-- 1 - true , 0 - false
/*
IsDescendantOf method can also be used to return all parent orgunits of a given orgunit.
The following query returns all parent orgunits of orgunit 3:
*/
SELECT P.OrgunitID, P.OrgUnitName, P.lvl
FROM dbo.Orgunit AS P
JOIN dbo.Orgunit AS C
ON C.OrgunitID = 3
AND C.hid.IsDescendantOf(P.hid) = 1
/*
To get a whole level of child orgunits of a certain orgunit,
use the GetAncestor method. This method accepts a number (call it n) as input
and returns the HIERARCHYID value of the ancestor of the queried node, n levels above.
The following queries return direct child orgunits (1 level below) of orgunit 1, and orgunits 1, 2 and 3 levels below orgunit 0:
*/
-- Level 1 below OrgunitID = 1
SELECT C.OrgunitID, C.OrgUnitName,P.OrgunitID as ParentOrgunitID
FROM dbo.Orgunit AS P
JOIN dbo.Orgunit AS C
ON P.OrgunitID = 1
AND C.hid.GetAncestor(1) = P.hid
order by c.hid

-- Level 1 below OrgunitID = 0
SELECT C.OrgunitID, C.OrgUnitName
FROM dbo.Orgunit AS P
JOIN dbo.Orgunit AS C
ON P.OrgunitID = 0
AND C.hid.GetAncestor(1) = P.hid;

-- Level 2 below OrgunitID = 0
SELECT C.OrgunitID, C.OrgUnitName
FROM dbo.Orgunit AS P
JOIN dbo.Orgunit AS C
ON P.OrgunitID = 0
AND C.hid.GetAncestor(2) = P.hid;

-- Level 3 below OrgunitID = 0
SELECT C.OrgunitID, C.OrgUnitName
FROM dbo.Orgunit AS P
JOIN dbo.Orgunit AS C
ON P.OrgunitID = 0
AND C.hid.GetAncestor(3) = P.hid;

-- HIERARCHYID::Parse Method -- converts string to hierarchyid
-- GetReparentedValue -- Reparent Nodes (old root,new root )
-- Current Path -- /1/1/2/3/2/, old root -- /1/1/, new root -- /2/1/4/
-- New Path /2/1/4/2/3/2/
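
A minimal sketch of Parse and GetReparentedValue using the paths from the comments above.

declare @current hierarchyid, @oldroot hierarchyid, @newroot hierarchyid
set @current = hierarchyid::Parse('/1/1/2/3/2/')
set @oldroot = hierarchyid::Parse('/1/1/')
set @newroot = hierarchyid::Parse('/2/1/4/')
-- replaces the /1/1/ prefix of the current path with /2/1/4/
select @current.GetReparentedValue(@oldroot, @newroot).ToString() as newpath -- /2/1/4/2/3/2/
go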

Friday, April 16, 2010

Physical Operations

/*

Lack of indexes or memory pressure results in high physical I/O.
To find queries with a high-cost physical operator, run the following script.

NOTE: This may take considerable time, so use the TOP operator
to restrict the number of rows.

*/

declare @op varchar(50)
set @op = 'Clustered Index Scan' -- physical operator to search for (example value)

select top 50 sql.text, p.query_plan, qs.execution_count
from sys.dm_exec_query_stats as qs
cross apply sys.dm_exec_sql_text(qs.sql_handle) sql
cross apply sys.dm_exec_query_plan(qs.plan_handle) p
where p.query_plan.exist('
declare default element namespace "http://schemas.microsoft.com/sqlserver/2004/07/showplan";
/ShowPlanXML/BatchSequence/Batch/Statements//RelOp/@PhysicalOp[. = sql:variable("@op")]') = 1
order by qs.execution_count desc

go

Tuesday, April 13, 2010

I/O related Queries

-----------------------------------------------------------

-- I/O related Queries
----------------------------------------------------------
/*
Log Space Information
*/
select rtrim(pc1.instance_name) as [database name]
, pc1.cntr_value/1024.0 as [log size (mb)]
, cast(pc2.cntr_value*100.0/pc1.cntr_value as dec(5,2))
as [log space used (%)]
from sys.dm_os_performance_counters as pc1
join sys.dm_os_performance_counters as pc2
on pc1.instance_name = pc2.instance_name
where pc1.object_name like '%Databases%'
and pc2.object_name like '%Databases%'
and pc1.counter_name = 'Log File(s) Size (KB)'
and pc2.counter_name = 'Log File(s) Used Size (KB)'
and pc1.instance_name not in ('_Total', 'mssqlsystemresource')
and pc1.cntr_value > 0

GO
------------------------------------------------------------------------

/*
The two columns io_stall_read_ms and io_stall_write_ms
represent the time SQL Server waited for reads and writes
issued on the file since SQL Server started.
Poll the query over a short interval and then compare it with baseline numbers.
*/
select db_name(database_id) as dbname , file_id,
io_stall_read_ms, io_stall_write_ms
from sys.dm_io_virtual_file_stats(null, null)
order by (io_stall_read_ms+ io_stall_write_ms) desc;
GO
------------------------------------------------------------------------

Utility Queries 01

/*

Idle process with open transaction
*/

select spid,kpid,blocked,status,cmd,open_tran,dbid,
datediff(s,last_batch,getdate()) as idle_secs,waittype,lastwaittype,waitresource,
login_time,last_batch,cpu,physical_io,memusage
from sys.sysprocesses
where status = 'sleeping' and open_tran > 0
---------------------------------------------------------------------------------------------------
 
/*

Transaction Running in the current session
*/
select * from sys.dm_tran_current_transaction
 
---------------------------------------------------------------------------------------------------

--Number of threads used


select count(*) as 'Number of threads'

from sys.dm_os_waiting_tasks
where wait_type <> 'THREADPOOL';
go
---------------------------------------------------------------------------------------------------
 /*


Check whether a DAC is in use by running
the following query. If there is an active DAC,
the query will return the server process id (spid)
for the DAC; otherwise, it will return no rows.
*/

select t2.session_id
from sys.tcp_endpoints as t1
join sys.dm_exec_sessions as t2
on t1.endpoint_id = t2.endpoint_id
where t1.name='Dedicated Admin Connection';
GO

-------------------------------------------------------------------------
/*
protocol used for the current connection:
*/
select net_transport
from sys.dm_exec_connections
where session_id = @@spid;
GO

-------------------------------------------------------------------------
 

Monday, April 12, 2010

Estimated Time of Completion of Backup/Restore

/*
If you have started a backup/restore process, you would like to know the
percentage of completion and approximately how long the process will take to complete.

Estimate completion of long-running backup or restore commands.
Use the following code to display the estimated completion time and percentage complete.
*/

select r.session_id,r.command,
convert(numeric(6,2),r.percent_complete) as [percent complete],
convert(varchar(20),dateadd(ms,r.estimated_completion_time,getdate()),20) as [eta completion time],
convert(numeric(6,2),r.total_elapsed_time/1000.0/60.0) as [elapsed min],
convert(numeric(6,2),r.estimated_completion_time/1000.0/60.0) as [eta min],
convert(numeric(6,2),r.estimated_completion_time/1000.0/60.0/60.0) as [eta hours],
convert(varchar(100),(select substring(text,r.statement_start_offset/2,
case
when r.statement_end_offset = -1 then 1000
else (r.statement_end_offset-r.statement_start_offset)/2
end
)
from sys.dm_exec_sql_text(r.sql_handle))) as [statement]
from sys.dm_exec_requests r
where command in ('restore database','backup database')
-----------------------------------------------------------------------------------------------------

Memory Related Queries

----------------------------------------------------------------------

-- Memory Related Queries
---------------------------------------------------------------------
/*
dbcc memorystatus
Memory Manager: VM Committed + AWE Allocated = total physical memory for the instance
Single page allocations come from the buffer pool
Check CACHESTORE_OBJCP to see how much memory SPs, functions, triggers and views are consuming
Check CACHESTORE_SQLCP to see how much memory ad hoc queries are consuming
Check CACHESTORE_XPROC to see how much memory extended SPs are consuming
Check Buffer Pool:
Committed - total physical memory committed
Target - memory required
If Target > Committed : memory pressure
If Target = Committed : OK
If Target < Committed : memory will shrink
Free - free memory available (not committed/reserved)
Stolen - memory stolen from the buffer pool by SQL Server internal components
Check Procedure Cache for
TotalProcs, TotalPages, InUse pages
*/
dbcc memorystatus
------------------------------------------------------------------
-- Investigate Buffer pool and Procedure Cache Memory allocation
declare @sqlobject varchar(80)
exec @sqlobject = dbo.lib_fn_sqlobject
select object_name,counter_name,instance_name,cntr_value,
(cntr_value*8)/1024 as size_MB
from sys.dm_os_performance_counters a
where
(a.object_name = @sqlobject+'Buffer Manager' and counter_name = 'Database pages')
or (a.object_name = @sqlobject+'Plan Cache' and counter_name = 'cache pages')
---------------------------------------------------------------------

-- Total amount of memory consumed (including AWE) by the buffer pool:
select sum(multi_pages_kb + virtual_memory_committed_kb
+ shared_memory_committed_kb
+ awe_allocated_kb)/1024.00/1000.00 as [used by bpool, gb]
from sys.dm_os_memory_clerks
where type = 'MEMORYCLERK_SQLBUFFERPOOL';
-----------------------------------------------------------------------------

/*
Internal memory pressure occurs because internal
components have stolen pages from buffer pool,
Identify internal components that are stealing
the most pages from buffer pool
Note: single_pages are allocated from Buffer Pool
multi_pages are allocated from outside buffer pool
*/
select type, sum(single_pages_kb)/1024.00 as stolen_mem_mb
from sys.dm_os_memory_clerks
group by type
order by stolen_mem_mb desc;
---------------------------------------------------------------------

/*
Internal components that have
allocated memory outside of buffer pool by using
multipage allocator
*/
select type, sum(multi_pages_kb)/1024.00 as multi_page_allocated_mb
from sys.dm_os_memory_clerks
where multi_pages_kb != 0
group by type
order by multi_page_allocated_mb desc;
---------------------------------------------------------------------
/*

Total : single_page_allocator + multi_page_allocator
for internal components
*/
select type, sum(single_pages_kb + multi_pages_kb)/1024.00 as total_mem_mb
from sys.dm_os_memory_clerks
group by type
order by total_mem_mb desc;

-- OR
--see the amount of memory allocated in the multipage units.

select name, type, (single_pages_kb/1024.00) single_mb,
(multi_pages_kb/1024.00) multi_mb,
(single_pages_in_use_kb/1024.00) single_use_mb,
(multi_pages_in_use_kb/1024.00) multi_use_mb
from sys.dm_os_memory_cache_counters
where type = 'CACHESTORE_SQLCP' OR type = 'CACHESTORE_OBJCP';

---------------------------------------------------------------------

Compilation and Recompilation Information from Performance Counters

First create the following function, which returns the performance counter object name prefix (for example 'SQLServer:' or 'MSSQL$InstanceName:') for the current instance of SQL Server.

create function dbo.lib_fn_sqlobject ()

returns varchar(80)
as
begin

 declare @sqlobject varchar(80)
 select @sqlobject = object_name
 from sys.dm_os_performance_counters
 where object_name like '%sql statistics%'
 and counter_name = 'sql compilations/sec'
 set @sqlobject = substring(@sqlobject,1,charindex(':',@sqlobject))
 return @sqlobject
end

/*

Check the number of compilations and recompilations per second.
If the percentage of compilations and recompilations compared
to batch requests is high (> 50%), check whether
queries are properly parameterized.

*/

declare @sqlobject varchar(80)
exec @sqlobject = dbo.lib_fn_sqlobject
select b.counter_name,a.cntr_value,b.cntr_value,b.cntr_value *100.00/a.cntr_value as percentage
from sys.dm_os_performance_counters a,
sys.dm_os_performance_counters b
where a.object_name = @sqlobject+'SQL Statistics'
and a.counter_name in ('Batch Requests/sec')
and b.object_name = @sqlobject + 'SQL Statistics'
and b.counter_name in ('SQL Compilations/sec','SQL Re-Compilations/sec')
----------------------------------------------------------------------

/*

Check the number of Safe Auto-Params and Failed Auto-Params per second.
If Failed Auto-Params/sec returns a high percentage,
queries are creating new plans. Try to use sp_executesql and
check the query parameters for queries with high failed auto-parameterization.
This occurs mostly for ad hoc queries.

You can test whether setting parameterization to FORCED at the database level
will improve performance (a sketch follows this comment block). Test on a development server before promoting to the production server.
*/
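
A minimal sketch of both suggestions; the query is illustrative and "testdb" is a placeholder database name.

-- parameterize an ad hoc query with sp_executesql
declare @sql nvarchar(500), @snam varchar(7)
set @sql = N'select ason, snam, cusip, marketvalue
from dbo.portposition where snam = @snam'
set @snam = 'ax0001'
exec sp_executesql @sql, N'@snam varchar(7)', @snam = @snam
go

-- switch the database to forced parameterization for the test, then back
alter database testdb set parameterization forced
go
alter database testdb set parameterization simple
go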

declare @sqlobject varchar(80)
exec @sqlobject = dbo.lib_fn_sqlobject
select b.counter_name,b.cntr_value *100.00/a.cntr_value as percentage
from sys.dm_os_performance_counters a,
sys.dm_os_performance_counters b
where a.object_name = @sqlobject+'SQL Statistics'
and a.counter_name in ('Auto-Param Attempts/sec')
and b.object_name = @sqlobject+'SQL Statistics'
and b.counter_name in ('Failed Auto-Params/sec','Safe Auto-Params/sec')
----------------------------------------------------------------------
/*
Check the plan cache hit ratio for each cache object type.
If the percentage is less than 80%, investigate further.
*/



declare @sqlobject varchar(80)

exec @sqlobject = dbo.lib_fn_sqlobject

select b.instance_name,a.cntr_value base ,b.cntr_value counter,
b.cntr_value *100.00/a.cntr_value cache_hit_ratio
from sys.dm_os_performance_counters a,
sys.dm_os_performance_counters b
where a.object_name = @sqlobject+'Plan Cache'
and a.cntr_type = 1073939712
and b.object_name = @sqlobject+'Plan Cache'
and b.cntr_type = 537003264
and a.instance_name = b.instance_name
and a.cntr_value > 0
----------------------------------------------------------------------

High CPU Usage due to recompilation

/*

High CPU pressure can also happen due to queries/batches
that are being recompiled frequently.
plan_generation_num indicates the number of times the plan has been generated;
if plan_generation_num > 1, there has been recompilation.
*/

select top 50 plan_generation_num, execution_count,
db_name(depamain.dbid) as dbname,object_name(depamain.objectid,depamain.dbid) as objname,
(
select substring(text, statement_start_offset/2 + 1,
 (
  case when statement_end_offset = -1 then len(convert(nvarchar(max),text)) * 2
  else statement_end_offset
  end - statement_start_offset)/2
 )
from sys.dm_exec_sql_text(sql_handle)
) as query_text
from sys.dm_exec_query_stats
outer apply
(
 select dbid,objectid from

  (
    select attribute,cast(value as int) as attvalue
    from sys.dm_exec_plan_attributes(plan_handle)
    where attribute in ('dbid','objectid')
) as depa
pivot
 (
  max(depa.attvalue) for depa.attribute in ([dbid], [objectid])
 ) as depapvt
) as depamain
where plan_generation_num >1
order by plan_generation_num desc;

----------------------------------------------------------------------

Find the state of the process and count on each scheduler

-- Find the state of the process and count on each scheduler


select count(*) noofrows ,t1.state as processorstate, t2.scheduler_id
from sys.dm_os_workers as t1, sys.dm_os_schedulers as t2
where t1.scheduler_address = t2.scheduler_address
and t2.scheduler_id < 255
group by t2.scheduler_id,t1.state
order by scheduler_id

Brief Description about WAITs

---------------------------------------

-- WAITS
---------------------------------------
/*
Some of the wait types are system wait types and should be ignored;
create a view listing the wait types to ignore
*/
create view dbo.lib_vw_ignore_waittypes
as
select 'DBMIRRORING_CMD' as wait_type
union all
select 'BROKER_EVENTHANDLER'
union all
select 'BROKER_TRANSMITTER'
union all
select 'BROKER_RECEIVE_WAITFOR'
union all
select 'ONDEMAND_TASK_QUEUE'
union all
select 'REQUEST_FOR_DEADLOCK_SEARCH'
union all
select 'DBMIRROR_EVENTS_QUEUE'
union all
select 'SQLTRACE_BUFFER_FLUSH'
union all
select 'BAD_PAGE_PROCESS'
union all
select 'CHECKPOINT_QUEUE'
union all
select 'LAZYWRITER_SLEEP'
union all
select 'LOGMGR_QUEUE'
union all
select 'KSOURCE_WAKEUP'
go
-------------------------------------------------------------------------
/*
Performance of a query depends on the current waits,
available in sys.dm_os_waiting_tasks.
Find the waits currently occurring on the system;
check wait_duration_ms to identify waits with a high wait duration
select * from sys.dm_os_waiting_tasks

Troubleshooting Blocking

1. Identify blocking
2. Cause of blocking
3. Remove the cause of blocking

select * from sys.dm_os_waiting_tasks provides blocking information
Session currently executing is associated with a task

Some system tasks will not have a session_id

1. waiting tasks columns
a. waiting_task_address ( unique internal memory address of the object
representing the task)
b. session_id ( user session associated with the task.
Association between session and task is only for the duration of the task )
c. exec_context_id
2. blocking task information columns
a. blocking_task_address
b. blocking_session_id
c. blocking_exec_context_id

3. wait information

a. wait_type ( current_wait_type for the waiting_task_address)
b. wait_duration_ms ( duration of the current wait)
c. resource_address ( memory address of the resource on which task is waiting)
d. resource_description ( populated only for locks,latch,CXPACKET,THREADPOOL)
*/

SELECT
WT.*
FROM sys.dm_os_waiting_tasks AS WT
where wait_type not in (select wait_type from master.dbo.lib_vw_ignore_waittypes)
/*

1. Check the wait_type column. If the same wait type is repeated for several session_ids
with the same resource_address for a long duration, resolve this wait type

2. Check resource_address. If the same resource_address is repeated, there is
a problem with that resource

3. wait_duration_ms provides evidence of high wait time
*/
-------------------------------------------------------------------------------

/*
sys.dm_os_wait_stats -- provides cumulative
wait type statistics since SQL Server started
-- You can clear the wait statistics information:
dbcc sqlperf('sys.dm_os_wait_stats', CLEAR)
-- NOTE: Avoid clearing wait statistics in a production environment
By default not all wait types are listed in sys.dm_os_wait_stats
-- To display all the wait types
dbcc traceon(8001,-1)
select * from sys.dm_os_wait_stats
*/

/*
Compare signal waits and resource waits.
Signal waits are the time spent in the runnable queue waiting for the CPU,
while resource waits are the time spent waiting for the
resource (wait_time_ms - signal_wait_time_ms).
wait_time_ms represents the total waits.
Check for a CPU issue:

If %signal waits is around 25% or above, there may be a CPU issue.

*/
Select signal_wait_time_ms=sum(signal_wait_time_ms)
,'%signal waits' = cast(100.0 * sum(signal_wait_time_ms) / sum (wait_time_ms) as numeric(20,2))
,resource_wait_time_ms=sum(wait_time_ms - signal_wait_time_ms)
,'%resource waits'= cast(100.0 * sum(wait_time_ms - signal_wait_time_ms) / sum (wait_time_ms) as numeric(20,2))
From sys.dm_os_wait_stats
where wait_type not in (select wait_type from master.dbo.lib_vw_ignore_waittypes)
go

-------------------------------------------------------------------------------------

-- Wait Types with high wait times
select top 10 *,wait_time_ms*1.0000/waiting_tasks_count as avg_wait_ms,
wait_time_ms/1000.00/60.00/60.00/24.00 as wait_time_days,
max_wait_time_ms/1000.00/60.00 as max_wait_time_min
from sys.dm_os_wait_stats
where wait_type not in (select wait_type from master.dbo.lib_vw_ignore_waittypes)
and waiting_tasks_count > 0
order by wait_time_ms desc

-----------------------------------------------------------------------------------
-- Wait Types with high maximum wait time
select top 10 *,
wait_time_ms/1000.00/60.00/60.00/24.00 as wait_time_days,
max_wait_time_ms/1000.00/60.00 as max_wait_time_min
from sys.dm_os_wait_stats
where wait_type not in (select wait_type from master.dbo.lib_vw_ignore_waittypes)
order by max_wait_time_ms desc
----------------------------------------------------------------------------
-- Wait Types with high waiting tasks count
select top 10 *, wait_time_ms*1.0000/waiting_tasks_count as avg_wait_ms,
wait_time_ms/1000.00/60.00/60.00/24.00 as wait_time_days,
max_wait_time_ms/1000.00/60.00 as max_wait_time_min
from sys.dm_os_wait_stats
where wait_type not in (select wait_type from master.dbo.lib_vw_ignore_waittypes)
and waiting_tasks_count > 0
order by waiting_tasks_count desc
------------------------------------------------------------------------
-- Wait Types with high average wait time

select top 20 *, wait_time_ms*1.0000/waiting_tasks_count as avg_wait_ms,
wait_time_ms/1000.00/60.00/60.00/24.00 as wait_time_days,
max_wait_time_ms/1000.00/60.00 as max_wait_time_min
from sys.dm_os_wait_stats
where wait_type not in (select wait_type from master.dbo.lib_vw_ignore_waittypes)
and waiting_tasks_count > 0
order by avg_wait_ms desc

------------------------------------------------------------------------------

Thursday, April 8, 2010

SQL Server 2008 system and memory Information

select
cpu_ticks,ms_ticks,cpu_count,hyperthread_ratio,
cast(physical_memory_in_bytes/1024.00/1024.00/1000 as numeric(8,2)) as physical_memory_GB,
cast(virtual_memory_in_bytes/1024.00/1024.00/1000 as numeric(8,2)) as virtual_memory_GB,
cast(bpool_committed*8/1024.00/1000 as numeric(8,2)) as bpool_committed_GB ,
cast(bpool_commit_target*8/1024.00/1000 as numeric(8,2)) as bpool_commit_target_GB,
cast(bpool_visible*8/1024.00/1000 as numeric(8,2)) as bpool_visible_GB,
cast(stack_size_in_bytes/1024.00 as numeric(8,2)) as stack_size_in_bytes_KB,
os_quantum,os_error_mode,
os_priority_class,max_workers_count,
scheduler_count,scheduler_total_count,
deadlock_monitor_serial_number,
sqlserver_start_time_ms_ticks,
sqlserver_start_time
from sys.dm_os_sys_info

-- Memory Information

select
cast(total_physical_memory_kb/1024.00/1000 as numeric(8,2)) as Phys_Mem_GB,
cast(available_physical_memory_kb/1024.00/1000 as numeric(8,2))as Available_Phys_Mem_GB,
cast(total_page_file_kb/1024.00/1000 as numeric(8,2)) as total_page_file_GB,
cast(available_page_file_kb/1024.00/1000 as numeric(8,2)) as available_page_file_GB,
cast(system_cache_kb/1024.00/1000 as numeric(8,2)) as system_cache_GB,
cast(kernel_paged_pool_kb/1024.00/1000 as numeric(8,2)) as kernel_paged_pool_GB,
cast(kernel_nonpaged_pool_kb/1024.00/1000 as numeric(8,2)) as kernel_nonpaged_pool_GB,
system_high_memory_signal_state,
system_low_memory_signal_state,
system_memory_state_desc
from sys.dm_os_sys_memory

Friday, April 2, 2010

Total amount of memory consumed (including AWE) by the buffer pool:

-- Total amount of memory consumed (including AWE) by the buffer pool:
/*

 Total Amount includes multi page allocator , virtual memory committed,shared memory committed and AWE
*/


SELECT SUM(multi_pages_kb + virtual_memory_committed_kb
+ shared_memory_committed_kb
+ awe_allocated_kb)/1024.00/1000.00 AS [Used by BPool, GB]
FROM sys.dm_os_memory_clerks
WHERE type = 'MEMORYCLERK_SQLBUFFERPOOL';

Time spent by workers in RUNNABLE state

-- Time spent by workers in RUNNABLE state
/*
You can capture wait times along with signal wait times on an instance of SQL Server
to investigate the percentage of CPU waits.

*/


create table #tempsignalwait ( wait_time_ms bigint,signal_wait_time_ms bigint, batchid int identity(1,1))
declare @slno int,@counter int
set @slno = 0
set @counter = 3
while @slno < @counter
begin
set @slno = @slno + 1
insert into #tempsignalwait(wait_time_ms,signal_wait_time_ms)
SELECT SUM(wait_time_ms), SUM(signal_wait_time_ms)
FROM sys.dm_os_wait_stats;


waitfor delay '00:01:00' -- wait for a minute

end


select a.batchid,
b.wait_time_ms - a.wait_time_ms as delay_tot_ms_in_onemin,
b.signal_wait_time_ms - a.signal_wait_time_ms as delay_signal_ms_in_onemin,
(b.signal_wait_time_ms - a.signal_wait_time_ms) * 100.00 / (b.wait_time_ms - a.wait_time_ms) as percent_signal_delay
from #tempsignalwait a, #tempsignalwait b
where a.batchid < @counter and b.batchid > 1
and b.batchid = a.batchid + 1
order by a.batchid



drop table #tempsignalwait

Thursday, April 1, 2010

Procedure Cache Information with high plan cache size

/*
 To list entries from the procedure cache with a high cached plan size.
 With parameter @noofrounds = 0, all entries are returned.
 With parameter @noofrounds > 0, only entries for caches where the clock hands
 have made rounds and removed entries are returned, which indicates internal procedure cache pressure.

Usage :
exec dbo.sp_lib_CacheInfo 0
exec dbo.sp_lib_CacheInfo 1
*/






CREATE PROCEDURE dbo.sp_lib_CacheInfo @noofrounds int = 0

AS
BEGIN

 SET NOCOUNT ON
 SELECT DISTINCT cc.cache_address, cc.name, cc.type,
 cc.single_pages_kb + cc.multi_pages_kb AS total_kb,
 cc.single_pages_in_use_kb + cc.multi_pages_in_use_kb AS total_in_use_kb,
 cc.entries_count,
 cc.entries_in_use_count,
 ch.removed_all_rounds_count,
 ch.removed_last_round_count
FROM sys.dm_os_memory_cache_counters cc
 INNER JOIN sys.dm_os_memory_cache_clock_hands ch
 ON (cc.cache_address = ch.cache_address)
-- information only for moving hands caches
WHERE
(
               @noofrounds  = 0 
              OR
              ( @noofrounds  > 0
              AND
              ch.rounds_count > 0
              AND ch.removed_all_rounds_count > 0
              )
)
ORDER BY total_kb DESC

END

GO

Find Top Recompiles

-- To investigate top recompiles , use this Stored Procedure

/*
exec sp_lib_TopRecompiles  10 -- Top 10 Queries having high recompilation
*/


CREATE PROCEDURE dbo.sp_lib_TopRecompiles @CountID INT = 25

AS
BEGIN

SET NOCOUNT ON

SELECT TOP (@CountID) sql_text.TEXT, sql_handle,
plan_generation_num, execution_count, dbid,objectid
FROM sys.dm_exec_query_stats a
CROSS APPLY sys.dm_exec_sql_text(sql_handle) AS sql_text
WHERE plan_generation_num > 1
ORDER BY plan_generation_num DESC
END

GO

List of tables referenced in the view

If you need to obtain the list of tables referenced in a view:

exec sp_refreshview 'viewname' -- refresh the view's dependency metadata first
exec sp_lib_TablesintheView 'ViewName'


/*

exec dbo.sp_lib_TablesintheView
*/

CREATE Procedure dbo.sp_lib_TablesintheView
@viewname varchar(200) = NULL
AS
SELECT vObj.name AS ViewName, vObj.id AS ViewID, vObj.xtype AS ViewType,
dep.depid AS DependentID, dep.depnumber AS Dependentnumber, tObj.name AS TableName, col.colid AS ColumnID,
col.name AS columnName
FROM sysobjects vObj LEFT OUTER JOIN
sysdepends dep ON vObj.id = dep.id LEFT OUTER JOIN
sysobjects tObj ON dep.depid = tObj.id LEFT OUTER JOIN
syscolumns col ON dep.depnumber = col.colid
AND tObj.id = col.id
WHERE vObj.xtype = 'V' And vObj.category = 0
AND ( ( vObj.name = @viewname ) OR ( @viewname IS NULL) )
ORDER BY vObj.name, tObj.name, col.name



go

Saturday, March 27, 2010

To list columns with specified delimiter horizontally /vertically

-- Library procedure to list the columns of a table with a specified delimiter, horizontally (one row) or vertically (one column per line)

-- Select Query -> Results to -> Text

/*
EXEC sp_lib_columns 'employee',',','H'
EXEC sp_lib_columns 'employee',',','V'
*/


CREATE procedure dbo.sp_lib_columns
@vchtablename varchar(200),
@chrdelimiter char(1) = ',',
@hvflag char(1) = 'H' -- H = Horizontal (one row) / V = Vertical (one column per line)
as

set nocount on
if @vchtablename is null or len(@vchtablename)= 0
return

if @chrdelimiter is null
return

set nocount on
declare @vchcolumns varchar(8000),@intmaxslno int
declare @tblcolumn table (Column_Name varchar(200),slno int identity(1,1))
set @vchcolumns='('
insert into @tblcolumn(Column_Name)
select column_name
from information_schema.columns where table_name=@vchtablename
order by Ordinal_position


select @intmaxslno=@@rowcount
if @hvflag = 'H'
begin
 select @vchcolumns=@vchcolumns+column_name+case when slno<>@intmaxslno then   @chrdelimiter else '' end
 from @tblcolumn
 order by slno
 select @vchcolumns + ')'

end

else

begin

 select @vchcolumns = @vchcolumns + char(10)

select @vchcolumns = @vchcolumns + column_name + case when slno<>@intmaxslno then @chrdelimiter else '' end + char(10)

 from @tblcolumn order by slno

select @vchcolumns + ')' + char(10)

end

GO

Fix orphan database users in a database after restoration

-- Fix orphan database users in a database after restoration

-- SQL 2005 and above

-- Assumption: Logins are created in the server for orphaned database users

declare @slno int, @maxslno int,@username varchar(100)

set @slno = 0
select identity(int,1,1) as slno ,
a.name as username into #temp
from sys.sysusers a,
sys.syslogins b
where not ( a.issqlrole = 1 or a.isntgroup = 1 or a.isntuser = 1 )
and a.hasdbaccess =1
and
not ( b.isntgroup = 1 or b.isntuser = 1 )
and a.name = b.name


select @maxslno = max(slno) from #temp

while @slno < @maxslno
begin
      set @slno = @slno + 1
      select @username = a.username from #temp a where slno = @slno

       EXEC sp_change_users_login 'auto_fix',@username

end

drop table #temp

Thursday, March 25, 2010

Credential, Proxies, SQL Server Agent Jobs

Terminology:


Credentials:

A credential is a record that contains the authentication information (credentials) required to connect to a resource outside SQL Server. Most credentials contain a Windows user name and password.

The information stored in a credential enables a user who has connected to SQL Server by way of SQL Server Authentication to access resources outside the server instance. When the external resource is Windows, the user is authenticated as the Windows user specified in the credential. A single credential can be mapped to multiple SQL Server logins. A SQL Server login can be mapped to only one credential.

System credentials are created automatically and are associated with specific endpoints. Names for system credentials start with two hash signs (##).
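
A minimal sketch of creating a credential and mapping it to a login; the credential name and password below are placeholders, and the domain account and login reuse the names used later in this post.

USE master
GO

CREATE CREDENTIAL WindowsResourceCredential
WITH IDENTITY = 'domain\sqljobrun', SECRET = 'xxxxxx';
GO

-- map the credential to an existing SQL Server login
ALTER LOGIN sqltest WITH CREDENTIAL = WindowsResourceCredential;
GO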

Proxies:

SQL Server Agent uses proxies to manage security contexts. A proxy can be used in more than one job step. Members of the sysadmin fixed server role can create proxies.

Each proxy corresponds to a security credential. Each proxy can be associated with a set of subsystems and a set of logins. The proxy can be used only for job steps that use a subsystem associated with the proxy. To create a job step that uses a specific proxy, the job owner must either use a login associated with that proxy or be a member of a role with unrestricted access to proxies. Members of the sysadmin fixed server role have unrestricted access to proxies. Members of SQLAgentUserRole, SQLAgentReaderRole, or SQLAgentOperatorRole can only use proxies to which they have been granted specific access. Each user that is a member of any of these SQL Server Agent fixed database roles must be granted access to specific proxies so that the user can create job steps that use those proxies.



Two Types of Proxies



1. xp_cmdshell proxy


2. SQL Server Agent Proxy


xp_cmdshell proxy :

This is used to allow non-sysadmin users to execute xp_cmdshell from SSMS (QA), from stored procedures, or in jobs (only as a T-SQL job step).

xp_cmdshell is disabled by default after SQL Server installation.



1. Enable xp_cmdshell through Facets or Surface Area Configuration

2. Create a domain Windows account ( domain\sqljobrun) in the domain

3. For each database server use this account to create credentials

Note: This account should not be a local administrator account on the database server and should not be a sysadmin. It does not need access to SQL Server at all.

4. Grant 'Log on as a batch job' to sqljobrun: Start -> Administrative Tools -> Local Security Policy -> Log on as a batch job -> click Add User or Group.

Add domain\sqljobrun

5. Create a non-sysadmin login and database user for the SQL Server user (sqltest)

6. Provide sqltest database access to the master database

7. Provide execute access to xp_cmdshell in the master database

Use master

Go

Grant exec on xp_cmdshell to sqltest

8. Server - > Object Explorer -> Right Click -> Properties - > Security

9. Server Proxy Account -> Enable Server Proxy Account

Proxy account :domain\sqljobrun

Password : xxxxxx



This will create system credential ##xp_cmdshell_proxy_account## under Security -> Credentials
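
The same setup can be scripted instead of using the UI; a minimal sketch using the account, password placeholder and user from the steps above:

-- enable xp_cmdshell (step 1, scripted)
EXEC sp_configure 'show advanced options', 1;
RECONFIGURE;
EXEC sp_configure 'xp_cmdshell', 1;
RECONFIGURE;
GO

-- set the server proxy account (steps 8 and 9, scripted);
-- this creates the ##xp_cmdshell_proxy_account## credential
EXEC sp_xp_cmdshell_proxy_account 'domain\sqljobrun', 'xxxxxx';
GO

-- grant execute on xp_cmdshell to the non-sysadmin user (step 7)
USE master
GO
GRANT EXEC ON xp_cmdshell TO sqltest;
GO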



sqltest can execute xp_cmdshell in SSMS, Stored Procedures and T-SQL job steps .

Note: This is not the same as CmdExec. CmdExec uses direct execution of cmd.exe.

sqltest will have the same directory and Windows privileges as domain\sqljobrun.



Note: Make sure the domain\sqljobrun password never expires.



If there is any error executing xp_cmdshell, provide read/execute access on windows\system32\cmd.exe to domain\sqljobrun.





SQL Agent Proxies :

The SQL Agent Proxy accounts allow users without sysadmin authority to own and run SQL Agent jobs. The SQL Agent contains a number of subsystems, each one dedicated to a particular type of work. A proxy account is required in order for an account without sysadmin authority to use these subsystems

The recommended approach is to configure each Non-Admin account that needs to run SQL Agent jobs as a proxy account. This will allow all the authorities required by the job to be contained within the job owner account. The first step in creating a SQL Agent proxy is to create a credential. Therefore, every Job Owner account would have a credential defined for it in order to both isolate and encapsulate the authorities required by a given job.

The credentials created for a given job owner are then associated with each type of SQL Agent subsystem used by the job owner. It is this association that creates the required proxies. This will ensure that all steps in the jobs owned by that account run with a predictable and consistent security profile.
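
A minimal sketch of the proxy setup described above; the proxy and credential names are placeholders, and sqltest is the non-sysadmin login from the earlier section.

USE msdb
GO

-- create a proxy based on an existing credential
EXEC dbo.sp_add_proxy
@proxy_name = 'JobOwnerProxy',
@credential_name = 'WindowsResourceCredential',
@enabled = 1;
GO

-- associate the proxy with the subsystems the job owner needs
EXEC dbo.sp_grant_proxy_to_subsystem
@proxy_name = 'JobOwnerProxy',
@subsystem_name = 'CmdExec';
GO

-- allow the non-sysadmin login to use the proxy in its job steps
EXEC dbo.sp_grant_login_to_proxy
@proxy_name = 'JobOwnerProxy',
@login_name = 'sqltest';
GO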



SubSystem:

A subsystem is a predefined object that represents a set of functionality available to a SQL Server Agent proxy. Each proxy has access to one or more subsystems. Subsystems provide security because they delimit access to the functionality that is available to a proxy. Each job step runs in the context of a proxy, except for Transact-SQL job steps. Transact-SQL job steps use the EXECUTE AS command to set the security context.



Subsystem name Description

Microsoft ActiveX Script Run an ActiveX scripting job step.

Important:

The ActiveX Scripting subsystem will be removed from SQL Server Agent in a future version of Microsoft SQL Server. Avoid using this feature in new development work, and plan to modify applications that currently use this feature.



Operating System (CmdExec) Run an executable program.

PowerShell Run a PowerShell scripting job step.

Replication Distributor Run a job step that activates the replication Distribution Agent.

Replication Merge Run a job step that activates the replication Merge Agent.

Replication Queue Reader Run a job step that activates the replication Queue Reader Agent.

Replication Snapshot Run a job step that activates the replication Snapshot Agent.

Replication Transaction Log Reader Run a job step that activates the replication Log Reader Agent.

Analysis Services Command Run an Analysis Services command.

Analysis Services Query Run an Analysis Services query.

SSIS package execution Run an SSIS package.









Any non-sysadmin SQL Server user who needs to create, execute and maintain SQL Server jobs must be a member of one of these fixed database roles in the msdb database (a sketch of granting membership follows the list).

• SQLAgentUserRole

• SQLAgentReaderRole

• SQLAgentOperatorRole
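
A minimal sketch of granting the role membership; sqltest is the non-sysadmin login from the earlier section.

USE msdb
GO
CREATE USER sqltest FOR LOGIN sqltest;
GO
EXEC sp_addrolemember 'SQLAgentUserRole', 'sqltest';
GO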





When users who are not members of one of these roles are connected to SQL Server in SQL Server Management Studio, the SQL Server Agent node in Object Explorer is not visible. A user must be a member of one of these fixed database roles or a member of the sysadmin fixed server role to use SQL Server Agent.

Permissions of SQL Server Agent Fixed Database Roles

The SQL Server Agent database role permissions are concentric in relation to one another -- more privileged roles inherit the permissions of less privileged roles on SQL Server Agent objects (including alerts, operators, jobs, schedules, and proxies). For example, if members of least-privileged SQLAgentUserRole have been granted access to proxy_A, members of both SQLAgentReaderRole and SQLAgentOperatorRole automatically have access to this proxy even though access to proxy_A has not been explicitly granted to them. This may have security implications, which are discussed in the following sections about each role.

SQLAgentUserRole Permissions

SQLAgentUserRole is the least privileged of the SQL Server Agent fixed database roles. It has permissions on only operators, local jobs, and job schedules. Members of SQLAgentUserRole have permissions on only local jobs and job schedules that they own. They cannot use multiserver jobs (master and target server jobs), and they cannot change job ownership to gain access to jobs that they do not already own. SQLAgentUserRole members can view a list of available proxies only in the Job Step Properties dialog box of SQL Server Management Studio. Only the Jobs node in SQL Server Management Studio Object Explorer is visible to members of SQLAgentUserRole.

Security Note:

Consider the security implications before granting proxy access to members of the SQL Server Agent database roles. The SQLAgentReaderRole and the SQLAgentOperatorRole are automatically members of the SQLAgentUserRole. This means that members of SQLAgentReaderRole and SQLAgentOperatorRole have access to all SQL Server Agent proxies that have been granted to the SQLAgentUserRole and can use those proxies.

The following table summarizes SQLAgentUserRole permissions on SQL Server Agent objects.

Action | Operators | Local jobs (owned jobs only) | Job schedules (owned schedules only) | Proxies

Create/modify/delete | No | Yes (1) | Yes | No

View list (enumerate) | Yes (2) | Yes | Yes | Yes (3)

Enable/disable | No | Yes | Yes | Not applicable

View properties | No | Yes | Yes | No

Execute/stop/start | Not applicable | Yes | Not applicable | Not applicable

View job history | Not applicable | Yes | Not applicable | Not applicable

Delete job history | Not applicable | No (4) | Not applicable | Not applicable

Attach/detach | Not applicable | Not applicable | Yes | Not applicable

(1) Cannot change job ownership.

(2) Can get the list of available operators for use in sp_notify_operator and the Job Properties dialog box of Management Studio.

(3) The list of proxies is only available in the Job Step Properties dialog box of Management Studio.

(4) Members of SQLAgentUserRole must explicitly be granted the EXECUTE permission on sp_purge_jobhistory to delete job history on jobs that they own. They cannot delete job history for any other jobs.





SQLAgentReaderRole Permissions

SQLAgentReaderRole includes all the SQLAgentUserRole permissions as well as permissions to view the list of available multiserver jobs, their properties, and their history. Members of this role can also view the list of all available jobs and job schedules and their properties, not just those jobs and job schedules that they own. SQLAgentReaderRole members cannot change job ownership to gain access to jobs that they do not already own. Only the Jobs node in SQL Server Management Studio Object Explorer is visible to members of the SQLAgentReaderRole.

Security Note:

Consider the security implications before granting proxy access to members of the SQL Server Agent database roles. Members of SQLAgentReaderRole are automatically members of the SQLAgentUserRole. This means that members of SQLAgentReaderRole have access to all SQL Server Agent proxies that have been granted to SQLAgentUserRole and can use those proxies.

The following table summarizes SQLAgentReaderRole permissions on SQL Server Agent objects.

Action | Operators | Local jobs | Multiserver jobs | Job schedules | Proxies

Create/modify/delete | No | Yes (1) (owned jobs only) | No | Yes (owned schedules only) | No

View list (enumerate) | Yes (2) | Yes | Yes | Yes | Yes (3)

Enable/disable | No | Yes (owned jobs only) | No | Yes (owned schedules only) | Not applicable

View properties | No | Yes | Yes | Yes | No

Edit properties | No | Yes (owned jobs only) | No | Yes (owned schedules only) | No

Execute/stop/start | Not applicable | Yes (owned jobs only) | No | Not applicable | Not applicable

View job history | Not applicable | Yes | Yes | Not applicable | Not applicable

Delete job history | Not applicable | No (4) | No | Not applicable | Not applicable

Attach/detach | Not applicable | Not applicable | Not applicable | Yes (owned schedules only) | Not applicable

(1) Cannot change job ownership.

(2) Can get the list of available operators for use in sp_notify_operator and the Job Properties dialog box of Management Studio.

(3) The list of proxies is only available in the Job Step Properties dialog box of Management Studio.

(4) Members of SQLAgentReaderRole must explicitly be granted the EXECUTE permission on sp_purge_jobhistory to delete job history on jobs that they own. They cannot delete job history for any other jobs.





SQLAgentOperatorRole Permissions

SQLAgentOperatorRole is the most privileged of the SQL Server Agent fixed database roles. It includes all the permissions of SQLAgentUserRole and SQLAgentReaderRole. Members of this role can also view properties for operators and proxies, and enumerate available proxies and alerts on the server.

SQLAgentOperatorRole members have additional permissions on local jobs and schedules. They can execute, stop, or start all local jobs, and they can delete the job history for any local job on the server. They can also enable or disable all local jobs and schedules on the server. To enable or disable local jobs or schedules, members of this role must use the stored procedures sp_update_job and sp_update_schedule. Only the parameters that specify the job or schedule name or identifier and the @enabled parameter can be specified by members of SQLAgentOperatorRole. If they specify any other parameters, execution of these stored procedures fails. SQLAgentOperatorRole members cannot change job ownership to gain access to jobs that they do not already own.
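
For example (the job name here is a placeholder), an operator-role member can disable a job it does not own only by restricting the call to the job identifier and @enabled parameters:

-- Disable a job owned by someone else (job name is a placeholder);
-- passing any parameter other than the identifier and @enabled fails for this role
EXEC msdb.dbo.sp_update_job @job_name = N'NightlyBackup', @enabled = 0;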

The Jobs, Alerts, Operators, and Proxies nodes in SQL Server Management Studio Object Explorer are visible to members of SQLAgentOperatorRole. Only the Error Logs node is not visible to members of this role.

Security Note:

Consider the security implications before granting proxy access to members of the SQL Server Agent database roles. Members of SQLAgentOperatorRole are automatically members of SQLAgentUserRole and SQLAgentReaderRole. This means that members of SQLAgentOperatorRole have access to all SQL Server Agent proxies that have been granted to either SQLAgentUserRole or SQLAgentReaderRole and can use those proxies.

The following table summarizes SQLAgentOperatorRole permissions on SQL Server Agent objects.

Action | Alerts | Operators | Local jobs | Multiserver jobs | Job schedules | Proxies

Create/modify/delete | No | No | Yes (2) (owned jobs only) | No | Yes (owned schedules only) | No

View list (enumerate) | Yes | Yes (1) | Yes | Yes | Yes | Yes

Enable/disable | No | No | Yes (3) | No | Yes (4) | Not applicable

View properties | Yes | Yes | Yes | Yes | Yes | Yes

Edit properties | No | No | Yes (owned jobs only) | No | Yes (owned schedules only) | No

Execute/stop/start | Not applicable | Not applicable | Yes | No | Not applicable | Not applicable

View job history | Not applicable | Not applicable | Yes | Yes | Not applicable | Not applicable

Delete job history | Not applicable | Not applicable | Yes | No | Not applicable | Not applicable

Attach/detach | Not applicable | Not applicable | Not applicable | Not applicable | Yes (owned schedules only) | Not applicable

(1) Can get the list of available operators for use in sp_notify_operator and the Job Properties dialog box of Management Studio.

(2) Cannot change job ownership.

(3) SQLAgentOperatorRole members can enable or disable local jobs they do not own by using the stored procedure sp_update_job and specifying values for the @enabled and the @job_id (or @job_name) parameters. If a member of this role specifies any other parameters for this stored procedure, execution of the procedure will fail.

(4) SQLAgentOperatorRole members can enable or disable schedules they do not own by using the stored procedure sp_update_schedule and specifying values for the @enabled and the @schedule_id (or @name) parameters. If a member of this role specifies any other parameters for this stored procedure, execution of the procedure will fail.





To allow the login sqltest to run CmdExec and ActiveX Script job steps:

1. Log in as a sysadmin.

2. Go to Object Explorer -> Security -> Credentials.

3. New Credential:

Credential name: sqltest credential

Identity: domain\sqljobrun

Password: xxxxx

Confirm password: xxxxx

OK

4. Go to SQL Server Agent -> Proxies.

5. New Proxy:

Proxy name: sqltest proxy

Credential name: sqltest credential

Active to the following subsystems (check):

ActiveX Script

Operating System (CmdExec)

6. Principals: add sqltest.

With this, sqltest can create jobs. For a job step of type Operating System (CmdExec), sqltest should select the sqltest proxy as the "Run as" account.
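
The same setup can be scripted. A sketch, assuming the credential created earlier and the sqltest login already exist (all names are placeholders):

USE msdb;
GO
-- Create the proxy on top of the credential
EXEC dbo.sp_add_proxy
     @proxy_name      = N'sqltest proxy',
     @credential_name = N'sqltest credential',
     @enabled         = 1;
GO
-- Allow the proxy to be used by the CmdExec and ActiveX Script subsystems
EXEC dbo.sp_grant_proxy_to_subsystem @proxy_name = N'sqltest proxy', @subsystem_name = N'CmdExec';
EXEC dbo.sp_grant_proxy_to_subsystem @proxy_name = N'sqltest proxy', @subsystem_name = N'ActiveScripting';
GO
-- Let the sqltest login use the proxy in its job steps
EXEC dbo.sp_grant_login_to_proxy @login_name = N'sqltest', @proxy_name = N'sqltest proxy';
GO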

DBMAIL


Terminology:

Account: an e-mail account (not a SQL Server user) that has access to an SMTP server. Multiple accounts are only required if more than one SMTP server is present, or if more than one sender's e-mail address is to be used.

Profile: a set of e-mail accounts.

Note: A profile can consist of multiple accounts, and an account can be a member of different profiles; this is a many-to-many relationship.

Users: SQL Server users who, directly or through group membership, are members of DatabaseMailUserRole in msdb and can therefore send e-mail. Access is granted to users through profiles. With a public profile, every msdb user who is a member of DatabaseMailUserRole can send mail. With a private profile, a sysadmin must explicitly grant specific users access to the profile.

Prerequisites:

1. An SMTP server.

2. Database Mail enabled (Facets in SQL Server 2008, Surface Area Configuration in SQL Server 2005, or sp_configure; see the sketch after this list).

3. An e-mail account, or set of e-mail accounts, with access to the SMTP server.

4. The SQL Server service account should have access to the SMTP server, so it should be a domain account.

5. The database server should have access to the SMTP server; SMTP relay for the system should be enabled on the SMTP (Exchange) server.

6. SQL Server users who need to send e-mail must be members of DatabaseMailUserRole in msdb and have EXECUTE permission on sp_send_dbmail.

7. For private profiles, a sysadmin must grant profile access to the msdb users in DatabaseMailUserRole.

8. For a public profile, all users in DatabaseMailUserRole in msdb can send mail.
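
A minimal sketch covering prerequisites 2 and 6 (the sqltest login is a placeholder):

-- Enable the Database Mail extended stored procedures (prerequisite 2)
EXEC sp_configure 'show advanced options', 1;
RECONFIGURE;
EXEC sp_configure 'Database Mail XPs', 1;
RECONFIGURE;
GO
-- Let a login send mail (prerequisite 6); skip CREATE USER if the login is already a user in msdb
USE msdb;
GO
CREATE USER sqltest FOR LOGIN sqltest;
EXEC sp_addrolemember N'DatabaseMailUserRole', N'sqltest';
GO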

Introduction



1. Database Mail can be used to send e-mail messages to users from the SQL Server Database Engine. The messages can contain query results and can include files from any resource on the network.

2. Database Mail uses the standard Simple Mail Transfer Protocol (SMTP) to send mail. Unlike the older SQL Mail, it does not require a mail client such as Outlook to be installed on the SQL Server machine.

3. To minimize the impact on SQL Server, the component that delivers e-mail runs outside of SQL Server, in a separate process. SQL Server continues to queue e-mail messages even if the external process stops or fails; the queued messages are sent once the external process or the SMTP server comes back online.

4. A Database Mail profile allows more than one SMTP server to be specified. If one SMTP server is unavailable, mail can still be delivered through another.

5. Database Mail is fully supported on a cluster.

6. Database Mail provides background, or asynchronous, delivery. When sp_send_dbmail is called to send a message, Database Mail adds a request to a Service Broker queue and the stored procedure returns immediately. The external e-mail component receives the request and delivers the e-mail.

7. Database Mail allows multiple profiles within a SQL Server instance. Optionally, a profile can be chosen when sending a message.

8. Each profile can contain multiple failover accounts. Different profiles can be configured with different accounts to distribute e-mail across multiple e-mail servers.

9. Database Mail is fully supported on 64-bit installations of SQL Server.

10. To send Database Mail, a user must be a member of the DatabaseMailUserRole database role in the msdb database.

11. A public profile is available to all users (and groups) in the msdb database.

12. A private profile restricts access to a specified list of users.

13. Attachment file size limits and prohibited attachment file extensions can be configured with sysmail_configure_sp.

14. Database Mail maintains the information for e-mail accounts within the SQL Server Database Engine. It can be configured through the Database Mail Configuration Wizard or T-SQL.

15. Database Mail logs e-mail activity to SQL Server, the Microsoft Windows application event log, and tables in the msdb database.

16. Database Mail keeps copies of sent messages and attachments in the msdb database, so Database Mail usage can be audited and the retained messages reviewed.

17. Database Mail allows e-mail formatted as HTML.

18. Database Mail can be enabled by:

• the Database Mail Configuration Wizard

• sp_configure

• Surface Area Configuration

19. Only a sysadmin can use the Database Mail Configuration Wizard; users in the DatabaseMailUserRole database role in msdb can only send messages.

DB Mail Architecture



1. The configuration and security objects (profiles and accounts) used by Database Mail are created and stored in msdb.

2. The msdb database acts as the mail-host database that holds the messaging objects Database Mail uses to send e-mail. These objects include the sp_send_dbmail stored procedure and the data structures that hold information about messages.

3. The size of msdb needs to be monitored periodically, since this database retains every e-mail sent along with its attachments. A daily backup of msdb is important.

4. The Database Mail executable is an external program that reads from a queue in the msdb database and sends messages to e-mail servers.

5. Database Mail records logging information in the msdb database and the Microsoft Windows application event log.

Database Mail is designed on a queued architecture that uses service broker technologies. When users execute sp_send_dbmail, the stored procedure inserts an item into the mail queue and creates a record that contains the e-mail message. Inserting the new entry in the mail queue starts the external Database Mail process (DatabaseMail90.exe). The external process reads the e-mail information and sends the e-mail message to the appropriate e-mail server or servers. The external process inserts an item in the Status queue for the outcome of the send operation. Inserting the new entry in the status queue starts an internal stored procedure that updates the status of the e-mail message. Besides storing the sent, or unsent, e-mail message, Database Mail also records any e-mail attachments in the system tables. Database Mail views provide the status of messages for troubleshooting, and stored procedures allow for administration of the Database Mail queue.
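
A quick way to inspect the queue and the outcome of sends is to query the standard msdb views; a sketch:

USE msdb;
GO
-- Most recently queued/sent items and their status
SELECT TOP (20) mailitem_id, recipients, subject, sent_status, sent_date
FROM dbo.sysmail_allitems
ORDER BY mailitem_id DESC;

-- Any Database Mail errors or informational events
SELECT TOP (20) log_id, event_type, log_date, description
FROM dbo.sysmail_event_log
ORDER BY log_date DESC;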


Database Mail provides two types of configuration objects:

• Database Mail profiles are sets of accounts.

• Database Mail accounts contain information for e-mail servers.
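
A minimal configuration sketch using the standard msdb procedures (the SMTP server, e-mail addresses, and names below are placeholders):

USE msdb;
GO
-- Create an account that points at the SMTP server
EXEC dbo.sysmail_add_account_sp
     @account_name    = N'sqltest account',
     @email_address   = N'dba@example.com',
     @display_name    = N'SQL Server DBA',
     @mailserver_name = N'smtp.example.com';
-- Create a profile and attach the account as its first (sequence 1) account
EXEC dbo.sysmail_add_profile_sp @profile_name = N'sqltest profile';
EXEC dbo.sysmail_add_profileaccount_sp
     @profile_name    = N'sqltest profile',
     @account_name    = N'sqltest account',
     @sequence_number = 1;
-- Make the profile public so all DatabaseMailUserRole members can use it
EXEC dbo.sysmail_add_principalprofile_sp
     @profile_name   = N'sqltest profile',
     @principal_name = N'public',
     @is_default     = 1;
GO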


sp_send_dbmail

Sends an e-mail message to the specified recipients. The message may include a query result set, file attachments, or both. When mail is successfully placed in the Database Mail queue, sp_send_dbmail returns the mailitem_id of the message. This stored procedure is in the msdb database.


Syntax

sp_send_dbmail [ [ @profile_name = ] 'profile_name' ]

[ , [ @recipients = ] 'recipients [ ; ...n ]' ]

[ , [ @copy_recipients = ] 'copy_recipient [ ; ...n ]' ]

[ , [ @blind_copy_recipients = ] 'blind_copy_recipient [ ; ...n ]' ]

[ , [ @subject = ] 'subject' ]

[ , [ @body = ] 'body' ]

[ , [ @body_format = ] 'body_format' ]

[ , [ @importance = ] 'importance' ]

[ , [ @sensitivity = ] 'sensitivity' ]

[ , [ @file_attachments = ] 'attachment [ ; ...n ]' ]

[ , [ @query = ] 'query' ]

[ , [ @execute_query_database = ] 'execute_query_database' ]

[ , [ @attach_query_result_as_file = ] attach_query_result_as_file ]

[ , [ @query_attachment_filename = ] query_attachment_filename ]

[ , [ @query_result_header = ] query_result_header ]

[ , [ @query_result_width = ] query_result_width ]

[ , [ @query_result_separator = ] 'query_result_separator' ]

[ , [ @exclude_query_output = ] exclude_query_output ]

[ , [ @append_query_error = ] append_query_error ]

[ , [ @query_no_truncate = ] query_no_truncate ]

[ , [ @mailitem_id = ] mailitem_id ] [ OUTPUT ]

Arguments

[ @profile_name = ] 'profile_name'

Is the name of the profile to send the message from. The profile_name is of type sysname, with a default of NULL. The profile_name must be the name of an existing Database Mail profile. When no profile_name is specified, sp_send_dbmail uses the default private profile for the current user. If the user does not have a default private profile, sp_send_dbmail uses the default public profile for the msdb database. If the user does not have a default private profile and there is no default public profile for the database, @profile_name must be specified.

[ @recipients = ] 'recipients'

Is a semicolon-delimited list of e-mail addresses to send the message to. The recipients list is of type varchar(max). Although this parameter is optional, at least one of @recipients, @copy_recipients, or @blind_copy_recipients must be specified, or sp_send_dbmail returns an error.

[ @copy_recipients = ] 'copy_recipients'

Is a semicolon-delimited list of e-mail addresses to carbon copy the message to. The copy recipients list is of type varchar(max). Although this parameter is optional, at least one of @recipients, @copy_recipients, or @blind_copy_recipients must be specified, or sp_send_dbmail returns an error.

[ @blind_copy_recipients = ] 'blind_copy_recipients'

Is a semicolon-delimited list of e-mail addresses to blind carbon copy the message to. The blind copy recipients list is of type varchar(max). Although this parameter is optional, at least one of @recipients, @copy_recipients, or @blind_copy_recipients must be specified, or sp_send_dbmail returns an error.

[ @subject = ] 'subject'

Is the subject of the e-mail message. The subject is of type nvarchar(255). If no subject is specified, the default is 'SQL Server Message'.

[ @body = ] 'body'

Is the body of the e-mail message. The message body is of type nvarchar(max), with a default of NULL.

[ @body_format = ] 'body_format'

Is the format of the message body. The parameter is of type varchar(20), with a default of NULL. When specified, the headers of the outgoing message are set to indicate that the message body has the specified format. The parameter may contain one of the following values:

• TEXT

• HTML

Defaults to TEXT.

[ @importance = ] 'importance'

Is the importance of the message. The parameter is of type varchar(6). The parameter may contain one of the following values:

• Low

• Normal

• High

Defaults to Normal.

[ @sensitivity = ] 'sensitivity'

Is the sensitivity of the message. The parameter is of type varchar(12). The parameter may contain one of the following values:

• Normal

• Personal

• Private

• Confidential

Defaults to Normal.

[ @file_attachments = ] 'file_attachments'

Is a semicolon-delimited list of file names to attach to the e-mail message. Files in the list must be specified as absolute paths. The attachments list is of type nvarchar(max).

[ @query = ] 'query'

Is a query to execute. The results of the query can be attached as a file, or included in the body of the e-mail message. The query is of type nvarchar(max), and can contain any valid Transact-SQL statements. Note that the query is executed in a separate session, so local variables in the script calling sp_send_dbmail are not available to the query.

[ @execute_query_database = ] 'execute_query_database'

Is the database context within which the stored procedure runs the query. The parameter is of type sysname, with a default of the current database. This parameter is only applicable if @query is specified.

[ @attach_query_result_as_file = ] attach_query_result_as_file

Specifies whether the result set of the query is returned as an attached file. attach_query_result_as_file is of type bit, with a default of 0.

When the value is 0, the query results are included in the body of the e-mail message, after the contents of the @body parameter. When the value is 1, the results are returned as an attachment. This parameter is only applicable if @query is specified.

[ @query_attachment_filename = ] query_attachment_filename

Specifies the file name to use for the result set of the query attachment. query_attachment_filename is of type nvarchar(255), with a default of NULL. This parameter is ignored when attach_query_result is 0. When attach_query_result is 1 and this parameter is NULL, Database Mail creates an arbitrary filename.

[ @query_result_header = ] query_result_header

Specifies whether the query results include column headers. The query_result_header value is of type bit. When the value is 1, query results contain column headers. When the value is 0, query results do not include column headers. This parameter defaults to 1. This parameter is only applicable if @query is specified.

[ @query_result_width = ] query_result_width

Is the line width, in characters, to use for formatting the results of the query. The query_result_width is of type int, with a default of 256. The value provided must be between 10 and 32767. This parameter is only applicable if @query is specified.

[ @query_result_separator = ] 'query_result_separator'

Is the character used to separate columns in the query output. The separator is of type char(1). Defaults to ' ' (space).

[ @exclude_query_output = ] exclude_query_output

Specifies whether to return the output of the query execution in the e-mail message. exclude_query_output is bit, with a default of 0. When this parameter is 0, the execution of the sp_send_dbmail stored procedure prints the message returned as the result of the query execution on the console. When this parameter is 1, the execution of the sp_send_dbmail stored procedure does not print any of the query execution messages on the console.

[ @append_query_error = ] append_query_error

Specifies whether to send the e-mail when an error returns from the query specified in the @query argument. append_query_error is bit, with a default of 0. When this parameter is 1, Database Mail sends the e-mail message and includes the query error message in the body of the e-mail message. When this parameter is 0, Database Mail does not send the e-mail message, and sp_send_dbmail ends with return code 1, indicating failure.

[ @query_no_truncate = ] query_no_truncate

Specifies whether to execute the query with the option that avoids truncation of large variable length data types (varchar(max), nvarchar(max), varbinary(max), xml, text, ntext, image, and user-defined data types). When set, query results do not include column headers. The query_no_truncate value is of type bit. When the value is 0 or not specified, columns in the query truncate to 256 characters. When the value is 1, columns in the query are not truncated. This parameter defaults to 0.

Note:

When used with large amounts of data, the @query_no_truncate option consumes additional resources and can slow server performance.

[ @mailitem_id = ] mailitem_id [ OUTPUT ]

Optional output parameter returns the mailitem_id of the message. The mailitem_id is of type int.

Return Code Values: 0 (success) or 1 (failure).
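
A usage sketch combining a query attachment with the profile configured above (the profile name and recipient address are placeholders):

-- Send the result of a query as an attached file
EXEC msdb.dbo.sp_send_dbmail
     @profile_name = N'sqltest profile',
     @recipients   = N'dba@example.com',
     @subject      = N'Database Mail test',
     @body         = N'Query results are attached.',
     @query        = N'SELECT @@SERVERNAME AS server_name, GETDATE() AS run_at;',
     @attach_query_result_as_file = 1,
     @query_attachment_filename   = N'results.txt';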

Friday, March 19, 2010

TEXT to VARCHAR(MAX)



You may have migrated from SQL Server 2000 to SQL Server 2005/2008 and have several tables with text data type columns. The text and image data types are deprecated and are replaced by varchar(max) and varbinary(max). In addition, when text data is stored outside the row, every select, insert, or update statement that references the column incurs extra LOB reads, slowing down query performance.

You can check whether the text data is stored inside the row or on a separate data page by using the following query:

select objectproperty(object_id('tablename'),'TableTextInRowLimit')

If the output is 0, the 'text in row' limit is not set, i.e. the text column(s) are stored outside the data page and a pointer in the data row references the text data stored on text pages.

In SQL Server 2005 and above, large-value (LOB) data such as varchar(max) is stored inside the data row by default when it fits. To store LOB data outside the row:

EXEC sp_tableoption 'tablename', 'large value types out of row', 1;
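
Conversely, for legacy text columns the 'text in row' table option can pull small values back into the data page; a sketch (the 7000-byte limit is just an example value, and 'tablename' is a placeholder):

-- Store text values up to 7000 bytes in the data row instead of on text pages
EXEC sp_tableoption 'tablename', 'text in row', '7000';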

Step-by-step process to convert text to varchar(max)

We will take an employee table as an example and go through the step-by-step process of converting a text column to varchar(max).

Script 1 :
use tempdb
go
if exists (select 1 from sys.tables where name = 'employee')
begin
drop table dbo.employee
end

create table dbo.employee
(
employee_id int identity(1,1) primary key ,
employee_name varchar(25) not null,
salary numeric(10,2) not null,
employee_info text
)
go

insert into employee ( employee_name,salary,employee_info)
values ('Rama',10000,'Good Character' )
insert into employee ( employee_name,salary,employee_info)
values ('Mohammad',40000,'Nice' )
insert into employee ( employee_name,salary,employee_info)
values ('James',50000,'Compassion' )
insert into employee ( employee_name,salary,employee_info)
values ('Saraswathi',50000,'Has high Knowledge' )
insert into employee ( employee_name,salary,employee_info)
values ('Krishna',20000,'Very Good' )
insert into employee ( employee_name,salary,employee_info)
values ('Fathima',32000.40,'Caring' )
insert into employee ( employee_name,salary,employee_info)
values ('Teresa',44000.50,'Professional' )
insert into employee ( employee_name,salary,employee_info)
values ('Buddha',90000.00,'Calm' )
go

Script 2 :

select objectproperty(object_id('employee'),'TableTextInRowLimit')
-- Returns 0, which means the text column values are stored outside the data page
go

Now, let us create a function that returns the page number in a readable file:page format.
Script 3 :
-- Converts a 6-byte page pointer (as stored in the system views) into 'file:page' format.
-- Bytes 5-6 hold the file id and bytes 1-4 hold the page number, both little-endian.
create function dbo.ufn_convertpage_nums(@page_num binary (6))
returns varchar(20)
as
begin
return
( convert(varchar(2),(convert(int,substring(@page_num,6,1)) * power(2,8)) +
convert(int,substring(@page_num,5,1)))) + ':' +
convert( varchar(11),
( convert(int, substring(@page_num,4,1 )) * power(2,24)) +
( convert(int, substring(@page_num,3,1 )) * power(2,16)) +
( convert(int, substring(@page_num,2,1 )) * power(2,8)) +
( convert(int, substring(@page_num,1,1 )))
)
end

go



Script 4 : CAUTION: do not run this on a production server; run it only on a test server (it clears server-wide caches).

use tempdb
go

DBCC DROPCLEANBUFFERS WITH NO_INFOMSGS
DBCC FREEPROCCACHE WITH NO_INFOMSGS
DBCC FREESYSTEMCACHE ('All')

set statistics io on
go

set nocount on
select employee_id,employee_name, employee_info from dbo.employee
go

set statistics io off
go

Output is

-- Table 'employee'. Scan count 1, logical reads 3, physical reads 0, read-ahead reads 0, lob logical reads 16, lob physical reads 0, lob read-ahead reads 0.
There are 16 LOB (large object) logical reads for the 8 rows in the table, i.e. two per row.
Script 5 :
select convert(varchar(25),object_name(p.object_id)) as tablename,
a.type_desc as page_type_desc,
total_pages as totpages,
used_pages,
data_pages ,
partition_number as pnum,rows ,
dbo.ufn_convertpage_nums(first_page) as first_page,
dbo.ufn_convertpage_nums(root_page) as root_page,
dbo.ufn_convertpage_nums(first_iam_page) as first_iam_page,
partition_id,
allocation_unit_id
from sys.partitions p
inner join sys.system_internals_allocation_units a
on p.partition_id = a.container_id
where p.object_id = object_id('dbo.employee')
go



tablename page_type_desc totpages used_pages data_pages pnum rows first_page root_page first_iam_page partition_id allocation_unit_id

employee IN_ROW_DATA 2 2 1 1 8 1:155 1:155 1:158 72057594038648800 72057594043236300

employee LOB_DATA 2 2 0 1 8 1:153 1:153 1:154 72057594038648800 72057594043301800



There are 2 LOB_DATA pages



After migrating from a previous version of SQL Server to SQL Server 2005/2008, the text data type should be altered to varchar(max):

Script 6 :
alter table employee
alter column employee_info varchar(max)
go



Again, Run Script 4

Table 'employee'. Scan count 1, logical reads 3, physical reads 1, read-ahead reads 0, lob logical reads 8, lob physical reads 1, lob read-ahead reads 0.
LOB reads are reduced from 16 to 8.


Running Script 5
tablename page_type_desc totpages used_pages data_pages pnum rows first_page root_page first_iam_page partition_id allocation_unit_id

employee IN_ROW_DATA 2 2 1 1 8 1:155 1:155 1:158 72057594039238600 72057594044809200

employee LOB_DATA 2 2 0 1 8 1:153 1:153 1:154 72057594039238600 72057594044874700

employee ROW_OVERFLOW_DATA 0 0 0 1 8 0:0 0:0 0:0 72057594039238600 72057594044940200

You can see that a new page type description, ROW_OVERFLOW_DATA, has appeared. This page type is used when a row does not have enough space to stay within the 8060-byte row-size limit.

To make this more efficient and reduce LOB reads, we update the table: assigning the column to itself pushes the out-of-row data back into the data row.

Script 7 :

update employee set
employee_info = employee_info
go



Note: if there is a large number of rows, update in batches to reduce logging and improve performance.

Again, Run Script 4

Table 'employee'. Scan count 1, logical reads 3, physical reads 0, read-ahead reads 0, lob logical reads 0,

lob physical reads 0, lob read-ahead reads 0.

The number of LOB reads is reduced to 0, which means the data has been moved from the external text pages into the data row.

As an alternative, we can create the table with varchar(max) instead of the text data type.

Script 8 :

Note: in SQL Server 2005 and later, varchar(max) data is stored in the data row by default when the row length does not exceed 8060 bytes. When a row reaches this threshold, the data is pushed out to a large object (text) page.

if exists (select 1 from sys.tables where name = 'employee')
begin
drop table dbo.employee
end

create table dbo.employee
(
employee_id int identity(1,1) primary key ,
employee_name varchar(25) not null,
salary numeric(10,2) not null,
employee_info varchar(max)
)
go



Run the insert script (Script 1 )

Run Script 4

Table 'employee'. Scan count 1, logical reads 3, physical reads 0, read-ahead reads 0,
lob logical reads 0, lob physical reads 0, lob read-ahead reads 0.

Run Script 5

tablename page_type_desc totpages used_pages data_pages pnum rows first_page root_page first_iam_page partition_id allocation_unit_id

employee IN_ROW_DATA 2 2 1 1 8 1:153 1:153 1:154 72057594039304100 72057594045005800

employee LOB_DATA 0 0 0 1 8 0:0 0:0 0:0 72057594039304100 72057594045071300

After migration, or whenever text columns remain, it is better to create a new table that replaces the text data type with varchar(max) and populate it with data from the old table.

Conclusion

Recommended Method After Migration (a T-SQL sketch follows this list):


1. Create a new table (employee_new), replacing the text data type with varchar(max).
2. Populate employee_new from the old table (employee).
Note: if the number of rows is very high, insert in batches to avoid bloating the log file and to improve performance.
3. Drop the old table (employee). You may need to drop constraints, if any, before dropping the table.
4. Rename the new table (employee_new) to the old name (employee) and put back any dropped constraints.
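
A minimal sketch of the recommended method, using the sample employee table from this post (constraint handling omitted):

-- 1. New table with varchar(max) instead of text
create table dbo.employee_new
(
employee_id int identity(1,1) primary key ,
employee_name varchar(25) not null,
salary numeric(10,2) not null,
employee_info varchar(max)
)
go
-- 2. Copy the data across (use batches for very large tables)
set identity_insert dbo.employee_new on
insert into dbo.employee_new (employee_id, employee_name, salary, employee_info)
select employee_id, employee_name, salary, employee_info
from dbo.employee
set identity_insert dbo.employee_new off
go
-- 3. Drop the old table and 4. rename the new one
drop table dbo.employee
exec sp_rename 'dbo.employee_new', 'employee'
go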

Alternative Method (a batching sketch follows this list):

1. Alter the column data type from text to varchar(max).
2. Update the column to itself to pull the data back into the row.
Note: if the number of rows is very high, update in batches to avoid bloating the log file and to improve performance.
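
A batching sketch for step 2, looping over the key in fixed-size ranges (the batch size of 1000 is arbitrary):

-- Re-assign employee_info to itself in batches of 1000 rows by key range
declare @minid int, @maxid int, @batch int
set @batch = 1000
select @minid = min(employee_id), @maxid = max(employee_id) from dbo.employee

while @minid <= @maxid
begin
    update dbo.employee
    set employee_info = employee_info
    where employee_id between @minid and @minid + @batch - 1

    set @minid = @minid + @batch
end
go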