Labels

Monday, 21 September 2020

Script to send an e-mail once transaction log file usage reaches 60%

 

USE msdb

SET NOCOUNT ON;

-- Captures DBCC SQLPERF(LOGSPACE) output and e-mails an alert through
-- Database Mail when the monitored database's transaction log file is more
-- than 60% full.
IF OBJECT_ID('tempdb..#MonitorTransactionLogFileUsage') IS NOT NULL
	DROP TABLE #MonitorTransactionLogFileUsage

-- Column layout matches the DBCC SQLPERF(LOGSPACE) result set so that
-- INSERT ... EXEC can capture it directly.
CREATE TABLE #MonitorTransactionLogFileUsage (
	ID INT IDENTITY(1, 1)
	,DatabaseName SYSNAME
	,LogSizeInMB DECIMAL(18, 5)
	,LogSpaceUsedInPercentage DECIMAL(18, 5)
	,[Status] INT
	)

INSERT INTO #MonitorTransactionLogFileUsage (
	DatabaseName
	,LogSizeInMB
	,LogSpaceUsedInPercentage
	,[Status]
	)
EXEC ('DBCC SQLPERF(LOGSPACE)')

IF OBJECT_ID('tempdb..#LogfileGrowth') IS NOT NULL
	DROP TABLE #LogfileGrowth

CREATE TABLE #LogfileGrowth (LogGrowth VARCHAR(4000))

-- Build one alert line per database over the 60% threshold.
-- COLLATE is applied because sys.databases uses the server collation, which
-- may differ from the temp table's tempdb collation.
INSERT INTO #LogfileGrowth (LogGrowth)
SELECT 'Database ' + CAST(a.DatabaseName AS VARCHAR(50))
	+ ' LogSpaceUsedInPercentage  has grown to '
	+ CAST(a.LogSpaceUsedInPercentage AS VARCHAR(50)) + '% ' + ' '
	+ 'Database recover model is '
	+ CAST(b.recovery_model_desc COLLATE Latin1_General_CI_AS_KS_WS AS VARCHAR(50))
	+ ' and it is growing due to '
	+ CAST(b.log_reuse_wait_desc AS VARCHAR(150)) AS LogGrowth
FROM #MonitorTransactionLogFileUsage a
INNER JOIN sys.databases b ON a.DatabaseName = b.name
	AND a.DatabaseName = 'DB Name you would like to monitor ' -- placeholder: set to the database you watch
WHERE a.LogSpaceUsedInPercentage > 60

-- Concatenate every alert line into the mail body. (The previous TOP 1
-- without ORDER BY picked an arbitrary row and silently dropped the rest.)
DECLARE @body1 VARCHAR(4000)

SELECT @body1 = COALESCE(@body1 + '<br/>', '') + LogGrowth
FROM #LogfileGrowth

DECLARE @server_name VARCHAR(100);

SELECT @server_name = CONVERT(VARCHAR(100), SERVERPROPERTY('servername'));

DECLARE @sub VARCHAR(1000);

SELECT @sub = 'Logfile is getting filled on  ' + @server_name + ' ';

DECLARE @rowcount INT

-- COUNT(*) always returns exactly one row, so no HAVING workaround is needed.
SELECT @rowcount = COUNT(*)
FROM #LogfileGrowth

IF @rowcount > 0
BEGIN
	EXEC msdb.dbo.sp_send_dbmail @profile_name = 'your db profile name' -- placeholder: Database Mail profile
		,@recipients = ' mention list of recipients you need to send mail' -- placeholder: recipient list
		,@subject = @sub
		,@body = @body1
		,@body_format = 'HTML';
END

Script to monitor transaction log file usage and trigger backups automatically once log file usage reaches 40%

  

-- Monitors transaction log usage for every database (via DBCC SQLPERF) and,
-- for each database whose log file is at least 40% full, starts its
-- transaction log backup job if that job exists and is not already running.
IF OBJECT_ID('tempdb..#MonitorTransactionLogFileUsage') IS NOT NULL

	DROP TABLE #MonitorTransactionLogFileUsage

-- Column layout matches the DBCC SQLPERF(LOGSPACE) result set so that
-- INSERT ... EXEC can capture it directly.
CREATE TABLE #MonitorTransactionLogFileUsage (
	ID INT IDENTITY(1, 1)
	,DatabaseName SYSNAME
	,LogSizeInMB DECIMAL(18, 5)
	,LogSpaceUsedInPercentage DECIMAL(18, 5)
	,[Status] INT
	)

INSERT INTO #MonitorTransactionLogFileUsage (
	DatabaseName
	,LogSizeInMB
	,LogSpaceUsedInPercentage
	,[Status]
	)
EXEC ('DBCC SQLPERF(LOGSPACE)')

-- Diagnostic output only: show the captured log-usage snapshot.
SELECT *
FROM #MonitorTransactionLogFileUsage

/* declare variables */
DECLARE @variable SYSNAME -- current database name fetched from the cursor
	,@jobname SYSNAME -- Agent job name derived from the database name

-- Iterate over the databases whose log file is 40% or more full.
DECLARE cursor_name CURSOR FAST_FORWARD READ_ONLY
FOR
SELECT DatabaseName
FROM #MonitorTransactionLogFileUsage
WHERE LogSpaceUsedInPercentage >= 40

OPEN cursor_name

FETCH NEXT
FROM cursor_name
INTO @variable

WHILE @@FETCH_STATUS = 0
BEGIN
	-- Assumes backup jobs follow the log-shipping naming convention
	-- 'LSBackup_<database>' (NOTE(review): confirm this matches your environment).
	SET @jobname = N'LSBackup_' + @variable

	-- Only act when a job with that name actually exists in msdb.
	IF EXISTS (
			SELECT name
			FROM msdb.dbo.sysjobs
			WHERE name = @jobname
			)
	BEGIN
		-- A job is treated as currently running when sysjobactivity shows a
		-- requested start time with no stop time yet.
		IF NOT EXISTS (
				SELECT 1
				FROM msdb.dbo.sysjobs_view job
				INNER JOIN msdb.dbo.sysjobactivity activity  
ON job.job_id = activity.job_id
WHERE activity.run_requested_date IS NOT NULL
		AND activity.stop_execution_date IS NULL
		AND job.name = @jobname
				)
		BEGIN
			-- Not running: kick off the log backup job.
			EXEC msdb.dbo.sp_start_job @job_name = @jobname
		END
		ELSE
		BEGIN
			PRINT 'Job ''' + @jobname + ''' is already started ';
		END
	END

	FETCH NEXT
	FROM cursor_name
	INTO @variable
END

CLOSE cursor_name

DEALLOCATE cursor_name

DROP TABLE #MonitorTransactionLogFileUsage

Thursday, 17 September 2020

Identify queries causing logfile growth or consuming CPU,Memory

 When troubleshooting performance-related issues — such as what caused log file growth, what is currently running, or what is causing CPU spikes — the query below is very handy.

-- Top 50 cached query plans ranked by total logical writes: handy when
-- investigating log-file growth, CPU spikes, or currently expensive queries.
SELECT TOP 50 stats.execution_count
	,exec_count_per_sec = stats.execution_count
	/ NULLIF(DATEDIFF(second, stats.creation_time, stats.last_execution_time), 0)
	,stats.creation_time
	,stats.last_execution_time
	,stats.total_worker_time AS Total_CPU
	,total_CPU_inSeconds = stats.total_worker_time / 1000000 -- microseconds -> seconds
	,total_elapsed_time_inSeconds = stats.total_elapsed_time / 1000000 -- microseconds -> seconds
	,stats.total_logical_reads
	,stats.total_logical_writes
	,stats.total_elapsed_time
	,batch.TEXT
	,plans.query_plan
FROM sys.dm_exec_query_stats AS stats
CROSS APPLY sys.dm_exec_sql_text(stats.sql_handle) AS batch
CROSS APPLY sys.dm_exec_query_plan(stats.plan_handle) AS plans
--WHERE batch.text like 'test%'
ORDER BY stats.total_logical_writes DESC

 



Friday, 4 September 2020

Fix : Msg 4928, Level 16, State 1, Line 9 Cannot alter column 'XXXXXX' because it is 'REPLICATED'

 Msg 4928, Level 16, State 1, Line 9

Cannot alter column 'xxxxxxxxxx' because it is 'REPLICATED'.  

I got this error when I tried to increase a table column's data type length from 150 to 300. I had removed the article from the publication, but the error still occurred.
 
Finally the below procedure helped me to fix the issue :

This article had previously been removed from two publications, so the above error should not have occurred — we have a consistency issue.

 Diagnostic  1: Check syscolumns.colstat.

 

Select * from syscolumns where id = object_id(<table name>)

The value of the colstat field for the particular column was 8192. I've run into this before on SQL 2k. The fix at that time was to update this field to 0 for the offending column. If you attempt that now, you get the following error :

Msg 259, Level 16, State 1, Line 2

Ad hoc updates to system catalogs are not allowed.

Either way... the 8192 is going to be an issue. I don't have the luxury of putting my production server into single user mode to modify system catalogs... so my fix needs to be real time and online.

Diagnostic 2: check sys.columns

Select is_non_sql_subscribed ,* from sys.columns where object_id = object_id(<table name>)

If the is_non_sql_subscribed field = 1, you have an issue. This can happen on servers with any type of replication including SQL Server only.  I'm currently running SQL 2k5 replication in my environment. No third party replication. Either way , the value returned for me was "1" which identifies an inconsistency issue. 

When the snapshot agent creates a new snapshot for your publication, it sets this field to 1; once the snapshot is finished, the agent resets the value back to 0. Our solution will use this snapshot agent behavior.

 

Workaround:

Create a new publication with your questionable article. Snapshot it. Check the data. SQL will attempt to fix your inconsistencies and properly flag the is_non_sql_subscribed field. Drop the article from your new publication, make your table edits and add the article back to your original publications.