Migration.properties File
The Migration.properties file explains all the parameters required to run the CT-V Bulk Utility for tokenization of plaintext in File-to-File operation using a token vault. It allows the user to configure the input and output files.
Below is a sample parameters file that can be used as template:
# CipherTrust Vaulted Tokenization Bulk Migration Configuration
#
# To run migration use the following command
#
# java com.safenet.token.migration.main config-file-path -t
#
# Note: This is a sample file and needs to be customized to your specific
# environment
#
###############################################################################
#####################
# Input Configuration
# Input.FilePath
# Input.Type
#####################
#
# Input.FilePath
#
# Full path to the input file
#
Input.FilePath = C:\\Desktop\\migration\\customerTable.csv
#
# Input.Type
#
# Format of the input file
#
# Valid values
# Delimited
# Positional
#
Input.Type = Delimited
###############################
# Delimited Input Configuration
# Input.EscapeCharacter
# Input.QuoteCharacter
# Input.ColumnDelimiter
###############################
#
# Input.EscapeCharacter
#
# Specifies a character that is used to 'escape' special characters that
# alter input processing
#
# Note: this parameter is ignored if Input.Type is set to Positional
#
Input.EscapeCharacter = \\
#
# Input.QuoteCharacter
#
# Specifies a character that is used around character sequences that contain
# delimiter characters and are to be treated as a single column value
#
# Note: this parameter is ignored if Input.Type is set to Positional
#
Input.QuoteCharacter = "
#
# Input.ColumnDelimiter
#
# Specifies a character that separates columns in the input file
#
# Note: this parameter is ignored if Input.Type is set to Positional
#
Input.ColumnDelimiter = ,
################################
# Positional Input Configuration
# Input.Column0.Start
# Input.Column0.End
# ...
# Input.ColumnN.Start
# Input.ColumnN.End
################################
# Note: For positional input type, a tab will be considered as a single character.
#
# Input.ColumnN.Start
#
# Specifies zero-based position where the value starts. The character in the
# specified position is included in the column value. This value must be
# specified for every column in the input file which has to be processed
# or passed-through and included in the output file.
#
# Note: This parameter is ignored if Input.Type is set to Delimited
#
Input.Column0.Start =
#
# Input.ColumnN.End
#
# Specifies zero-based position where the value ends. The character in the
# specified position is included in the column value. This value must be
# specified for every column in the input file which has to be processed
# or passed-through and included in the output file.
#
# Note: This parameter is ignored if Input.Type is set to Delimited
#
Input.Column0.End =
###############################
# Decryption Configuration
# Decryptor.Column0.Key
# Decryptor.Column0.Algorithm
# Decryptor.Column0.Encoding
# ...
# Decryptor.ColumnN.Key
# Decryptor.ColumnN.Algorithm
# Decryptor.ColumnN.Encoding
###############################
#
# Decryptor.ColumnN.Key
#
# Specifies key name for a column to be decrypted. If a column does not need to
# be decrypted, do not specify this parameter. If this parameter is specified,
# all other decryption parameters for the same column must also be specified.
#
Decryptor.Column0.Key =
#
# Decryptor.ColumnN.Algorithm
#
# Specifies decryption algorithm for a column to be decrypted. If a column
# does not need to be decrypted, do not specify this parameter. If this
# parameter is specified, all other decryption parameters for the same column
# must also be specified.
#
# Valid values
# AES/CBC/PKCS5Padding
#
Decryptor.Column0.Algorithm =
#
# Decryptor.ColumnN.Encoding
#
# Specifies encoding for a column to be decrypted. If a column does not need to
# be decrypted, do not specify this parameter. If this parameter is specified,
# all other decryption parameters for the same column must also be specified.
#
# Valid values
# Base16
# Base64
#
Decryptor.Column0.Encoding =
###########################################
# Tokenization Configuration
# Tokenizer.Column0.TokenVault
# Tokenizer.Column0.CustomDataColumnIndex
# Tokenizer.Column0.TokenFormat
# Tokenizer.Column0.LuhnCheck
# ...
# Tokenizer.ColumnN.TokenVault
# Tokenizer.ColumnN.CustomDataColumnIndex
# Tokenizer.ColumnN.TokenFormat
# Tokenizer.ColumnN.LuhnCheck
############################################
#
# Tokenizer.ColumnN.TokenVault
#
# Specifies the name of the token vault that will receive the tokens for
# this column. If the column does not need to be tokenized, do not specify
# this parameter. If this parameter is specified, all other tokenization
# parameters for the same column must also be specified. Token vault
# specified in this parameter must exist before running bulk migration.
#
Tokenizer.Column5.TokenVault = BTM
Tokenizer.Column6.TokenVault = BTM_2
#
# Tokenizer.ColumnN.CustomDataColumnIndex
#
# Specifies zero-based index of the column in the input file that contains
# custom data values for this column. If the column does not need to be
# tokenized, do not specify this parameter. If this parameter is specified,
# all other tokenization parameters for the same column must be specified.
#
Tokenizer.Column5.CustomDataColumnIndex = -1
Tokenizer.Column6.CustomDataColumnIndex = -1
#
# Tokenizer.ColumnN.TokenPropertyColumnIndex
#
# Specifies zero-based index of the column in the input file that contains
# token property values for this column. If the column does not need to be
# tokenized, do not specify this parameter. If this parameter is specified,
# all other tokenization parameters for the same column must be specified.
#
Tokenizer.Column5.TokenPropertyColumnIndex = 2
#
# Tokenizer.ColumnN.CustomTokenPropertyColumnIndex
#
# Specifies zero-based index of the column in the input file that contains
# custom token property values for this column. If the column does not need to be
# tokenized, do not specify this parameter. If this parameter is specified,
# all other tokenization parameters for the same column must be specified.
#
Tokenizer.Column5.CustomTokenPropertyColumnIndex = 3
#
# Tokenizer.ColumnN.TokenFormat
#
# Specifies token format that will be used to tokenize this column. If the
# column does not need to be tokenized, do not specify this parameter. If
# this parameter is specified, all other tokenization parameters for the
# same column must also be specified.
# Token format may be specified using format name for built-in formats,
# or format identifier for built-in and custom formats. It is recommended
# to use format names for built-in formats, as documented in the
# CipherTrust Vaulted Tokenization User Guide.
# For custom formats, use format identifier
# returned by TokenService.createNewFormat API. The predefined formats are listed
# at the end of this chapter.
#
# Valid values
# <string>
# <number>
#
# <string> - is the name of a built-in token format, such as LAST_FOUR_TOKEN
# For complete list of supported token formats, refer to
# CipherTrust Vaulted Tokenization User Guide.
#
# <number> - is the format identifier (between 101 and 999) returned by
# TokenService.createNewFormat API.
#
Tokenizer.Column5.TokenFormat = LAST_FOUR_TOKEN
Tokenizer.Column6.TokenFormat = EMAIL_ADDRESS_TOKEN
#
# Tokenizer.ColumnN.LuhnCheck
#
# Specifies whether the generated token will pass or fail luhn check. If the
# column does not need to be tokenized, don't specify this parameter. If
# this parameter is specified, all other tokenization parameters for the
# same column must be specified.
#
# Valid values
# true
# false
#
Tokenizer.Column5.LuhnCheck = true
Tokenizer.Column6.LuhnCheck = false
######################
# Output Configuration
# Output.FilePath
# Output.Sequence
######################
#
# Output.FilePath
#
# Specifies full path to the output file
#
Output.FilePath = C:\\Desktop\\migration\\tokenized.csv
#
# Intermediate.FilePath
#
# Specifies the file path where the intermediate temporary chunks of outputs
# are stored.
#
# Note: If no intermediate file path is set, then the path specified in
# Output.FilePath is used as the intermediate file path.
#
Intermediate.FilePath =
#
# Output.Sequence
#
# Specifies sequence of the input columns in which they are written to the
# output file. Each column in the input file that has to appear in the
# output file has to have its column index specified in the output sequence.
# For each column in the input file, the sequence number can be either positive
# or negative. Positive sequence number indicates that the decrypted and/or
# tokenized value is written to the output file, if the column was decrypted
# and/or tokenized. Negative sequence number indicates that the original value
# from the input file is written to the output file. For columns that are not
# decrypted and not tokenized (pass-through columns) specifying positive or
# negative number has the same effect.
# Column indexes are separated by , character.
#
Output.Sequence = 0,-1,-2,-3,-4,5,6
###############################
# Multi-threading Configuration
# Threads.BatchSize
# Threads.CryptoThreads
# Threads.TokenThreads
# Threads.PollTimeout
###############################
#
# Threads.BatchSize
#
# Specifies number of rows per batch.
#
Threads.BatchSize = 10000
#
# Threads.CryptoThreads
#
# Specifies number of threads that will perform decryption of columns
# as required.
#
Threads.CryptoThreads = 5
#
# Threads.TokenThreads
#
# Specifies number of threads that will perform tokenization of columns
# as required.
#
Threads.TokenThreads = 2
#
# Threads.PollTimeout
#
# Specifies the amount of time (in milliseconds) processing threads will
# wait for a batch on the data queue before timing out, checking for
# administrative commands on the management queue, and then checking for
# another batch on the data queue.
# Default value of this parameter is 100.
# Do not modify this parameter unless instructed by customer support.
#
Threads.PollTimeout = 100
#
# Logger.LogLevel
#
# Specifies the level of details displayed
#
# Valid values
# Normal
# Verbose
#
Logger.LogLevel = Verbose
#
# Password obfuscation
#
# Specifies if the provided passwords are obfuscated or not
#
# Valid values
# true
# false
# Note: Default value is set to false.
#
PasswordObfuscation = false
#
# Credential obfuscation
#
# If true, utility accepts KeyManager and database credentials obfuscated with obfuscator utility
#
# Valid values
# true
# false
# Note: Default value is set to false.
#
CredentialObfuscation = false
#
# TokenSeparator
#
# Specifies if the tokens generated are space separated or not.
# Note: This parameter is ignored if Input.Type is set to Delimited.
#
# Valid values
# true
# false
# Note: Default value is set to true.
#
TokenSeparator = true
#
# StreamInputData
#
# Specifies whether the input data is streamed or not.
#
# Valid values
# true
# false
# Note: Default value is set to false.
#
StreamInputData = false
# Note: If StreamInputData is set to true, the TokenSeparator parameter is not considered.
#
# CodePageUsed
#
# Specifies the code page in use.
# Used with EBCDIC character sets; for example, use "ibm500" for EBCDIC International.
# https://docs.oracle.com/javase/7/docs/api/java/nio/charset/Charset.html
#
CodePageUsed =
# Note: If no value is specified, by default, the ASCII character set is used.
#
# FailureThreshold
#
# Specifies the number of errors after which the Bulk Utility aborts the
# tokenization operation.
# Valid values
# -1 = Tokenization continues irrespective of number of errors during the
# operation. This is the default value.
# 0 = Bulk Utility aborts the operation on occurrence of any error.
# Any positive value = Indicates the failure threshold, after which the Bulk
# Utility aborts the operation.
#
# Note: If no value or a negative value is specified, Bulk Utility will continue
# irrespective of number of errors.
#
FailureThreshold = -1
###############################################################################
# END
###############################################################################