Oracle DBMS_DATAPUMP
Version 12.1.0.1

General Information
Library Note Morgan's Library Page Footer
The Library is currently in the process of being upgraded from Oracle Database Version 11.2.0.3 to 12.1.0.1. Demos are being upgraded to reflect the new Container paradigm as well as EBR (Edition Based Redefinition) and may contain references to CDBs, PDBs, and other objects you may not be familiar with such as CDB_OBJECTS_AE: Welcome to 12c.
Purpose PL/SQL API to DataPump Import, Export, and Transportable Tablespaces
AUTHID CURRENT_USER
Constants
Name Data Type Value
Public
KU$_DUMPFILE_TYPE_DISK BINARY_INTEGER 0
KU$_DUMPFILE_TYPE_PIPE BINARY_INTEGER 1
KU$_DUMPFILE_TYPE_TAPE BINARY_INTEGER 2
KU$_DUMPFILE_TYPE_TEMPLATE BINARY_INTEGER 3
 
KU$_FILE_TYPE_DUMP_FILE BINARY_INTEGER 1
KU$_FILE_TYPE_BAD_FILE BINARY_INTEGER 2
KU$_FILE_TYPE_LOG_FILE BINARY_INTEGER 3
KU$_FILE_TYPE_SQL_FILE BINARY_INTEGER 4
 
KU$_JOB_COMPLETE NUMBER 1
KU$_JOB_COMPLETE_ERRORS NUMBER 2
KU$_JOB_STOPPED NUMBER 3
KU$_JOB_ABORTED NUMBER 4
 
KU$_JOB_VIEW_ALL NUMBER 0
KU$_JOB_TTS_TABLESPACES NUMBER 1
KU$_JOB_VIEW_ENCCOL_TABLES NUMBER 2
 
KU$_STATUS_WIP BINARY_INTEGER 1
KU$_STATUS_JOB_DESC BINARY_INTEGER 2
KU$_STATUS_JOB_STATUS BINARY_INTEGER 4
KU$_STATUS_JOB_ERROR BINARY_INTEGER 8
 
KU$_STATUS_VERSION_1 NUMBER 1
KU$_STATUS_VERSION_2 NUMBER 2
KU$_STATUS_VERSION_3 NUMBER 3
KU$_STATUS_VERSION_4 NUMBER 4
KU$_STATUS_VERSION NUMBER KU$_STATUS_VERSION_4
 
KU$_DFHDR_ENCPWD_MODE_UNKNOWN NUMBER 1
KU$_DFHDR_ENCPWD_MODE_NONE NUMBER 2
KU$_DFHDR_ENCPWD_MODE_PASSWORD NUMBER 3
KU$_DFHDR_ENCPWD_MODE_DUAL NUMBER 4
KU$_DFHDR_ENCPWD_MODE_TRANS NUMBER 5
 
KU$_DFHDR_ENCRYPTION_MODE NUMBER 22
KU$_DFHDR_ENCMODE_UNKNOWN NUMBER 1
KU$_DFHDR_ENCMODE_NONE NUMBER 2
KU$_DFHDR_ENCMODE_PASSWORD NUMBER 3
KU$_DFHDR_ENCMODE_DUAL NUMBER 4
KU$_DFHDR_ENCMODE_TRANS NUMBER 5
 
KU$_DFHDR_COMPRESSION_ALG NUMBER 23
KU$_DFHDR_CMPALG_UNKNOWN NUMBER 1
KU$_DFHDR_CMPALG_NONE NUMBER 2
KU$_DFHDR_CMPALG_BASIC NUMBER 3
KU$_DFHDR_CMPALG_LOW NUMBER 4
KU$_DFHDR_CMPALG_MEDIUM NUMBER 5
KU$_DFHDR_CMPALG_HIGH NUMBER 6
 
KU$_DATAOPT_SKIP_CONST_ERR NUMBER 1
KU$_DATAOPT_XMLTYPE_CLOB NUMBER 2
KU$_DATAOPT_NOTYPE_EVOL NUMBER 4
KU$_DATAOPT_DISABL_APPEND_HINT NUMBER 8
KU$_DATAOPT_REJECT_ROWS_REPCHR NUMBER 16
 
KU$_ICRPP_PREIMPORT NUMBER 0
KU$_ICRPP_FINAL NUMBER 1
KU$_ICRPP_EARLY NUMBER 2
 
KU$_ICRFLAGS_IS_EXPR NUMBER 1
KU$_ICRFLAGS_EARLY_IMPORT NUMBER 2
KU$_ICRFLAGS_GET_DEPENDENTS NUMBER 4
KU$_ICRFLAGS_EXCLUDE NUMBER 8
KU$_ICRFLAGS_XDB_NO_TTS NUMBER 16
 
KU$_COMPRESS_NONE NUMBER 1
KU$_COMPRESS_METADATA NUMBER 2
Item Codes for entry in a dump file info table (of type ku$_dumpfile_info)
KU$_DFHDR_FILE_VERSION NUMBER 1
KU$_DFHDR_MASTER_PRESENT NUMBER 2
KU$_DFHDR_GUID NUMBER 3
KU$_DFHDR_FILE_NUMBER NUMBER 4
KU$_DFHDR_CHARSET_ID NUMBER 5
KU$_DFHDR_CREATION_DATE NUMBER 6
KU$_DFHDR_FLAGS NUMBER 7
KU$_DFHDR_JOB_NAME NUMBER 8
KU$_DFHDR_PLATFORM NUMBER 9
KU$_DFHDR_INSTANCE NUMBER 10
KU$_DFHDR_LANGUAGE NUMBER 11
KU$_DFHDR_BLOCKSIZE NUMBER 12
KU$_DFHDR_DIRPATH NUMBER 13
KU$_DFHDR_METADATA_COMPRESSED NUMBER 14
KU$_DFHDR_DB_VERSION NUMBER 15
KU$_DFHDR_MASTER_PIECE_COUNT NUMBER 16
KU$_DFHDR_MASTER_PIECE_NUMBER NUMBER 17
KU$_DFHDR_DATA_COMPRESSED NUMBER 18
KU$_DFHDR_METADATA_ENCRYPTED NUMBER 19
KU$_DFHDR_DATA_ENCRYPTED NUMBER 20
KU$_DFHDR_COLUMNS_ENCRYPTED NUMBER 21
KU$_DFHDR_ENCPWD_MODE NUMBER 22
KU$_DFHDR_MAX_ITEM_CODE NUMBER 23
Job Description Data Types CREATE TYPE sys.ku$_ParamValue1010 AS OBJECT (
param_name    VARCHAR2(30),    -- Parameter name
param_op      VARCHAR2(30),    -- Parameter operation
param_type    VARCHAR2(30),    -- Parameter datatype
param_length  NUMBER,          -- Parameter length
param_value_n NUMBER,          -- Parameter value when numeric
param_value_t VARCHAR2(2000)); -- Parameter value when text
/

CREATE TYPE sys.ku$_ParamValues1010 AS TABLE OF sys.ku$_ParamValue1010;  -- Collection of job parameter values

CREATE TYPE sys.ku$_JobDesc1010 AS OBJECT (
job_name      VARCHAR2(30),    -- The job name
guid          RAW(16),         -- The job GUID
operation     VARCHAR2(30),    -- Current operation
job_mode      VARCHAR2(30),    -- Current mode
remote_link   VARCHAR2(4000),  -- DB link, if any
owner         VARCHAR2(30),    -- Job owner
instance      VARCHAR2(16),    -- The instance name
db_version    VARCHAR2(30),    -- Version of objects
creator_privs VARCHAR2(30),    -- Privs of job
start_time    DATE,            -- This job start time
max_degree    NUMBER,          -- Max. parallelism
log_file      VARCHAR2(4000),  -- Log file name
sql_file      VARCHAR2(4000),  -- SQL file name
params ku$_ParamValues1010);   -- Parameter list
/

CREATE TYPE sys.ku$_JobDesc1020 IS OBJECT (
job_name        VARCHAR2(30),    -- The job name
guid            RAW(16),         -- The job GUID
operation       VARCHAR2(30),    -- Current operation
job_mode        VARCHAR2(30),    -- Current mode
remote_link     VARCHAR2(4000),  -- DB link, if any
owner           VARCHAR2(30),    -- Job owner
platform        VARCHAR2(101),   -- Current job platform
exp_platform    VARCHAR2(101),   -- Export platform
global_name     VARCHAR2(4000),  -- Current global name
exp_global_name VARCHAR2(4000),  -- Export global name
instance        VARCHAR2(16),    -- The instance name
db_version      VARCHAR2(30),    -- Version of objects
exp_db_version  VARCHAR2(30),    -- Export version
scn             NUMBER,          -- Job SCN
creator_privs   VARCHAR2(30),    -- Privs of job
start_time      DATE,            -- This job start time
exp_start_time  DATE,            -- Export start time
term_reason     NUMBER,          -- Job termination code
max_degree      NUMBER,          -- Max. parallelism
log_file        VARCHAR2(4000),  -- Log file name
sql_file        VARCHAR2(4000),  -- SQL file name
params          ku$_ParamValues1010)  -- Parameter list
/

CREATE TYPE sys.ku$_JobDesc1210 IS OBJECT (
job_name         VARCHAR2(30),        -- The job name
guid             RAW(16),             -- The job GUID
operation        VARCHAR2(30),        -- Current operation
job_mode         VARCHAR2(30),        -- Current mode
remote_link      VARCHAR2(4000),      -- DB link, if any
owner            VARCHAR2(30),        -- Job owner
platform         VARCHAR2(101),       -- Current job platform
exp_platform     VARCHAR2(101),       -- Export platform
global_name      VARCHAR2(4000),      -- Current global name
exp_global_name  VARCHAR2(4000),      -- Export global name
instance         VARCHAR2(16),        -- The instance name
db_version       VARCHAR2(30),        -- Cur. server software version
exp_db_version   VARCHAR2(30),        -- Export svr. software version
job_version      VARCHAR2(30),        -- Negotiated data version
scn              NUMBER,              -- Job SCN
creator_privs    VARCHAR2(30),        -- Privs of job
start_time       DATE,                -- This job start time
exp_start_time   DATE,                -- Export start time
term_reason      NUMBER,              -- Job termination code
max_degree       NUMBER,              -- Max. parallelism
timezone         VARCHAR2(64),        -- Cur. server timezone
exp_timezone     VARCHAR2(64),        -- Exp. server timezone
tstz_version     NUMBER,              -- Cur. server timezone version
exp_tstz_version NUMBER,              -- Exp. server timezone
endianness       VARCHAR2(16),        -- Cur. platform's endianness
exp_endianness   VARCHAR2(16),        -- Exp. platform's endianness endianness is 'BIG' or 'LITTLE'
charset          VARCHAR2(28),        -- Cur. server charset
exp_charset      VARCHAR2(28),        -- Exp. server charset
ncharset         VARCHAR2(28),        -- Cur. server national charset
exp_ncharset     VARCHAR2(28),        -- Exp. server national charset
log_file         VARCHAR2(4000),      -- Log file name
sql_file         VARCHAR2(4000),      -- SQL file name
params           ku$_ParamValues1010) -- Parameter list
/
DumpFile Types CREATE TYPE sys.ku$_DumpFile1010 AS OBJECT (
file_name          VARCHAR2(4000),  -- Dump file name
file_type          NUMBER,          -- File type; see KU$_FILE_TYPE_* constants above
file_size          NUMBER,          -- File size (presumably bytes -- verify against docs)
file_bytes_written NUMBER);         -- Bytes written to the file so far
/

CREATE TYPE sys.ku$_DumpFileSet1010 AS TABLE OF sys.ku$_DumpFile1010;  -- Collection of dump file descriptors
/

CREATE TYPE sys.ku$_dumpfile_item IS OBJECT (
item_code NUMBER,           -- Identifies header item
value     VARCHAR2(2048));  -- Text string value
/
Executable-Package Equivalencies
Executable Package
ACCESS_METHOD dbms_datapump.set_parameter(handle => h1, name => 'DATA_ACCESS_METHOD', value => 'AUTOMATIC');
dbms_datapump.set_parameter(handle => h1, name => 'DATA_ACCESS_METHOD', value => 'DIRECT_PATH');
dbms_datapump.set_parameter(handle => h1, name => 'DATA_ACCESS_METHOD', value => 'EXTERNAL_TABLE');
CLUSTER dbms_datapump.start_job(xxx,xxx,xxx,cluster_ok=>0,xxx,xx); = 'N'
dbms_datapump.start_job(xxx,xxx,xxx,cluster_ok=>1,xxx,xx); = 'Y'
COMPRESSION dbms_datapump.set_parameter(handle => l_dp_handle, name => 'COMPRESSION', value => 'ALL');
dbms_datapump.set_parameter(handle => l_dp_handle, name => 'COMPRESSION', value => 'DATA_ONLY');
dbms_datapump.set_parameter(handle => l_dp_handle, name => 'COMPRESSION', value => 'METADATA_ONLY');
CONTENT dbms_datapump.set_parameter(handle => h1, name => 'INCLUDE_METADATA', value => 1); ALL
dbms_datapump.set_parameter(handle => h1, name => 'INCLUDE_METADATA', value => 0); DATA_ONLY
dbms_datapump.data_filter(handle => h1, name => 'INCLUDE_ROWS', value => 0); METADATA_ONLY
DATA_OPTIONS dbms_datapump.set_parameter(handle => l_dp_handle, name => 'DATA_OPTIONS', value => dbms_datapump.KU$_DATAOPT_XMLTYPE_CLOB);
DIRECTORY See dumpfile/logfile
DUMPFILE dbms_datapump.add_file(handle => l_dp_handle, filename => 'test.dmp', directory => 'SCRATCH');
ENCRYPTION dbms_datapump.set_parameter(handle => h1, name => 'ENCRYPTION', value => 'ALL');
dbms_datapump.set_parameter(handle => h1, name => 'ENCRYPTION', value => 'DATA_ONLY');
dbms_datapump.set_parameter(handle => h1, name => 'ENCRYPTION', value => 'ENCRYPTED_COLUMNS_ONLY');
dbms_datapump.set_parameter(handle => h1, name => 'ENCRYPTION', value => 'METADATA_ONLY');
dbms_datapump.set_parameter(handle => h1, name => 'ENCRYPTION', value => 'NONE');
ENCRYPTION_ALGORITHM dbms_datapump.set_parameter(handle => h1, name => 'ENCRYPTION_ALGORITHM', value => 'AES128');
dbms_datapump.set_parameter(handle => h1, name => 'ENCRYPTION_ALGORITHM', value => 'AES192');
dbms_datapump.set_parameter(handle => h1, name => 'ENCRYPTION_ALGORITHM', value => 'AES256');
ENCRYPTION_MODE dbms_datapump.set_parameter(handle => h1, name => 'ENCRYPTION_MODE', value => 'DUAL');
dbms_datapump.set_parameter(handle => h1, name => 'ENCRYPTION_MODE', value => 'PASSWORD');
dbms_datapump.set_parameter(handle => h1, name => 'ENCRYPTION_MODE', value => 'TRANSPARENT');
ENCRYPTION_PASSWORD dbms_datapump.set_parameter(handle => h1, name => 'ENCRYPTION_PASSWORD', value => 'secret password');
ESTIMATE dbms_datapump.set_parameter(handle => h1, name => 'ESTIMATE', value => 'BLOCKS');
ESTIMATE_ONLY dbms_datapump.set_parameter(handle => h1, name => 'ESTIMATE_ONLY', value => 1);
EXCLUDE dbms_datapump.metadata_filter(l_dp_handle, 'SCHEMA_EXPR', '<> (''SCHEMANAME'')');
dbms_datapump.metadata_filter(l_dp_handle, 'EXCLUDE_PATH_EXPR', 'IN (''INDEX'', ''SYNONYMS'', ''GRANTS'', ''STATISTICS'')');
FILESIZE dbms_datapump.add_file(handle => h1, filename => 'test.dmp', directory => 'SCRATCH',filesize => '1024000', reusefile => 1);
FLASHBACK_SCN dbms_datapump.set_parameter(handle => h1, name => 'FLASHBACK_SCN', value => nnnnnnnn);
dbms_datapump.set_parameter(handle => h1, name => 'FLASHBACK_SCN', value => TIMESTAMP_TO_SCN( TO_TIMESTAMP( TO_CHAR( SYSDATE))));
FLASHBACK_TIME dbms_datapump.set_parameter(handle => h1, name => 'FLASHBACK_TIME', value => 'TO_TIMESTAMP( TO_CHAR(SYSDATE) )');
FULL h1 := dbms_datapump.open(operation => 'EXPORT', job_mode => 'FULL', remote_link => NULL, version => 'LATEST');
INCLUDE dbms_datapump.metadata_filter(l_dp_handle, 'SCHEMA_EXPR', 'IN (''SCHEMANAME'')');
JOB_NAME h1 := dbms_datapump.open(operation => 'EXPORT', job_mode => 'FULL', remote_link => NULL, version => 'LATEST',job_name =>'MY JOB NAME');
KEEP_MASTER dbms_datapump.set_parameter(handle => l_dp_handle, name => 'KEEP_MASTER', value => 1);
dbms_datapump.set_parameter(handle => l_dp_handle, name => 'KEEP_MASTER', value => 0);
LOGFILE dbms_datapump.add_file(handle => l_dp_handle, filename => 'test.log', directory => 'SCRATCH', filetype => dbms_datapump.KU$_FILE_TYPE_LOG_FILE, reusefile => 1);
METRICS dbms_datapump.set_parameter(handle => h1, name => 'METRICS', value => 0);
dbms_datapump.set_parameter(handle => h1, name => 'METRICS', value => 1);
NETWORK_LINK h1 := dbms_datapump.open(operation => 'EXPORT', job_mode => 'FULL', remote_link => 'DB_LINK_NAME', version => 'LATEST',job_name =>'MY JOB NAME');
NOLOGFILE Do not specify a logfile
PARALLEL dbms_datapump.set_parallel(handle => h1, degree => n);
QUERY dbms_datapump.data_filter(handle => h1, name => 'SUBQUERY', value => 'WHERE col1 = 1', table_name => null, schema_name => null);
REMAP_DATA dbms_datapump.data_remap(handle => h1, name => 'COLUMN_FUNCTION', table_name => 'TAB1', column => 'COL1', function => 'UWCLASS.TESTPACKAGE.SETTESTID', schema => 'UWCLASS');
REUSE_DUMPFILES dbms_datapump.add_file(handle => l_dp_handle, filename => 'test.dmp', directory => 'CTEMP', reusefile => 1);
SAMPLE dbms_datapump.data_filter(handle => h1, name => 'SAMPLE', value => '10', table_name => NULL, schema_name => NULL);
SCHEMAS dbms_datapump.metadata_filter(l_dp_handle, 'SCHEMA_LIST', '''ALIGNE''');
SERVICE_NAME DBMS_DATAPUMP.start_job(l_dp_handle,service_name=>'MY_SERVICE_NAME');
SOURCE_EDITION dbms_datapump.set_parameter(handle => h1, name => 'SOURCE_EDITION', value => 'edition name');
TABLES dbms_datapump.metadata_filter(handle => h1, name => 'NAME_EXPR', value => 'IN (''TEST'',''TEST2'')', object_type => 'TABLE');
TABLESPACES dbms_datapump.metadata_filter(handle => h1, name => 'TABLESPACE_EXPR', value => 'IN (''TBS1'',''TBS2'')', object_type => 'TABLESPACE');
TRACE dbms_datapump.set_debug(debug_flags => TO_NUMBER('1FF0300','XXXXXXXXXXXXX'), version_flag=>1);
TRANSPORTABLE dbms_datapump.set_parameter(handle => h1, name => 'TRANSPORTABLE', value => 'ALWAYS');
dbms_datapump.set_parameter(handle => h1, name => 'TRANSPORTABLE', value => 'NEVER');
TRANSPORT_FULL_CHECK dbms_datapump.set_parameter(handle => h1, name => 'TTS_FULL_CHECK', value => 0); N
dbms_datapump.set_parameter(handle => h1, name => 'TTS_FULL_CHECK', value => 1); Y
VERSION h1 := DBMS_DATAPUMP.open(operation => 'EXPORT', job_mode => 'FULL', remote_link => 'DB_LINK_NAME', version => 'LATEST', job_name =>'MY JOB NAME');
h1 := DBMS_DATAPUMP.open(operation => 'EXPORT', job_mode => 'FULL', remote_link => 'DB_LINK_NAME', version => 'COMPATIBLE',job_name =>'MY JOB NAME');
(no equivalent) dbms_datapump.set_parameter(handle => h1, name => 'USER_METADATA', value => 1); export create user statement in schema mode (only if datapump_exp_full_database role is granted)
dbms_datapump.set_parameter(handle => h1, name => 'USER_METADATA', value => 0); don't export create user statement in schema mode
Job Status Types CREATE TYPE sys.ku$_JobStatus1010 AS OBJECT (
job_name           VARCHAR2(30),  -- Name of the job
operation          VARCHAR2(30),  -- Current operation
job_mode           VARCHAR2(30),  -- Current mode
bytes_processed    NUMBER,        -- Bytes so far
total_bytes        NUMBER,        -- Total bytes for job
percent_done       NUMBER,        -- Percent done
degree             NUMBER,        -- Of job parallelism
error_count        NUMBER,        -- #errors so far
state              VARCHAR2(30),  -- Current job state
phase              NUMBER,        -- Job phase
restart_count      NUMBER,        -- #Job restarts
worker_status_list ku$_WorkerStatusList1010, -- For (non-idle) job worker processes
files              ku$_DumpFileSet1010);     -- Dump file info
/
CREATE TYPE sys.ku$_JobStatus1020 IS OBJECT (
job_name           VARCHAR2(30),  -- Name of the job
operation          VARCHAR2(30),  -- Current operation
job_mode           VARCHAR2(30),  -- Current mode
bytes_processed    NUMBER,        -- Bytes so far
total_bytes        NUMBER,        -- Total bytes for job
percent_done       NUMBER,        -- Percent done
degree             NUMBER,        -- Of job parallelism
error_count        NUMBER,        -- #errors so far
state              VARCHAR2(30),  -- Current job state
phase              NUMBER,        -- Job phase
restart_count      NUMBER,        -- #Job restarts
worker_status_list ku$_WorkerStatusList1020, -- For (non-idle) job worker processes
files ku$_DumpFileSet1010);  -- Dump file info
/

CREATE TYPE sys.ku$_JobStatus1120 IS OBJECT (
job_name           VARCHAR2(30),             -- Name of the job
operation          VARCHAR2(30),             -- Current operation
job_mode           VARCHAR2(30),             -- Current mode
bytes_processed    NUMBER,                   -- Bytes so far
total_bytes        NUMBER,                   -- Total bytes for job
percent_done       NUMBER,                   -- Percent done
degree             NUMBER,                   -- Of job parallelism
error_count        NUMBER,                   -- #errors so far
state              VARCHAR2(30),             -- Current job state
phase              NUMBER,                   -- Job phase
restart_count      NUMBER,                   -- #Job restarts
worker_status_list ku$_WorkerStatusList1120, -- For (non-idle) job worker processes
files ku$_DumpFileSet1010);                  -- Dump file info
/

CREATE TYPE sys.ku$_JobStatus1210 IS OBJECT
(
job_name        VARCHAR2(30), -- Name of the job
operation       VARCHAR2(30), -- Current operation
job_mode        VARCHAR2(30), -- Current mode
bytes_processed NUMBER, -- Bytes so far
total_bytes     NUMBER, -- Total bytes for job
percent_done    NUMBER, -- Percent done
degree          NUMBER, -- Of job parallelism
error_count     NUMBER, -- #errors so far
state           VARCHAR2(30), -- Current job state
phase           NUMBER, -- Job phase
restart_count   NUMBER, -- #Job restarts worker_status_list ku$_WorkerStatusList1210,
                        -- For (non-idle)
                        -- job worker processes
files           ku$_DumpFileSet1010) -- Dump file info
/

CREATE TYPE sys.ku$_Status1120 IS OBJECT (
mask            NUMBER,             -- Status types present
wip             ku$_LogEntry1010,   -- Work in progress
job_description ku$_JobDesc1020,    -- Complete job description
job_status      ku$_JobStatus1120,  -- Detailed job status
error           ku$_LogEntry1010)   -- Multi-level context errors
/

CREATE TYPE sys.ku$_Status1210 IS OBJECT (
mask            NUMBER,            -- Status types present
wip             ku$_LogEntry1010,  -- Work in progress
job_description ku$_JobDesc1210,   -- Complete job description
job_status      ku$_JobStatus1210, -- Detailed job status
error           ku$_LogEntry1010)  -- Multi-level context errors
/
Log Entry & Error Types CREATE TYPE sys.ku$_LogLine1010 AS OBJECT (
logLineNumber NUMBER,            -- Log line number
errorNumber   NUMBER,            -- Associated error number, if any
LogText       VARCHAR2(2000) );  -- Log message text
/

CREATE PUBLIC SYNONYM ku$_LogLine1010
FOR sys.ku$_LogLine1010;

CREATE TYPE sys.ku$_LogEntry1010 AS TABLE OF
sys.ku$_LogLine1010;
/
CREATE PUBLIC SYNONYM ku$_LogEntry1010
FOR sys.ku$_LogEntry1010;
Status Types CREATE TYPE sys.ku$_status1010 AS OBJECT (
mask            NUMBER,             -- Status types present
wip             ku$_LogEntry1010,   -- Work in progress
job_description ku$_JobDesc1010,    -- Complete job description
job_status      ku$_JobStatus1010,  -- Detailed job status
error           ku$_LogEntry1010);  -- Multi-level context errors
/

CREATE TYPE sys.ku$_Status1020 IS OBJECT (
mask            NUMBER,             -- Status types present
wip             ku$_LogEntry1010,   -- Work in progress
job_description ku$_JobDesc1020,    -- Complete job description
job_status      ku$_JobStatus1020,  -- Detailed job status
error           ku$_LogEntry1010)   -- Multi-level context errors
/

CREATE TYPE sys.ku$_Status1120 IS OBJECT (
mask            NUMBER,             -- Status types present
wip             ku$_LogEntry1010,   -- Work in progress
job_description ku$_JobDesc1020,    -- Complete job description
job_status      ku$_JobStatus1120,  -- Detailed job status
error           ku$_LogEntry1010)   -- Multi-level context errors
/
Worker Status Types CREATE TYPE sys.ku$_WorkerStatus1010 AS OBJECT (
worker_number     NUMBER,          -- Worker process identifier
process_name      VARCHAR2(30),    -- Worker process name
state             VARCHAR2(30),    -- Worker process state
schema            VARCHAR2(30),    -- Schema name
name              VARCHAR2(4000),  -- Object name
object_type       VARCHAR2(200),   -- Object type
partition         VARCHAR2(30),    -- Partition name
completed_objects NUMBER,          -- Completed number of objects
total_objects     NUMBER,          -- Total number of objects
completed_rows    NUMBER,          -- Number of rows completed
completed_bytes   NUMBER,          -- Number of bytes completed
percent_done      NUMBER)          -- Percent done current object
/

CREATE TYPE sys.ku$_WorkerStatusList1010 AS TABLE OF sys.ku$_WorkerStatus1010
/

CREATE TYPE sys.ku$_WorkerStatus1020 AS OBJECT (
worker_number     NUMBER,          -- Worker process identifier
process_name      VARCHAR2(30),    -- Worker process name
state             VARCHAR2(30),    -- Worker process state
schema            VARCHAR2(30),    -- Schema name
name              VARCHAR2(4000),  -- Object name
object_type       VARCHAR2(200),   -- Object type
partition         VARCHAR2(30),    -- Partition name
completed_objects NUMBER,          -- Completed number of objects
total_objects     NUMBER,          -- Total number of objects
completed_rows    NUMBER,          -- Number of rows completed
completed_bytes   NUMBER,          -- Number of bytes completed
percent_done      NUMBER,          -- Percent done current object
degree            NUMBER);         -- Degree of parallelism
/

CREATE TYPE sys.ku$_WorkerStatus1120 AS OBJECT (
worker_number     NUMBER,         -- Worker process identifier
process_name      VARCHAR2(30),   -- Worker process name
state             VARCHAR2(30),   -- Worker process state
schema            VARCHAR2(30),   -- Schema name
name              VARCHAR2(4000), -- Object name
object_type       VARCHAR2(200),  -- Object type
partition         VARCHAR2(30),   -- Partition name
completed_objects NUMBER,         -- Completed number of objects
total_objects     NUMBER,         -- Total number of objects
completed_rows    NUMBER,         -- Number of rows completed
completed_bytes   NUMBER,         -- Number of bytes completed
percent_done      NUMBER,         -- Percent done current object
degree            NUMBER,         -- Degree of parallelism
instance_id       NUMBER);        -- Instance ID where running
/

CREATE TYPE sys.ku$_WorkerStatus1210 AS OBJECT
(
worker_number     NUMBER,         -- Worker process identifier
process_name      VARCHAR2(30),   -- Worker process name
state             VARCHAR2(30),   -- Worker process state
schema            VARCHAR2(30),   -- Schema name
name              VARCHAR2(4000), -- Object name
object_type       VARCHAR2(200),  -- Object type
partition         VARCHAR2(30),   -- Partition name
completed_objects NUMBER,         -- Completed number of objects
total_objects     NUMBER,         -- Total number of objects
completed_rows    NUMBER,         -- Number of rows completed
completed_bytes   NUMBER,         -- Number of bytes completed
percent_done      NUMBER,         -- Percent done current object
degree            NUMBER,         -- Degree of parallelism
instance_id       NUMBER,         -- Instance ID where running
instance_name     VARCHAR2(60),   -- Instance Name where running
host_name         VARCHAR2(64));  -- Host name where running
/

CREATE TYPE sys.ku$_WorkerStatusList1010 AS TABLE OF sys.ku$_WorkerStatus1010
/
CREATE TYPE sys.ku$_WorkerStatusList1020 AS TABLE OF sys.ku$_WorkerStatus1020
/
CREATE TYPE sys.ku$_WorkerStatusList1120 AS TABLE OF sys.ku$_WorkerStatus1120
/
CREATE TYPE sys.ku$_WorkerStatusList1210 AS TABLE OF sys.ku$_WorkerStatus1210
/
Dependencies
AMGT$DATAPUMP KU$_DUMPFILE1010 KUPC$_MASTERERROR
DBMS_AQ_SYS_IMP_INTERNAL KU$_DUMPFILESET1010 KUPC$_MASTERJOBINFO
DBMS_ASSERT KU$_DUMPFILE_INFO KUPC$_MASTER_KEY_EXCHANGE
DBMS_AW_EXP KU$_JOBDESC1010 KUPC$_MESSAGE
DBMS_CDC_DPUTIL KU$_JOBDESC1020 KUPC$_METADATA_FILTER
DBMS_CDC_EXPDP KU$_JOBDESC1210 KUPC$_METADATA_REMAP
DBMS_CSX_ADMIN KU$_JOBSTATUS1010 KUPC$_METADATA_TRANSFORM
DBMS_CUBE_EXP KU$_JOBSTATUS1020 KUPC$_OPEN
DBMS_DATAPUMP_UTL KU$_JOBSTATUS1120 KUPC$_RESTART
DBMS_DM_EXP_INTERNAL KU$_JOBSTATUS1210 KUPC$_SET_PARALLEL
DBMS_DM_MODEL_EXP KU$_LOGENTRY KUPC$_SET_PARAMETER
DBMS_DM_MODEL_IMP KU$_PARAMVALUE1010 KUPC$_SHADOW_KEY_EXCHANGE
DBMS_DST KU$_PARAMVALUES1010 KUPC$_START_JOB
DBMS_FILE_GROUP_UTL_INVOK KU$_STATUS KUPC$_STOP_JOB
DBMS_LOB KU$_STATUS1010 KUPCC
DBMS_LOCK KU$_STATUS1020 KUPD$DATA
DBMS_LOGREP_EXP KU$_STATUS1120 KUPF$FILE
DBMS_LOGREP_IMP KU$_STATUS1210 KUPM$MCP
DBMS_METADATA KU$_WORKERSTATUS1010 KUPP$PROC
DBMS_METADATA_UTIL KU$_WORKERSTATUS1020 KUPU$UTILITIES_INT
DBMS_NETWORK_ACL_ADMIN KU$_WORKERSTATUS1120 KUPV$FT
DBMS_OUTPUT KU$_WORKERSTATUS1210 KUPV$FT_INT
DBMS_REPCAT_EXP KU$_WORKERSTATUSLIST1010 KUPW$WORKER
DBMS_RULE_EXP_UTL KU$_WORKERSTATUSLIST1020 LBAC_SYSDBA
DBMS_SCHED_MAIN_EXPORT KU$_WORKERSTATUSLIST1120 LOGSTDBY_INTERNAL
DBMS_SQLTCB_INTERNAL KU$_WORKERSTATUSLIST1210 LT_EXPORT_PKG
DBMS_STREAMS_DATAPUMP KUPC$QUEUE OLS$DATAPUMP
DBMS_STREAMS_DATAPUMP_UTIL KUPC$QUEUE_INT ORDIMDPCALLOUTS
DBMS_STREAMS_TABLESPACE_ADM KUPC$_ADD_DEVICE OWM_VSCRIPT_PKG
DBMS_STREAMS_TBS_INT_INVOK KUPC$_ADD_FILE PLITBLM
DBMS_SWRF_INTERNAL KUPC$_API_ACK PSTDY_DATAPUMP_SUPPORT
DBMS_SYS_ERROR KUPC$_DATA_FILTER SA_COMPONENTS
DBMS_UTILITY KUPC$_DATA_REMAP TSDP$DATAPUMP
DBMS_WRR_INTERNAL KUPC$_JOBINFO XIMETADATA_PKG
DBMS_XDBUTIL_INT KUPC$_LOG_ENTRY XMLTYPE
DMP_SEC KUPC$_LOG_ERROR  
Documented Yes
Exceptions
Error Code Reason
ORA-39001 INVALID_ARGVAL: An invalid value was supplied for an input parameter
ORA-39002 INVALID_OPERATION: Cannot be executed because of inconsistencies between the API and the job
ORA-39004 INVALID_STATE: The state of the job precludes the execution of the API
ORA-39005 INCONSISTENT_ARGS: Inconsistent arguments 
ORA-39006 INTERNAL_ERROR: Internal datapump exception
ORA-31623 INVALID_HANDLE: Incorrect handle specified for the job
ORA-31626 NO_SUCH_JOB: An invalid reference to a job which is no longer executing
ORA-31627 SUCCESS_WITH_INFO: User specified job parameters that yielded informational messages
ORA-31631 PRIVILEGE_ERROR: The necessary privileges are not available for operations
ORA-31634 JOB_EXISTS: Job creation or restart failed due to duplicate name
ORA-39211 NO_DUMPFILE_INFO: User specified an invalid or inaccessible file
ORA-39327 WARNING_DV_NOENCRYPT: Database Vault data is being stored unencrypted in dump file set
Filters
Name Object Type Meaning
INCLUDE_NAME_EXPR
and
EXCLUDE_NAME_EXPR
Named objects Defines which object names are included in (INCLUDE_NAME_EXPR), or excluded from (EXCLUDE_NAME_EXPR), the job. You use the object_type parameter to limit the filter to a particular object type.

For Table mode, identifies which tables are to be processed.
SCHEMA_EXPR Schema objects Restricts the job to objects whose owning schema name is satisfied by the expression.

For Table mode, only a single SCHEMA_EXPR filter is supported. If specified, it must only specify a single schema (for example, 'IN (''SCOTT'')').

For Schema mode, identifies which users are to be processed.
TABLESPACE_EXPR Table, Cluster, Index, Rollback Segment Restricts the job to objects stored in a tablespace whose name is satisfied by the expression.

For Tablespace mode, identifies which tablespaces are to be processed. If a partition of an object is stored in the tablespace, the entire object is added to the job.

For Transportable mode, identifies which tablespaces are to be processed. If a table has a single partition in the tablespace set, all partitions must be in the tablespace set. An index is not included within the tablespace set unless all of its partitions are in the tablespace set. A domain index is not included in the tablespace set unless all of its secondary objects are included in the tablespace set.
INCLUDE_PATH_EXPR and EXCLUDE_PATH_EXPR All Defines which object paths are included in, or excluded from, the job. You use these filters to select only certain object types from the database or dump file set. Objects of paths satisfying the condition are included (INCLUDE_PATH_EXPR) or excluded (EXCLUDE_PATH_EXPR) from the operation. The object_path parameter is not supported for these filters.
First Available 10gR1
Remaps
Name Data Type Object Type Meaning
REMAP_DATAFILE Text Library, Tablespace, Directory Any datafile reference in the job that matches the object_type parameter and referenced the old_value datafile will be redefined to use the value datafile.
REMAP_SCHEMA Text Schema Objects Any schema object in the job that matches the object_type parameter and was located in the old_value schema will be moved to the value schema.
REMAP_TABLESPACE Text Table, Index, Rollback Segment, Materialized View, Materialized View Log, Tablespace Any storage segment in the job that matches the object_type parameter and was located in the old_value tablespace will be relocated to the value tablespace.
Security Model Owned by SYS with EXECUTE granted to PUBLIC
System Privileges
CREATE DIRECTORY EXP_FULL_DATABASE IMP_FULL_DATABASE
Source {ORACLE_HOME}/rdbms/admin/dbmsdp.sql
{ORACLE_HOME}/rdbms/admin/dbmspump.sql
Subprograms
Transforms
Name Data Type Object Type Description
SEGMENT_ATTRIBUTES Number Table, Index If nonzero (TRUE), emit storage segment parameters. Defaults to 1
STORAGE Number Table If nonzero (TRUE), emit storage clause. (Ignored if SEGMENT_ATTRIBUTES is zero.) Defaults to nonzero (TRUE)
 
ADD_DEVICE
Adds a sequential device to the dump file set for Export, Import, or Sql_file operations dbms_datapump.add_device(
handle      IN NUMBER,                  -- job handle
device_name IN VARCHAR2,                -- name of device being added
volume_size IN VARCHAR2 DEFAULT NULL);  -- device storage capacity
TBD
 
ADD_FILE
Adds files to the dump file set for an Export, Import, or SQL_FILE operation or specifies the log file or the output file for a SQL_FILE operation dbms_datapump.add_file(
handle    IN NUMBER,
filename  IN VARCHAR2,
directory IN VARCHAR2 DEFAULT NULL,
filesize  IN VARCHAR2 DEFAULT NULL,
filetype  IN NUMBER   DEFAULT dbms_datapump.KU$_FILE_TYPE_DUMP_FILE,
reusefile IN NUMBER   DEFAULT NULL);
See Export Demo Below
 
ATTACH
Used to gain access to a Data Pump job that is in the Defining, Executing, Idling, or Stopped state dbms_datapump.attach(
job_name  IN VARCHAR2 DEFAULT NULL,
job_owner IN VARCHAR2 DEFAULT NULL)
RETURN NUMBER;
See LOG_ENTRY Demo Below
 
CLIENT_LOB_APPEND
CLOB helper routine dbms_datapump.client_lob_append(
value    IN VARCHAR2,
position IN NUMBER,
as_is    IN NUMBER DEFAULT 0);
TBD
 
CLIENT_LOB_DELETE
CLOB helper routine dbms_datapump.client_lob_delete;
TBD
 
CLIENT_LOB_GET
CLOB helper routine dbms_datapump.client_lob_get RETURN CLOB;
TBD
 
CREATE_JOB_VIEW
Create view into master table for a job

Overload 1
dbms_datapump.create_job_view(
job_schema IN VARCHAR2,
job_name   IN VARCHAR2,
view_name  IN VARCHAR2,
view_type  IN VARCHAR2 DEFAULT dbms_datapump.ku$_job_view_all);
TBD
Overload 2 dbms_datapump.create_job_view(
handle    IN NUMBER,
view_name IN VARCHAR2,
view_type IN VARCHAR2 DEFAULT dbms_datapump.ku$_job_view_all);
TBD
 
DATAPUMP_JOB
Returns TRUE if the code executing is a datapump job: Otherwise FALSE dbms_datapump.datapump_job RETURN BOOLEAN;
set serveroutput on

BEGIN
  IF dbms_datapump.datapump_job THEN
    dbms_output.put_line('Yes');
  ELSE
    dbms_output.put_line('No');
  END IF;
END;
/
 
DATA_FILTER
Specifies restrictions on the rows that are to be retrieved

Overload 1
dbms_datapump.data_filter (
handle      IN NUMBER,
name        IN VARCHAR2,               -- filter name
value       IN NUMBER,
table_name  IN VARCHAR2 DEFAULT NULL,  -- if not specified = all
schema_name IN VARCHAR2 DEFAULT NULL);
TBD
Overload 2 dbms_datapump.data_filter(
handle      IN NUMBER,
name        IN VARCHAR2,
value       IN CLOB,
table_name  IN VARCHAR2 DEFAULT NULL,
schema_name IN VARCHAR2 DEFAULT NULL);
TBD
Overload 3 dbms_datapump.data_filter(
handle      IN NUMBER,
name        IN VARCHAR2,
value       IN VARCHAR2,
table_name  IN VARCHAR2 DEFAULT NULL,
schema_name IN VARCHAR2 DEFAULT NULL);
TBD
 
DATA_REMAP
Modify the values of data in user tables dbms_datapump.data_remap(
handle     IN NUMBER,      -- job handle
name       IN VARCHAR2,
table_name IN VARCHAR2,
column     IN VARCHAR2,
function   IN VARCHAR2,
schema     IN VARCHAR2 DEFAULT NULL);
TBD
 
DETACH
Specifies that the user has no further interest in using the handle dbms_datapump.detach(handle IN NUMBER);
See Export Demo Below
 
DH_KEY_EXCHANGE (new 12.1)
Undocumented dbms_datapump.dh_key_exchange(
handle     IN  NUMBER,
source_key IN  VARCHAR2,
target_key OUT VARCHAR2,
digest     OUT VARCHAR2);
TBD
 
ESTABLISH_REMOTE_CONTEXT
Establish remote Data Pump job context dbms_datapump.establish_remote_context(worker_id IN NUMBER, remote_link IN VARCHAR2);
TBD
 
GET_DUMPFILE_INFO
Retrieves information about a specified dump file dbms_datapump.get_dumpfile_info(
file_name  IN  VARCHAR2,
directory  IN  VARCHAR2,
info_table OUT dbms_datapump.ku$_dumpfile_info,
filetype   OUT NUMBER);
TBD
 
GET_STATUS (new 12.1 overload)
Monitors the status of a job or waits for the completion of a job or for more details on API errors

Overload 1
dbms_datapump.get_status(
handle  IN NUMBER,
mask    IN INTEGER,
timeout IN NUMBER DEFAULT NULL)
RETURN dbms_datapump.ku$_status;
TBD
Overload 2 dbms_datapump.get_status(
handle    IN  NUMBER,
mask      IN  INTEGER,
timeout   IN  NUMBER DEFAULT NULL,
job_state OUT VARCHAR2,
status    OUT ku$_status1010);
TBD
Overload 3 dbms_datapump.get_status(
handle     IN  NUMBER,
mask       IN  INTEGER,
timeout    IN  NUMBER DEFAULT NULL,
job_state  OUT VARCHAR2,
status     OUT ku$_status1020);
See Export Demo Below
Overload 4 dbms_datapump.get_status(
handle     IN  NUMBER,
mask       IN  INTEGER,
timeout    IN  NUMBER DEFAULT NULL,
job_state  OUT VARCHAR2,
status     OUT ku$_status1120);
See Export Demo Below
Overload 5  
TBD
 
GET_STATUS_VERSION
Determine ku$_Status object version to use for network operations dbms_datapump.get_status_version(version IN NUMBER) RETURN NUMBER;
TBD
 
HAS_PRIVS
Returns TRUE if the privilege has been granted: Otherwise FALSE dbms_datapump.has_privs(oper IN VARCHAR2) RETURN BOOLEAN;
set serveroutput on

BEGIN
  -- Print 'Yes' when the EXPORT privilege has been granted to the
  -- current user, 'No' otherwise (has_privs returns BOOLEAN).
  dbms_output.put_line(
    CASE
      WHEN dbms_datapump.has_privs('EXPORT') THEN 'Yes'
      ELSE 'No'
    END);
END;
/
 
LOG_ENTRY
Inserts a message into the log file dbms_datapump.log_entry (
handle        IN NUMBER,
message       IN VARCHAR2,
log_file_only IN NUMBER DEFAULT 0);
DECLARE
 h NUMBER;  -- handle of the existing Data Pump job
BEGIN
  -- Attach to the EXAMPLE5 job owned by the current user and show the handle
  h := dbms_datapump.attach('EXAMPLE5', USER);
  dbms_output.put_line(h);

  -- Write one message into the job's log file, then release the handle
  dbms_datapump.log_entry(h, 'Log entry from DataPump API');
  dbms_datapump.detach(h);
END;
/
 
LOG_ERROR
Undocumented dbms_datapump.log_error(
handle        IN NUMBER,
message       IN VARCHAR2,
error_number  IN NUMBER DEFAULT 0,
fatal_error   IN NUMBER DEFAULT 0,
log_file_only IN NUMBER DEFAULT 0);
TBD
 
METADATA_FILTER
Creates a filter that restricts the items that are included in a job

Overload 1
dbms_datapump.metadata_filter (
handle      IN NUMBER,
name        IN VARCHAR2,
value       IN VARCHAR2,
object_path IN VARCHAR2 DEFAULT NULL,
object_type IN VARCHAR2 DEFAULT NULL);
See Export Demo Below
Overload 2 dbms_datapump.metadata_filter (
handle      IN NUMBER,
name        IN VARCHAR2,
value       IN CLOB,
object_path IN VARCHAR2 DEFAULT NULL,
object_type IN VARCHAR2 DEFAULT NULL);
TBD
 
METADATA_REMAP
Specifies a remapping to be applied to objects as they are processed in the specified job dbms_datapump.metadata_remap (
handle      IN NUMBER,
name        IN VARCHAR2,
old_value   IN VARCHAR2,
value       IN VARCHAR2,
object_type IN VARCHAR2 DEFAULT NULL);
TBD
 
METADATA_TRANSFORM
Specifies transformations to be applied to objects as they are processed in the specified job

Overload 1
dbms_datapump.metadata_transform (
handle      IN NUMBER,
name        IN VARCHAR2,
value       IN VARCHAR2,
object_type IN VARCHAR2 DEFAULT NULL);
TBD
Overload 2 dbms_datapump.metadata_transform (
handle      IN NUMBER,
name        IN VARCHAR2,
value       IN NUMBER,
object_type IN VARCHAR2 DEFAULT NULL);
TBD
 
OPEN
Declare a new job using the Data Pump API. The handle that is returned is used as a parameter for calls to all other procedures except ATTACH. dbms_datapump.open (
operation   IN VARCHAR2,
job_mode    IN VARCHAR2,
remote_link IN VARCHAR2 DEFAULT NULL,
job_name    IN VARCHAR2 DEFAULT NULL,
version     IN VARCHAR2 DEFAULT 'COMPATIBLE',
compression IN NUMBER DEFAULT dbms_datapump.ku$_compress_metadata)
RETURN NUMBER;

Job Mode Description
FULL Full database excluding the SYS, XDB,ORDSYS, MDSYS, CTXSYS, ORDPLUGINS, and LBACSYS schemas
SCHEMA Operates on a set of selected schemas. Defaults to current user. All objects in the selected schemas are processed. Users cannot specify schemas noted above
TABLE Operates on a set of selected tables. Defaults to all of the tables in the current user's schema. Only tables and their dependent objects are processed.
TABLESPACE Operates on a set of selected tablespaces. No defaulting is performed. Tables that have storage in the specified tablespaces are processed in the same manner as in Table mode.
TRANSPORTABLE Operates on metadata for tables (and their dependent objects) within a set of selected tablespaces to perform a transportable tablespace export/import.
See Export Demo Below
 
SETUP_REMOTE_CONTEXT
Sets up a remote Data Pump job context dbms_datapump.setup_remote_context(
user_name      IN VARCHAR2,
job_name       IN VARCHAR2,
version        IN NUMBER,
status_xml     IN VARCHAR2,
status_xml_len IN NUMBER,
more           IN NUMBER);
TBD
 
SET_DEBUG
Sets the internal debug switch

Overload 1
dbms_datapump.set_debug(
on_off  IN NUMBER,
ip_addr IN VARCHAR2 DEFAULT NULL);
TBD
Overload 2 dbms_datapump.set_debug(
debug_flags  IN BINARY_INTEGER,
version_flag IN BINARY_INTEGER);
TBD
 
SET_PARALLEL
Adjusts the degree of parallelism within a job dbms_datapump.set_parallel (handle IN NUMBER, degree IN NUMBER);
See Export Demo Below
 
SET_PARAMETER
Specify job-processing options

Overload 1
dbms_datapump.set_parameter(
handle IN NUMBER,
name IN VARCHAR2,
value IN VARCHAR2);
TBD
Overload 2 dbms_datapump.set_parameter (
handle IN NUMBER,
name   IN VARCHAR2,
value  IN NUMBER);
TBD
 
SET_REMOTE_WORKER
Sets worker for remote job context dbms_datapump.set_remote_worker(worker IN NUMBER);
TBD
 
START_JOB
Begins or resumes job execution dbms_datapump.start_job (
handle       IN NUMBER,
skip_current IN NUMBER DEFAULT 0,
abort_step   IN NUMBER DEFAULT 0,
cluster_ok   IN NUMBER DEFAULT 1,
service_name IN VARCHAR2 DEFAULT NULL);
See Export Demo Below
 
STOP_JOB
Terminates a job, but optionally, preserves the state of the job dbms_datapump.stop_job (
handle      IN NUMBER,
immediate   IN NUMBER DEFAULT 0,
keep_master IN NUMBER DEFAULT NULL,
delay       IN NUMBER DEFAULT 60);
See Export Demo Below
 
TEST_REMOTE_CONTEXT1010
Test if remote Data Pump job context is version 10.1 dbms_datapump.test_remote_context1010;
TBD
 
TEST_REMOTE_CONTEXT1020
Test if remote Data Pump job context is version 10.2 dbms_datapump.test_remote_context1020;
TBD
 
TEST_REMOTE_CONTEXT1120
Test if remote Data Pump job context is version 11.2 dbms_datapump.test_remote_context1120;
TBD
 
TEST_REMOTE_CONTEXT1210 (new 12.1)
Test if remote Data Pump job context is version 12.1 dbms_datapump.test_remote_context1210;
TBD
 
TRACE_ENTRY (new 12.1)
   
 
 
WAIT_FOR_JOB
Runs a job until it either completes normally or stops for some other reason dbms_datapump.wait_for_job(handle IN NUMBER, job_state OUT VARCHAR2);
TBD
 
DataPump Demos
DataPump Export conn / as sysdba

set linesize 121
col owner format a20
col directory_name format a15
col directory_path format a60

SELECT *
FROM dba_directories;

HOST mkdir c:\expimp
HOST mkdir c:\expimp\logs

EXIT

CREATE OR REPLACE DIRECTORY expimp AS 'c:\expimp';

CREATE OR REPLACE DIRECTORY expimp_log AS 'c:\expimp\logs';

GRANT READ,WRITE ON DIRECTORY expimp TO system;

GRANT READ,WRITE ON DIRECTORY expimp_log TO system;

HOST del c:\expimp\*.dmp

HOST del c:\expimp\logs\*.log

set serveroutput on

DECLARE
 ind       NUMBER;        -- iterator over the log-entry collection
 dph       NUMBER;        -- Data Pump job handle
 pct_done  NUMBER;        -- last percentage-complete value displayed
 job_state VARCHAR2(30);  -- current job state returned by get_status
 le        ku$_LogEntry;  -- WIP and error messages
 js        ku$_JobStatus; -- job status portion of the status object
 sts       ku$_Status;    -- status object returned by get_status
BEGIN
  -- create a schema-mode export job
  dph := dbms_datapump.open('EXPORT','SCHEMA',NULL,'EXAMPLE5','LATEST');

  -- specify dump file
  dbms_datapump.add_file(dph, 'example5.dmp', 'EXPIMP',
  filetype => dbms_datapump.ku$_file_type_dump_file);

  -- specify log file
  dbms_datapump.add_file(dph, 'example5.log', 'EXPIMP_LOG',
  filetype => dbms_datapump.ku$_file_type_log_file);

  -- restrict the export to the HR schema
  dbms_datapump.metadata_filter(dph, 'SCHEMA_EXPR', 'IN (''HR'')');

  -- set parallelism
  dbms_datapump.set_parallel(dph, 2);

  -- start job
  dbms_datapump.start_job(dph);

  -- monitor the job until it completes or stops
  pct_done := 0;
  job_state := 'UNDEFINED';
  WHILE (job_state != 'COMPLETED') AND (job_state != 'STOPPED') LOOP
    -- Fix: the mask constants must be added together; the original ran
    -- ku$_status_job_status and ku$_status_wip together with no "+"
    -- between them, which does not compile.
    dbms_datapump.get_status(dph,
                             dbms_datapump.ku$_status_job_error +
                             dbms_datapump.ku$_status_job_status +
                             dbms_datapump.ku$_status_wip,
                             -1, job_state, sts);

    js := sts.job_status;

    -- if the percentage done changed, display the new value
    IF js.percent_done != pct_done THEN
      dbms_output.put_line('*** Job percent done = ' || TO_CHAR(js.percent_done));
      pct_done := js.percent_done;
    END IF;

    -- display any work-in-progress (WIP) or error messages received from the job
    IF (BITAND(sts.mask, dbms_datapump.ku$_status_wip) != 0) THEN
      le := sts.wip;
    ELSE
      IF (BITAND(sts.mask, dbms_datapump.ku$_status_job_error) != 0) THEN
        le := sts.error;
      ELSE
        le := NULL;  -- atomically NULL collection: skipped below
      END IF;
    END IF;

    IF le IS NOT NULL THEN
      ind := le.FIRST;
      WHILE ind IS NOT NULL LOOP
        dbms_output.put_line(le(ind).LogText);
        ind := le.NEXT(ind);
      END LOOP;
    END IF;
  END LOOP;

  -- indicate that the job finished and detach from it
  dbms_output.put_line('Job has completed');
  dbms_output.put_line('Final job state = ' || job_state);
  dbms_datapump.detach(dph);
EXCEPTION
  WHEN OTHERS THEN
    -- report the failure, stop the job, and re-raise so the caller sees it
    -- (the original swallowed the error silently)
    dbms_output.put_line('Export failed: ' || sqlerrm);
    dbms_datapump.stop_job(dph);
    RAISE;
END;
/
set serveroutput on

DECLARE
 l_dp_handle NUMBER;  -- Data Pump job handle
 -- Fix: removed unused local l_sts KU$_STATUS (declared but never referenced)
BEGIN
  -- create a schema-mode export job named EMP_EXPORT
  l_dp_handle := dbms_datapump.open(operation   => 'EXPORT',
                                    job_mode    => 'SCHEMA',
                                    remote_link => NULL,
                                    job_name    => 'EMP_EXPORT',
                                    version     => 'LATEST');

  -- dump file (filetype defaults to ku$_file_type_dump_file)
  dbms_datapump.add_file(handle    => l_dp_handle,
                         filename  => 'SCOTT.dmp',
                         directory => 'TEST_DIR');

  -- export only the SCOTT schema
  dbms_datapump.metadata_filter(handle => l_dp_handle,
                                name   => 'SCHEMA_EXPR',
                                value  => '= ''SCOTT''');

  -- start the job; it keeps running in the background after we detach
  dbms_datapump.start_job(l_dp_handle);

  dbms_datapump.detach(l_dp_handle);
END;
/

-- check job status
system@db10g> SELECT * FROM dba_datapump_jobs;
DataPump Import conn / as sysdba

set serveroutput on

DECLARE
 dph NUMBER;  -- Data Pump job handle
BEGIN
  -- create a table-mode import job
  dph := dbms_datapump.open(operation => 'IMPORT',
                            job_mode  => 'TABLE',
                            job_name  => 'EMP_IMPORT');

  -- dump file set (%U expands multi-part file names); filetype 1 = dump file
  -- Fix: the original passed the undeclared variable p_dph here,
  -- which does not compile; the handle variable is dph.
  dbms_datapump.add_file(handle    => dph,
                         filename  => 'EXPIMP%U.DMP',
                         directory => 'EXPIMP', filetype => 1);

  -- log file; filetype 3 = log file
  dbms_datapump.add_file(handle    => dph,
                         filename  => 'EXPIMP.LOG',
                         directory => 'EXPIMP_LOG', filetype => 3);

  -- replace pre-existing tables rather than skipping them
  dbms_datapump.set_parameter(handle => dph,
                              name   => 'TABLE_EXISTS_ACTION',
                              value  => 'REPLACE');

  dbms_datapump.start_job(dph);

  dbms_datapump.detach(dph);
EXCEPTION
  WHEN OTHERS THEN
    dbms_output.put_line('Error:' || sqlerrm || ' on Job-ID:' || dph);
END;
/

Related Topics
AMGT$DATAPUMP
DataPump Executable
DBMS_DATAPUMP_UTL
Export
Import
Packages
PSTDY_DATAPUMP_SUPPORT
Transportable Tablespaces
TSDP_DATAPUMP

Morgan's Library Page Footer
This site is maintained by Dan Morgan. Last Updated: This site is protected by copyright and trademark laws under U.S. and International law. © 1998-2014 Daniel A. Morgan All Rights Reserved