Feed aggregator

Kafka Streams and NodeJS – Consuming and periodically reporting in Node.JS on the results from a Kafka Streams streaming analytics application

Amis Blog - Mon, 2017-02-13 09:22

In several previous articles on Apache Kafka, Kafka Streams and Node.JS for interacting with Apache Kafka, I have described how to create a Node.JS application that publishes messages to a Kafka Topic (based on entries in a CSV file), how to create a simple Kafka Streams Java application that processes such messages from that Topic and how to extend that Java application to produce a running Top-N aggregation from that Topic. In this article, I want to discuss a Node application that consumes the Top-N reports from the Kafka Topic produced to by the Kafka Streams application and periodically (once every X seconds) reports on the current standings.


The sources for this article are in this GitHub Repo: https://github.com/lucasjellema/kafka-streams-running-topN.

The Node application uses the npm module kafka-node (https://www.npmjs.com/package/kafka-node) for the interaction with Kafka.

A new Client is created – based on the ZooKeeper connect string (ubuntu:2181/). Using the Client, a Consumer is constructed. The consumer is configured to consume from Topic Top3CountrySizePerContinent. A message handler is associated with the consumer, to handle messages on the topic.

The messages consumed by the Node consumer have the following structure:

{"topic":"Top3CountrySizePerContinent"
,"value":"{\"nrs\":[{\"code\":\"DZ\",\"name\":\"Algeria\",\"population\":40263711,\"size\":2381741,\"continent\":\"Africa\"},{\"code\":\"CD\",\"name\":\"Democratic Republic of the Congo\",\"population\":81331050,\"size\":2344858,\"continent\":\"Africa\"},{\"code\":\"SD\",\"name\":\"Sudan\",\"population\":36729501,\"size\":1861484,\"continent\":\"Africa\"},null]}"
,"offset":244
,"partition":0
,"key":{"type":"Buffer","data":[65,102,114,105,99,97]}
}

The key of the message is of type Buffer. We happen to know the key is actually a String (the name of the continent). We can extract the key like this:

var continent = new Buffer(countryMessage.key).toString('ascii');
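
As an aside (not part of the sources in the GitHub Repo): on Node.js 4.5 and later the same extraction can also be written with Buffer.from, the non-deprecated form of the Buffer API:

// equivalent key extraction with the newer (non-deprecated) Buffer API
var continent = Buffer.from(countryMessage.key).toString('ascii');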

The payload of the message – the top3 for the continent – is in the value property. It can be extracted easily:

var top3 = JSON.parse(countryMessage.value);

{"nrs":
  [
   {"code":"BS","name":"Bahamas","population":327316,"size":13880,"continent":"North America"}
  ,{"code":"AG","name":"Antigua and Barbuda","population":93581,"size":443,"continent":"North America"}
  ,{"code":"AW","name":"Aruba","population":113648,"size":180,"continent":"North America"}
  ,null
  ]
}

The object countrySizeStandings contains a property for each continent. The property is set equal to the top3 that was most recently consumed from the Kafka Topic Top3CountrySizePerContinent.

countrySizeStandings[continent]=top3;

Using Node's built-in setInterval(), the report() function is scheduled for execution every reportingIntervalInSecs seconds. This function writes the current data in countrySizeStandings to the console.

 

/*

This program consumes Kafka messages from topic Top3CountrySizePerContinent to which the Running Top3 (size of countries by continent) is produced.

This program reports: top 3 largest countries per continent (periodically, with a configurable interval) 
*/


var kafka = require('kafka-node')
var Consumer = kafka.Consumer
var client = new kafka.Client("ubuntu:2181/")

var countriesTopic = "Top3CountrySizePerContinent";
var reportingIntervalInSecs = 4;

var consumer = new Consumer(
  client,
  [],
  {fromOffset: true}
);

consumer.on('message', function (message) {
  handleCountryMessage(message);
});

consumer.addTopics([
  { topic: countriesTopic, partition: 0, offset: 0}
], () => console.log("topic "+countriesTopic+" added to consumer for listening"));

var countrySizeStandings = {}; // the global container for the most recent country size standings 

function handleCountryMessage(countryMessage) {
    var top3 = JSON.parse(countryMessage.value);
    // extract key value from the Kafka message
    var continent = new Buffer(countryMessage.key).toString('ascii');
    // record the top3 for the continent indicated by the message key as current standing in the countrySizeStandings object
    countrySizeStandings[continent]=top3;
}// handleCountryMessage

// every reportingIntervalInSecs seconds, report on the current standings per continent
function report() {
   var d = new Date();
   console.log("Report at "+ d.getHours()+":"+d.getMinutes()+ ":"+d.getSeconds());
   // loop over the keys (properties) in the countrySizeStandings map (object)
   for (var continent in countrySizeStandings) {
     if (countrySizeStandings.hasOwnProperty(continent)) {
        var line = continent+ ": ";
        var index = 1;
        countrySizeStandings[continent].nrs.forEach(function(c) {
          if (c) {
            line = line + (index++) +'. '+ c.name+ '('+c.size+'), ';
          }
        });
        console.log(line);
    }//if
  }//for
}//report

// schedule execution of function report at the indicated interval
setInterval(report, reportingIntervalInSecs*1000);

 

Running the end-to-end chain requires a running Kafka cluster and three programs: the Node application that produces the country messages from the CSV file, the Kafka Streams Java application that derives the running Top 3 standings, and finally the Node application introduced in this article that consumes the Top 3 standings and reports them to the console (as instructed in the ReadMe in the GitHub Repo):

  • node KafkaCountryProducer.js
  • java -cp target/Kafka-Streams-Country-TopN-1.0-SNAPSHOT.jar;target/dependency/* nl.amis.streams.countries.App
  • node KafkaCountryStreamsConsumer.js

The KafkaCountryProducer.js Node application writes the messages it produces to Kafka to the console as well:


The Kafka-Streams-Country-TopN Java application also writes its streaming analytic findings to the console:


The outcome of the Kafka Streams analysis – as published to the Kafka Topic – is consumed by the Node application, continuously, and reported to the console, periodically (once every 30 seconds), updated with the latest findings:


The post Kafka Streams and NodeJS – Consuming and periodically reporting in Node.JS on the results from a Kafka Streams streaming analytics application appeared first on AMIS Oracle and Java Blog.

Band Join 12c

Jonathan Lewis - Mon, 2017-02-13 07:53

One of the optimizer enhancements that appeared in 12.2 for SQL is the “band join” that makes certain types of merge join much more efficient. Consider the following query (I'll supply the SQL to create the demonstration at the end of the posting) which joins two tables of 10,000 rows each using a “between” predicate on a column which (just to make it easy to understand the size of the result set) happens to be unique with sequential values, though there's no index or constraint in place:

select
        t1.v1, t2.v1
from
        t1, t2
where
        t2.id between t1.id - 1
                  and t1.id + 2
;
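
The creation script is supplied at the end of the original posting and is not reproduced in this excerpt; a minimal setup consistent with the description above (two 10,000-row tables with unique, sequential id values and no indexes or constraints) could look like this sketch:

-- hypothetical setup, not the author's script
create table t1 as
select rownum id, rpad('x',40,'x') v1
from   dual
connect by level <= 10000;

create table t2 as
select rownum id, rpad('x',40,'x') v1
from   dual
connect by level <= 10000;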

This query returns nearly 40,000 rows. Except for the values at the extreme ends of the range each of the 10,000 rows in t2 will join to 4 rows in t1 thanks to the simple sequential nature of the data. In 12.2 the query, with rowsource execution stats enabled, completed in 1.48 seconds. In 12.1.0.2 the query, with rowsource execution stats OFF, took a little over 14 seconds. (With rowsource execution stats enabled it took 12.1.0.2 a little over 1 minute to return the first 5% of the data – I didn’t bother to wait for the rest, though the rate would have improved over time.)

Here are the two execution plans – spot the critical difference:


12.1.0.2
-----------------------------------------------------------------------------
| Id  | Operation            | Name | Rows  | Bytes | Cost (%CPU)| Time     |
-----------------------------------------------------------------------------
|   0 | SELECT STATEMENT     |      |    25M|   715M|  1058  (96)| 00:00:01 |
|   1 |  MERGE JOIN          |      |    25M|   715M|  1058  (96)| 00:00:01 |
|   2 |   SORT JOIN          |      | 10000 |   146K|    29  (11)| 00:00:01 |
|   3 |    TABLE ACCESS FULL | T1   | 10000 |   146K|    27   (4)| 00:00:01 |
|*  4 |   FILTER             |      |       |       |            |          |
|*  5 |    SORT JOIN         |      | 10000 |   146K|    29  (11)| 00:00:01 |
|   6 |     TABLE ACCESS FULL| T2   | 10000 |   146K|    27   (4)| 00:00:01 |
-----------------------------------------------------------------------------

Predicate Information (identified by operation id):
---------------------------------------------------
   4 - filter("T2"."ID"<="T1"."ID"+2)
   5 - access("T2"."ID">="T1"."ID"-1)
       filter("T2"."ID">="T1"."ID"-1)

12.2.0.1
----------------------------------------------------------------------------
| Id  | Operation           | Name | Rows  | Bytes | Cost (%CPU)| Time     |
----------------------------------------------------------------------------
|   0 | SELECT STATEMENT    |      | 40000 |  1171K|    54  (12)| 00:00:01 |
|   1 |  MERGE JOIN         |      | 40000 |  1171K|    54  (12)| 00:00:01 |
|   2 |   SORT JOIN         |      | 10000 |   146K|    27  (12)| 00:00:01 |
|   3 |    TABLE ACCESS FULL| T1   | 10000 |   146K|    25   (4)| 00:00:01 |
|*  4 |   SORT JOIN         |      | 10000 |   146K|    27  (12)| 00:00:01 |
|   5 |    TABLE ACCESS FULL| T2   | 10000 |   146K|    25   (4)| 00:00:01 |
----------------------------------------------------------------------------

Predicate Information (identified by operation id):
---------------------------------------------------
   4 - access("T2"."ID">="T1"."ID"-1)
       filter("T2"."ID"<="T1"."ID"+2 AND "T2"."ID">="T1"."ID"-1)

Notice how operation 4, the FILTER that appeared in 12.1, has disappeared in 12.2, and the filter predicate that it used to hold is now part of the filter predicate of the SORT JOIN that has been promoted to operation 4 in the new plan.

As a reminder – the MERGE JOIN operates as follows: for each row returned by the SORT JOIN at operation 2 it calls operation 4. In 12.1 this example will then call operation 5 so the SORT JOIN there happens 10,000 times. It’s important to know, though, that the name of the operation is misleading; what’s really happening is that Oracle is “probing a sorted result set in local memory” 10,000 times – it’s only on the first probe that it finds it has to call operation 6 to read and move the data into local memory in sorted order.

So in 12.1 operation 5 probes (accesses) the in-memory data set starting at the point where t2.id >= t1.id – 1; I believe there's an optimisation here because Oracle will recall where it started the probe last time and resume searching from that point. Having found the first point in the in-memory set where the access predicate is true, Oracle will walk through the list passing each row back to the FILTER operation as long as the access predicate is still true, and it will be true right up until the end of the list. As each row arrives at the FILTER operation Oracle checks to see if the filter predicate there is true and passes the row up to the MERGE JOIN operation if it is. We know that on each cycle the FILTER operation will start returning false after receiving 4 rows from the SORT JOIN operation – Oracle doesn't. On average the SORT JOIN operation will send 5,000 rows to the FILTER operation (for a total of 50,000,000 values passed and discarded).

In 12.2, and for the special case here where the join predicate uses constants to define the range, Oracle has re-engineered the code to eliminate the FILTER operation and to test both parts of the between clause in the same subroutine it uses to probe and scan the rowsource. In 12.2 the SORT JOIN operation will pass 4 rows up to the MERGE JOIN operation and stop scanning on the fifth row it reaches. In my examples that’s an enormous (CPU) saving in subroutine calls and redundant tests.

Footnote:

This “band-join” mechanism only applies when the range is defined by constants (whether literal or bind variable). It doesn’t work with predicates like (e.g.):

where t2.id between t1.id - t1.step_back and t1.id + t1.step_forward

The astonishing difference in performance due to enabling rowsource execution statistics is basically due to the number of subroutine calls eliminated – I believe (subject to a hidden parameter that controls a “sampling frequency”) that Oracle will call the O/S clock twice each time it calls the SORT JOIN operation from the FILTER operation to acquire the next row. In 12.1 we're doing 50M redundant calls to SORT JOIN.
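
For reference, rowsource execution statistics for a test like this are typically captured with something along these lines (a generic sketch, not the author's test harness):

-- generic way to capture and report rowsource execution statistics
alter session set statistics_level = all;

select /*+ gather_plan_statistics */ t1.v1, t2.v1
from   t1, t2
where  t2.id between t1.id - 1 and t1.id + 2;

select * from table(dbms_xplan.display_cursor(null, null, 'ALLSTATS LAST'));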

The dramatic difference in performance even when rowsource execution statistics isn’t enabled is probably something you won’t see very often in a production system – after all, I engineered a fairly extreme data set and query for the purposes of demonstration. Note, however, the band join does introduce a change in cost, so it’s possible that on the upgrade you may find a few cases where the optimizer will switch from a nested loop join to a merge join using a band-join.


Oracle Launches Cloud Service to Help Organizations Integrate Disparate Data and Drive Real-Time Analytics

Oracle Press Releases - Mon, 2017-02-13 07:00
Press Release
Oracle Launches Cloud Service to Help Organizations Integrate Disparate Data and Drive Real-Time Analytics Enhances Oracle Cloud Platform Portfolio with Oracle Data Integrator Cloud Service

Redwood Shores, Calif.—Feb 13, 2017

Oracle today expanded Oracle Cloud Platform’s data integration offerings with the launch of Oracle Data Integrator Cloud. This new cloud service significantly simplifies and accelerates cross-enterprise data integration to support real-time analytics that help organizations drive better business decisions. 

In today’s information driven economy, data is a fundamental asset to most businesses. As more and more data moves to the cloud, getting information and insight to the right people and the right applications at the right time becomes progressively more difficult. With the introduction today of the Oracle Data Integrator Cloud, organizations can improve their agility by deploying projects more quickly, reduce risk with an open, non-proprietary technology, and reduce costs with better productivity. 

“To be effective and agile, enterprises need seamless communication and flow of data between sources and targets - data originating from IoT, Web, and business applications or data that is stored in the cloud or on premises,” said Jeff Pollock, vice president of product management, Oracle. “Oracle Data Integrator Cloud provides businesses with a high-performance, simple, and integrated cloud service to execute data transformations where the data lies, with no hand coding required, and without having to copy data unnecessarily.”

Easy to use and integrate, Oracle Data Integrator Cloud helps organizations improve productivity, reduce development costs, and lower total cost of ownership by facilitating better data movement and transformation between Oracle and non-Oracle systems, data sources, and applications. It offers a flow-based declarative user interface along with release management capabilities that allow customers to improve productivity and better manage their code, as well as their development, testing and production environments. Oracle Data Integrator Cloud’s high performance architecture, with its E-LT capabilities and advanced parallelism options enable faster, more efficient loading and transformation for data marts, data warehouses, and big data systems.

Oracle Data Integrator Cloud is fully integrated with Oracle’s PaaS offerings, including Oracle Database Cloud, Oracle Database Exadata Cloud, and Oracle Big Data Cloud. Oracle also delivers pre-built integration for non-Oracle solutions, allowing users to seamlessly switch between underlying Big Data technologies such as Hive, HDFS, HBase, and Sqoop.

Oracle Cloud

Oracle Cloud is the industry’s broadest and most integrated public cloud, offering a complete range of services across SaaS, PaaS, and IaaS. It supports new cloud environments, existing ones, and hybrid, and all workloads, developers, and data.  The Oracle Cloud delivers nearly 1,000 SaaS applications and 50 enterprise-class PaaS and IaaS services to customers in more than 195 countries around the world and supports 55 billion transactions each day.

For more information, please visit us at http://cloud.oracle.com.

Contact Info
Nicole Maloney
Oracle
+1.415.235.4033
nicole.maloney@oracle.com
Kristin Reeves
Blanc & Otus
+1.415.856.5145
kristin.reeves@blancandotus.com
About Oracle

Oracle offers a comprehensive and fully integrated stack of cloud applications and platform services. For more information about Oracle (NYSE:ORCL), visit www.oracle.com.

Trademarks

Oracle and Java are registered trademarks of Oracle and/or its affiliates. Other names may be trademarks of their respective owners.

Safe Harbor

The preceding is intended to outline our general product direction. It is intended for information purposes only, and may not be incorporated into any contract. It is not a commitment to deliver any material, code, or functionality, and should not be relied upon in making purchasing decisions. The development, release, and timing of any features or functionality described for Oracle’s products remains at the sole discretion of Oracle Corporation. 

Talk to a Press Contact

Nicole Maloney

  • +1.415.235.4033

Kristin Reeves

  • +1.415.856.5145

UNION vs UNION ALL: What’s The Difference?

Complete IT Professional - Mon, 2017-02-13 05:00
What’s the difference between UNION and UNION ALL in Oracle SQL? There are a few. Learn what they are in this article. What Is UNION and UNION ALL? First of all, let’s explain what they are. UNION and UNION ALL are both “set operators”. They are keywords you can use to combine two sets of […]
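
As a quick illustration of the headline difference (UNION removes duplicate rows from the combined result, UNION ALL keeps them), with hypothetical tables:

-- hypothetical tables online_orders and store_orders
select customer_id from online_orders
union
select customer_id from store_orders;      -- each distinct customer_id appears once

select customer_id from online_orders
union all
select customer_id from store_orders;      -- all rows kept, duplicates included
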
Categories: Development

Oracle 12c – Issues with the HEATMAP Segment even if the heat map feature is not used

Yann Neuhaus - Mon, 2017-02-13 04:55

When I don't need a feature, I don't turn it on or use it, because that reduces the possibility of running into issues. Most of the time this is true; however, during the preparation for an RMAN workshop, the RMAN list failure command showed me the following dictionary issue.

RMAN> list failure;

using target database control file instead of recovery catalog
Database Role: PRIMARY

List of Database Failures
=========================

Failure ID Priority Status    Time Detected        Summary
---------- -------- --------- -------------------- -------
2          CRITICAL OPEN      13-FEB-2017 10:12:26 SQL dictionary health check: seg$.type# 31 on object SEG$ failed

At first I thought it might be related to false errors reported by the health check (DBMS_HM), because there used to be some issues with that tool. But even after applying the following patch, nothing changed and the error still appeared.

19543595: INCORRECT HEALTHCHECK ERRORS FROM DBMS_HM – FALSE ERRORS ON TS$ , FILE$ OR USER

So I started a manual health check again to get some more details.

SQL> BEGIN
  2  DBMS_HM.RUN_CHECK (check_name => 'Dictionary Integrity Check',
  3  run_name => 'WilliamsDICTrun002',
  4  input_params => 'CHECK_MASK=ALL');
  5  END;
  6  /

PL/SQL procedure successfully completed.

SQL> SELECT DBMS_HM.GET_RUN_REPORT('WilliamsDICTrun002') from dual;

DBMS_HM.GET_RUN_REPORT('WILLIAMSDICTRUN002')
---------------------------------------------------------------------
Basic Run Information
 Run Name                     : WilliamsDICTrun002
 Run Id                       : 61
 Check Name                   : Dictionary Integrity Check
 Mode                         : MANUAL
 Status                       : COMPLETED
 Start Time                   : 2017-02-13 10:56:58.250100 +01:00
 End Time                     : 2017-02-13 10:56:58.689301 +01:00
 Error Encountered            : 0
 Source Incident Id           : 0
 Number of Incidents Created  : 0

Input Paramters for the Run
 TABLE_NAME=ALL_CORE_TABLES
 CHECK_MASK=ALL

Run Findings And Recommendations
 Finding
 Finding Name  : Dictionary Inconsistency
 Finding ID    : 62
 Type          : FAILURE
 Status        : OPEN
 Priority      : CRITICAL
 Message       : SQL dictionary health check: seg$.type# 31 on object SEG$
               failed
 Message       : Damaged rowid is AAAAAIAABAAAK+RAAc - description: Ts# 1
               File# 2 Block# 28032 is referenced

Now I do have the ROWID, the file number and the block number of the affected object. Let's see what it is.

SQL> select FILE#, BLOCK#, TYPE#, TS#, BLOCKS from seg$ where rowid='AAAAAIAABAAAK+RAAc';

     FILE#     BLOCK#      TYPE#        TS#     BLOCKS
---------- ---------- ---------- ---------- ----------
         2      28032         11          1       1024
		 

SQL> SELECT segment_name, segment_type, block_id, blocks
  2  FROM   dba_extents
  3  WHERE
  4  file_id = 2
  5  AND
  6  ( 28032 BETWEEN block_id AND ( block_id + blocks ) );

SEGMENT_NAME               SEGMENT_TYPE               BLOCK_ID     BLOCKS
-------------------------- ------------------------ ---------- ----------
HEATMAP                    SYSTEM STATISTICS             28032       1024

Really strange. It is related to the HEATMAP segment, but I am not using the heat map feature, nor have I ever used it.

SQL> show parameter heat

NAME                                 TYPE        VALUE
------------------------------------ ----------- ------------------------------
heat_map                             string      OFF

SQL> select name, DETECTED_USAGES from DBA_FEATURE_USAGE_STATISTICS where name like 'Heat%';

NAME                     DETECTED_USAGES
------------------------ ---------------
Heat Map                               0

But how can I get this fixed? You could ignore the issue, create an SR with Oracle, or drop the statistics segment, in case you are not using the heat map feature.

In my case, I decided to drop the statistics segment by issuing the following command. Dropping the statistics segment works by setting the underscore parameter "_drop_stat_segment" to 1.

SQL> select SEGMENT_NAME, SEGMENT_TYPE from dba_extents where SEGMENT_TYPE = 'SYSTEM STATISTICS';

SEGMENT_NAME               SEGMENT_TYPE
-------------------------- ------------------------
HEATMAP                    SYSTEM STATISTICS

SQL> ALTER SYSTEM SET "_drop_stat_segment"=1 scope=memory;

System altered.

SQL> select SEGMENT_NAME, SEGMENT_TYPE from dba_extents where SEGMENT_TYPE = 'SYSTEM STATISTICS';

no rows selected

The heat map table is gone now. Let’s run the dictionary check again.

SQL> BEGIN
  2  DBMS_HM.RUN_CHECK (check_name => 'Dictionary Integrity Check',
  3  run_name => 'WilliamsDICTrun003',
  4  input_params => 'CHECK_MASK=ALL');
  5  END;
  6  /

PL/SQL procedure successfully completed.

SQL> SELECT DBMS_HM.GET_RUN_REPORT('WilliamsDICTrun003') from dual;

DBMS_HM.GET_RUN_REPORT('WILLIAMSDICTRUN003')
---------------------------------------------------------------------
Basic Run Information
 Run Name                     : WilliamsDICTrun003
 Run Id                       : 81
 Check Name                   : Dictionary Integrity Check
 Mode                         : MANUAL
 Status                       : COMPLETED
 Start Time                   : 2017-02-13 11:17:15.190873 +01:00
 End Time                     : 2017-02-13 11:17:15.642501 +01:00
 Error Encountered            : 0
 Source Incident Id           : 0
 Number of Incidents Created  : 0

Input Paramters for the Run
 TABLE_NAME=ALL_CORE_TABLES
 CHECK_MASK=ALL

Run Findings And Recommendations


RMAN> list failure;

using target database control file instead of recovery catalog
Database Role: PRIMARY

no failures found that match specification

 

Looks much better now.

Conclusion

Even if you are not using certain features, you can still have trouble with them. :-)

 

The article Oracle 12c – Issues with the HEATMAP Segment even if the heat map feature is not used appeared first on Blog dbi services.

Webcast: "EBS Technology: Latest Features and Roadmap"

Steven Chan - Mon, 2017-02-13 02:06

Oracle University has a wealth of free webcasts for Oracle E-Business Suite. If you're looking for a summary of recent updates from the Applications Technology Group, see:

Lisa Parekh, Vice President Technology Integration, provides an overview of Oracle’s recent advances and product strategy for Oracle E-Business Suite technology. This is the cornerstone session for Oracle E-Business Suite technology. Come hear how you can get the most out of Oracle E-Business Suite by taking advantage of the latest user interface updates and mobile applications.  Learn about systems administration and configuration management tools for running Oracle E-Business Suite on-premises or in the cloud, and hear how the latest technologies can be used with Oracle E-Business Suite to improve performance, security, and ease of integration for your system. This material was presented at Oracle OpenWorld 2016.

Categories: APPS Blogs

How to retrieve current value of oracle identity column value for foreign key purpose

Tom Kyte - Sun, 2017-02-12 22:06
Hi, after inserting into parent table w identity column, how to retrieve the identity column value to use for child table foreign key purpose thx Heather
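
A common pattern for this (the table and column names below are made up for illustration) is to capture the generated value with the RETURNING clause of the INSERT and reuse it for the child row:

-- hypothetical parent/child tables; parent.id is GENERATED AS IDENTITY
declare
  l_parent_id parent.id%type;
begin
  insert into parent (name) values ('example')
  returning id into l_parent_id;                 -- capture the generated identity value

  insert into child (parent_id, detail)
  values (l_parent_id, 'child row');             -- use it as the foreign key
  commit;
end;
/
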
Categories: DBA Blogs

Date math to calculate A and B weeks

Tom Kyte - Sun, 2017-02-12 22:06
I am trying to calculate the Recycle Week. It is either an A or B week. My first attempt was the following with obvious problems as the first day of the week changes every year: SELECT DECODE(MOD(TO_NUMBER(TO_CHAR(SYSDATE, 'WW')), 2), 0,...
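
One way to make the A/B parity stable across year boundaries is to count whole ISO weeks from a fixed reference Monday instead of using the 'WW' week number (a sketch; the anchor date is arbitrary, pick a Monday that falls in an 'A' week):

-- anchor Monday is arbitrary; choose one that belongs to an 'A' week
SELECT CASE MOD(TRUNC((TRUNC(SYSDATE, 'IW') - DATE '2017-01-02') / 7), 2)
         WHEN 0 THEN 'A'
         ELSE 'B'
       END AS recycle_week
FROM   dual;
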
Categories: DBA Blogs

"Table override": can I use this feature?

Tom Kyte - Sun, 2017-02-12 22:06
Hi! I created an empty table, lets say, tb_test (col1 varchar2(20 char), col2 varchar2(20 char)); Now I created a view of this table, lets say, vw_test as select * from tb_test; If I do select * from vw_test; I will get nothing, it is empty....
Categories: DBA Blogs

Goldengate 12c Find log sequence#, rba# for integrated extract (Doc ID 2006932.1)

Michael Dinh - Sun, 2017-02-12 20:50

When using an integrated extract, the info command does not show the Seqno and RBA, e.g. Seqno 3292, RBA 79236752.
The send status command does show Seqno and RBA, but it cannot be used when the process is stopped.
For Oracle GoldenGate version 12.1.2.1.2 and later, there is now a debug option for info to retrieve the Seqno and RBA.

oracle@arrow1:HAWKA:/u01/app/12.2.0.1/ggs01
$ ggsci

Oracle GoldenGate Command Interpreter for Oracle
Version 12.2.0.1.1 OGGCORE_12.2.0.1.0_PLATFORMS_151211.1401_FBO
Linux, x64, 64bit (optimized), Oracle 11g on Dec 12 2015 00:54:38
Operating system character set identified as UTF-8.

Copyright (C) 1995, 2015, Oracle and/or its affiliates. All rights reserved.



GGSCI (arrow1.localdomain) 1> info all

Program     Status      Group       Lag at Chkpt  Time Since Chkpt

MANAGER     RUNNING
EXTRACT     RUNNING     E_HAWK      00:00:10      00:00:06
EXTRACT     STOPPED     P_HAWK      00:00:00      00:02:03


GGSCI (arrow1.localdomain) 2> info e*

EXTRACT    E_HAWK    Last Started 2017-02-12 18:35   Status RUNNING
Checkpoint Lag       00:00:10 (updated 00:00:08 ago)
Process ID           1665
Log Read Checkpoint  Oracle Integrated Redo Logs
                     2017-02-12 18:36:06
                     SCN 0.4928929 (4928929)


GGSCI (arrow1.localdomain) 3> info e* debug

EXTRACT    E_HAWK    Last Started 2017-02-12 18:35   Status RUNNING
Checkpoint Lag       00:00:10 (updated 00:00:02 ago)
Process ID           1665
Log Read Checkpoint  Oracle Integrated Redo Logs
                     2017-02-12 18:36:16  Seqno 3292, RBA 79236752
                     SCN 0.4928939 (4928939)


GGSCI (arrow1.localdomain) 4> send e* status

Sending STATUS request to EXTRACT E_HAWK ...


EXTRACT E_HAWK (PID 1665)
  Current status: Recovery complete: Processing data

  Current read position:
  Redo thread #: 1
  Sequence #: 3292
  RBA: 79263888
  Timestamp: 2017-02-12 18:36:47.000000
  SCN: 0.4928987 (4928987)
  Current write position:
  Sequence #: 0
  RBA: 1420
  Timestamp: 2017-02-12 18:36:56.251219
  Extract Trail: ./dirdat/aa



GGSCI (arrow1.localdomain) 5> stop e*

Sending STOP request to EXTRACT E_HAWK ...
Request processed.


GGSCI (arrow1.localdomain) 6> send e* status

Sending STATUS request to EXTRACT E_HAWK ...

ERROR: sending message to EXTRACT E_HAWK (Connection reset by peer).


GGSCI (arrow1.localdomain) 7> info all

Program     Status      Group       Lag at Chkpt  Time Since Chkpt

MANAGER     RUNNING
EXTRACT     STOPPED     E_HAWK      00:00:07      00:00:13
EXTRACT     STOPPED     P_HAWK      00:00:00      00:03:09


GGSCI (arrow1.localdomain) 8> info e* debug

EXTRACT    E_HAWK    Last Started 2017-02-12 18:35   Status STOPPED
Checkpoint Lag       00:00:07 (updated 00:00:19 ago)
Log Read Checkpoint  Oracle Integrated Redo Logs
                     2017-02-12 18:37:07  Seqno 3292, RBA 79275152
                     SCN 0.4929013 (4929013)


GGSCI (arrow1.localdomain) 9> exit
oracle@arrow1:HAWKA:/u01/app/12.2.0.1/ggs01
$

12cR2 DBCA can create a standby database

Yann Neuhaus - Sun, 2017-02-12 15:33

Do you like DBCA to create a database from command line, with -silent -createDatabase? On a simple command line you can provision a database, with oratab, tnsnames.ora directory creation and any setting you want. And you can even call a custom script to customize further. But if you want to put it in Data Guard, you have to do the duplicate manually with RMAN. This evolves in 12.2 with a new option in DBCA to do that: dbca -silent -createDuplicateDB -createAsStandby

Limitations

I’ve tried in the Oracle Public Cloud where I just created a RAC database. But unfortunately, this new feature is only for Single Instance:

[FATAL] [DBT-16056] Specified primary database is not a Single Instance (SI) database.
CAUSE: Duplicate database operation is supported only for SI databases.

Ok. RAC is complex enough anyway, so you don’t need that quick command line to create the standby. So I tried with a single instance database:

[FATAL] [DBT-16057] Specified primary database is a container database (CDB).
CAUSE: Duplicate database operation is supported only for non container databases.

Ok. It's a bit surprising to have a new feature in 12.2 that works only on an architecture that was deprecated in 12.1, but if we think about it, DBCA is for fast provisioning. In multitenant you create a CDB once, put it in Data Guard, and the fast provisioning comes with 'create pluggable database'. And deprecated doesn't mean that we do not use it, and it is good to have a simple command line tool for easy provisioning of non-CDBs.

Then, I tried on a non-CDB that I’ve created in 12.2

I'm a big fan of EZCONNECT but I had a few problems with it. What's worth knowing is that there is no 'impossible to connect' message. When it cannot connect, the following message is raised:

[FATAL] [DBT-16051] Archive log mode is not enabled in the primary database.
ACTION: Primary database should be configured with archive log mode for creating a duplicate or standby database.

simply because this is the first thing that DBCA checks, and this is where it fails when the connection is not ok.

But you can also use a tnsnames.ora network service name. This is what I’ll use for -primaryDBConnectionString

$ tnsping ORCLA
TNS Ping Utility for Linux: Version 12.2.0.1.0 - Production on 11-FEB-2017 22:28:35
Copyright (c) 1997, 2016, Oracle. All rights reserved.
Used parameter files:
/u01/app/oracle/product/12.2.0/dbhome_1/network/admin/sqlnet.ora
Used TNSNAMES adapter to resolve the alias
Attempting to contact (DESCRIPTION = (ADDRESS = (PROTOCOL = TCP)(HOST = MAA.compute-usslash.oraclecloud.internal)(PORT = 1521)) (CONNECT_DATA = (SERVER = DEDICATED) (SERVICE_NAME = orcla.compute-usslash.oraclecloud.internal)))
OK (0 msec)

-createDuplicateDB -createAsStandby

Here is an example:

dbca -silent -createDuplicateDB -gdbName ORCLB.compute-usslash.oraclecloud.internal -sid ORCLB -sysPassword "Ach1z0#d" -primaryDBConnectionString ORCLA -createAsStandby -dbUniquename ORCLB

This will connect RMAN to the target (here called ‘primary’), with the connect string ORCLA and run a duplicate to create ORCLB as specified.

It starts by creating a temporary listener (which is still there in listener.ora even after completion), creating the auxiliary instance and running RMAN:
Listener config step
33% complete
Auxiliary instance creation
66% complete
RMAN duplicate
100% complete
Look at the log file "/u01/app/oracle/cfgtoollogs/dbca/ORCLB/orcla.log" for further details.

Through the RMAN API, the file names are set:


run {
set newname for datafile 1 to '/u01/app/oracle/oradata/orclb/system01.dbf' ;
set newname for datafile 3 to '/u01/app/oracle/oradata/orclb/sysaux01.dbf' ;
set newname for datafile 4 to '/u01/app/oracle/oradata/orclb/undotbs01.dbf' ;
set newname for datafile 7 to '/u01/app/oracle/oradata/orclb/users01.dbf' ;
set newname for tempfile 1 to '/u01/app/oracle/oradata/orclb/temp01.dbf' ;

and the DUPLICATE FOR STANDBY FROM ACTIVE is run:

duplicate target database
for standby
from active database
dorecover
spfile
set 'db_recovery_file_dest_size'='8405385216'
set 'compatible'='12.2.0'
set 'undo_tablespace'='UNDOTBS1'
set 'dispatchers'='(PROTOCOL=TCP) (SERVICE=ORCLAXDB)'
set 'db_name'='orcla'
set 'db_unique_name'='ORCLB'
set 'sga_target'='2281701376'
set 'diagnostic_dest'='/u01/app/oracle'
set 'audit_file_dest'='/u01/app/oracle/audit'
set 'open_cursors'='300'
set 'processes'='300'
set 'nls_language'='AMERICAN'
set 'pga_aggregate_target'='757071872'
set 'db_recovery_file_dest'='/u01/app/oracle/fast_recovery_area/orcla'
set 'db_block_size'='8192'
set 'log_archive_format'='%t_%s_%r.dbf'
set 'nls_territory'='AMERICA'
set 'control_files'="/u01/app/oracle/oradata/orclb/control01.ctl", "/u01/app/oracle/fast_recovery_area/orcla/ORCLB/control02.ctl"
set 'audit_trail'='DB'
set 'db_domain'='compute-usslash.oraclecloud.internal'
set 'remote_login_passwordfile'='EXCLUSIVE'
reset 'local_listener'
reset 'db_file_name_convert'
set 'log_archive_dest_1'='location=/u01/app/oracle/fast_recovery_area/orcla'
reset 'event'
reset 'remote_listener'
nofilenamecheck;
}

The parameters come from the 'primary' and are adapted for the new database. Be careful: this is where I prefer to review the parameters first. For example, when you duplicate to clone the primary (without -createAsStandby) you probably don't want to keep the same log_archive_dest that was set in a Data Guard configuration. I'll have to post a blog about that.

At the end, the standby database is opened read-only, so be careful to close it before starting the apply of redo if you don’t have the Active Data Guard option.

Data Guard

DBCA doesn't go beyond the DUPLICATE. And you can use it also in Standard Edition to set up the manual standby.

I hope that one day we will have an option to create the Data Guard configuration in the same process, but here you have to do it yourself:

  • No tnsnames.ora entry is added for the standby
  • The static listener entries are not added in listener.ora
  • No Data Guard configuration is there
  • The Data Guard Broker is not started, unless dg_broker_start was already set to true on the primary
  • No standby redo logs are created (except when they were already present on the primary)

You can set dg_broker_start=true and create the standby redo logs in a post-script that you call with the -customScripts argument. However, the best way is to do it in advance on the primary; the duplicate will then do the same on the standby.
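
As an illustration, preparing the primary could look something like this (group numbers, file names and sizes are made up for this sketch; size the standby redo logs like the online redo logs and create one group more than the number of online groups):

-- hypothetical example: run on the primary before calling dbca, so the duplicate picks it up
alter system set dg_broker_start=true scope=both;

alter database add standby logfile thread 1 group 11 ('/u01/app/oracle/oradata/orcla/srl01.log') size 200M;
alter database add standby logfile thread 1 group 12 ('/u01/app/oracle/oradata/orcla/srl02.log') size 200M;
alter database add standby logfile thread 1 group 13 ('/u01/app/oracle/oradata/orcla/srl03.log') size 200M;
alter database add standby logfile thread 1 group 14 ('/u01/app/oracle/oradata/orcla/srl04.log') size 200M;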

So what?

You don't need this new feature, because it is easy to automate it yourself. It's just a copy of spfile parameters, with a few changes, and an RMAN duplicate command. But your scripts will be specialized for your environment, and generic scripts are more complex to maintain. The big advantage of having this integrated in DBCA is that it is designed for all configurations and is maintained through versions.

 

The article 12cR2 DBCA can create a standby database appeared first on Blog dbi services.

10 years of "run like hell" - an anniversary after nearly 300 blog entries...

Dietrich Schroff - Sun, 2017-02-12 10:42
Ten years ago, after a lecture given at the Technische Universität Kaiserslautern, I started this blog with this posting (about the lecture). And you can see: the link does not work anymore, but the screenshot still remains after such a long time.
After this post I owned a blog with one entry, and noticed that finding topics to write about is not really easy.
In August 2007 I started to write about Oracle products like jDeveloper, 11c, ... With more than 100 posts, over 30% of the blog entries are related to Oracle. Because of this, my blog is listed by 2 Oracle aggregators.
But for the last three years I have not written about Oracle anymore. Nevertheless, there are still readers visiting:
The blog was viewed 350,000 times in 10 years - not really a big site, but nearly 3000 readers per week.
So thank you for reading my postings - let's see if I will run like hell for another 10 years ;-)

Connecting Oracle Management Cloud with Oracle Enterprise Manager 13c

Amis Blog - Sun, 2017-02-12 10:00

Let's be clear about this: Oracle Management Cloud (OMC) is NOT a replacement for Oracle Enterprise Manager Cloud Control (OEM CC), or even an equivalent. Rumour has it that this will be Oracle's policy in a faraway future, but in the meantime we focus on what each does best. OEM CC is a product for a complete management solution for your Oracle environment; OMC is for monitoring and, most of all, analysing the monitored data in your Oracle environment.

Oracle made it possible to connect these worlds by making the data in the OEM CC repository available to OMC. And that's what this post is about.

In a previous blog about monitoring infrastructure with OMC I installed an OMC cloud agent on a server running OEM CC, with the repository database on it.

Through this OMC cloud agent it's possible to monitor the assets – in a nice GUI – but what I'd really like to do is use the data in the OEM CC repository with the analytical power of Oracle Management Cloud.

My infrastructure monitoring has been working since that blog, by installing an OMC cloud agent. The question however is: do I have to install an OMC cloud agent on every node, and connect every node to OMC? Partly. A cloud agent is necessary on every node, but they can all be directed to one node where a central gateway has been installed for the connection to OMC. And of course you can also install a data collector for information from the Oracle Enterprise Manager repository.

In the documentation of IT-analytics there’s a picture with quite a nice overview:


Another, maybe more explanatory image:

 


 

For now I’m interested in the data collector. This data collector collects different types of data from the Oracle Management Repository, including targets, target properties, metrics, performance metrics, and events.

According to the doc I need to install both the gateway and the data collector. In the former post I already downloaded the master installer for infrastructure monitoring, and this is the same installer used for the gateway and the data collector.

The gateway

For the gateway I chose to use port 1840.

Performing the installation in two steps: downloading first, and then installing from the staged directory.

./AgentInstall.sh AGENT_TYPE=gateway AGENT_REGISTRATION_KEY='RMxMm7chywi-J-VZ7_UfxY5XUU' STAGE_LOCATION=/gwayagent -download_only   ---> this may take a few minutes, results in a gateway.zip of approx 290 MB.

./AgentInstall.sh AGENT_TYPE=gateway AGENT_REGISTRATION_KEY='RMxMm7chywi-J-VZ7_UfxY5XUU' AGENT_BASE_DIR=/omc_gway AGENT_PORT=1840 -staged

Generating emaas.properties …
Extracting Agent Software …
Installing the Agent …
Registering the Agent …
Downloading Certificates …
Configuring the Agent …
Cleanup temporary files …
The following configuration scripts need to be executed as the root user
#!/bin/sh
#Root script to run
/omc_gway/core/1.12.0/root.sh

That was quite easy.

The data collector.

Same recipe, first download, then the install.

./AgentInstall.sh AGENT_TYPE=data_collector AGENT_REGISTRATION_KEY='RMxMm7chywi-J-VZ7_UfxY5XUU' STAGE_LOCATION=/gwayagent -download_only   ---> this results in a lama.zip, the same as the cloud_agent, so in fact no need to download again!

 

./AgentInstall.sh AGENT_TYPE=data_collector AGENT_REGISTRATION_KEY='RMxMm7chywi-J-VZ7_UfxY5XUU' AGENT_BASE_DIR=/omc_dc GATEWAY_HOST=ovamisux159.amis.local GATEWAY_PORT=1840 EM_AGENT_NAME=ovamisux159.amis.local:1830 HARVESTER_USERNAME=SYSMAN_EMAAS_3 OMR_USERNAME=sys OMR_HOSTNAME=ovamisux159.amis.local OMR_HOST_USER_PASSWORD=welcome1 OMR_PORT=1521 OMR_SERVICE_NAME=cloudcontrol OMR_HOST_USERNAME=oracle OMR_STAGE_DIR=/gwayagent -staged

Enter HARVESTER_USER_PASSWORD:
Enter OMR_USER_PASSWORD:

Generating emaas.properties …
Extracting Agent Software …
Installing the Agent …
Registering the Agent …
Downloading Certificates …
Configuring the Agent …
Cleanup temporary files …
The following configuration scripts need to be executed as the root user
#!/bin/sh
#Root script to run
/omc_dc/core/1.12.0/root.sh

Here I was a bit confused by the use of OMR_USERNAME. The documentation of the agent_install script states that this is "The Oracle Management Repository user name". But this user should be a user with SYSDBA privileges, needed to install the Harvester schema – in this case SYSMAN_EMAAS_3.

O.k. done.

A recap: what is running on my server at this moment:

OEM CC – the Oracle Enterprise Manager – Management Server

OEM CC – the Oracle Enterprise Manager repository database 12.1

OEM CC – the Oracle Enterprise Manager agent

OMC – cloud agent

OMC – gateway (agent)

OMC – data collector (agent)

And the data collector shows up in Oracle Management Cloud:


 

Then it's time to add entities to the IT Analytics data warehouse according to the doc. This time there is no need for OMCLI commands; now it's time to return to the GUI of OMC.

Here it’s called ‘Enabling Services’ on Entities.

 

Be aware: only the items which were added through the infrastructure monitoring (through OMCLI) can be enabled as a service!

A small documentation bug: I seemed to be missing a piece of functionality, the IT Analytics Administration link.

According to the documentation:

As an IT Analytics administrator, you can add database targets to the IT Analytics warehouse for AWR collection.
To add targets, do the following:
From the Oracle Management Cloud home page, click the Application Navigator icon in the upper-right corner of the page.
From the Administration group, click the IT Analytics Administration link

It turns out this link is no longer called "IT Analytics Administration"; it is just "Administration".
From there, the link “Enable/Disable Services” is used to add entities/targets to the ITA Warehouse.

So… this one:

And there it is:

 


 

But, no data is yet to be seen. Is the data collector healthy and well-configured?

[oracle@ovamisux159 bin]$ ./omcli status agent
Oracle Management Cloud Data Collector
Copyright (c) 1996, 2016 Oracle Corporation.  All rights reserved.
—————————————————————
Version                : 1.12.0
State Home             : /omc_dc/agent_inst
Log Directory          : /omc_dc/agent_inst/sysman/log
Binaries Location      : /omc_dc/core/1.12.0
Process ID             : 27293
Parent Process ID      : 27235
URL                    :
https://ovamisux159.amis.local:1830/emd/main/
Started at             : 2016-12-30 12:26:01
Started by user        : oracle
Operating System       : Linux version 3.8.13-55.1.6.el7uek.x86_64 (amd64)
Data Collector enabled : true
Sender Status          : FUNCTIONAL
Gateway Upload Status  : FUNCTIONAL
Last successful upload : 2017-01-22 17:03:01
Last attempted upload  : 2017-01-22 17:03:01
Pending Files (MB)     : 0.01
Pending Files          : 16
Backoff Expiration     : (none)

—————————————————————
Agent is Running and Ready

Are there any connectivity issues?

[oracle@ovamisux159 bin]$ ./omcli status agent connectivity -verbose
Oracle Management Cloud Data Collector
Copyright (c) 1996, 2016 Oracle Corporation.  All rights reserved.
—————————————————————
Data Collector is Running

No significant connectivity issues found between Data Collector and Gateway.
Check the connectivity status of the Gateway: ovamisux159.amis.local

Ahh.. forgot one thing:

Following the note "Enabling Host Monitoring for the Infrastructure Monitoring Cloud Service (Doc ID 2195074.1)", I changed the file /omc_dc/plugins/oracle.em.sgfm.zip/1.12.0/configs/discovery.properties and commented out the line with 'omc_host_linux':

navigate to directory:

cd /u01/app/oracle/omc_dc/plugins/oracle.em.sgfm.zip/1.12.0/configs

open a file for editing in vi

vi discovery.properties

comment out the parameter: disable_monitoring_for_entitytype=omc_host_linux

save the modified file.

Activate the changes by stopping and starting the agent:

./omcli stop agent

./omcli start agent

 

Checking the log file /omc_dc/agent_inst/sysman/log/gcagent.log : no errors.

 

Are there any items that have been added to OMC? When navigating to e.g. Enterprise Health –> Entities, I can see entities which I haven't added manually to OMC, and which have obviously been added by the OMC data collector.

You may ask why the status is unknown. That's because of the OEM CC agent, which died on my server while I was having some other problems.

 


 

Nice to know: how does this data collector collect its data from the repository? This is recorded in the log file of the OMC data collector; here is an example.

SELECT a.target_guid, a.snapshot_guid, a.snapshot_type, a.target_type, a.start_timestamp, c.timezone_region, b.ecm_snapshot_id, b.FILE_SYSTEM “_E_FILE_SYSTEM_”

FROM mgmt_ecm_gen_snapshot a

INNER JOIN mgmt_targets c ON (a.target_guid = c.target_guid)

INNER JOIN emaas_whitelist_entities w

ON (w.harvester_name = ? AND c.target_guid = w.member_entity_guid)

INNER JOIN em_ecm_all_config_snapshots e

ON (a.snapshot_guid = e.ecm_snapshot_id AND a.start_timestamp = e.collection_timestamp   AND (  (w.is_new = 0 AND e.saved_timestamp >= ? AND e.saved_timestamp < ?)OR   (w.is_new = 1 AND e.saved_timestamp < ?)     )      )

INNER JOIN em_ecm_all_config_metrics d

ON (d.ecm_snapshot_id = a.snapshot_guid AND d.table_name = ? AND d.collection_timestamp = e.collection_timestamp)

LEFT OUTER JOIN EM_ESM_FILESYS b ON (a.snapshot_guid = b.ecm_snapshot_id)

WHERE a.target_type = ?

AND a.snapshot_type = ?

AND a.is_current = ?

AND a.target_guid IS NOT NULL

ORDER BY a.target_guid

 

O.k. I clearly reached my goal. OMC is connected with OEM CC. And that’s where it stops at the moment for this blogpost.

But now what? I would have loved to show some more, but time is limited (and so is the trial period). Clicking around in OMC is not very intuitive and you clearly need some time to figure out how to use it and discover the added value. You also need time to figure out what all the agents on your system are doing, apart from consuming resources.

A recap: the concept of OMC is still very promising; however, the GUI and the technical implementation need improvement. But hey, the developers of OMC have only just begun, compared with the maturity of OEM CC 13c.

My major concern at the moment is Oracle's policy. Their cloud eagerness could lead to a decision to replace OEM CC with OMC, while these products could be complementary to each other, even reinforcing one another.

Regardz.

 

Resources:

– Deploying Oracle Management Cloud for Infrastructure Monitoring: https://technology.amis.nl/2016/12/18/deploying-oracle-management-cloud-for-infrastructure-monitoring/

– Documentation IT Analytics: http://docs.oracle.com/en/cloud/paas/management-cloud/emcad/setting-it-analytics.html

– Oracle Management Cloud Master Note: (Doc ID 2092091.1)

– IT Analytics Cloud Service Master Note (Doc ID 2107732.1)

-Configuration cloud agent for log analytics on jcs instance : https://technology.amis.nl/2017/01/20/configuring-cloud-agent-for-log-analytics-on-jcs-instance-to-collect-weblogic-log-files/

– Agent_install script: https://docs.oracle.com/en/cloud/paas/management-cloud/emcad/running-agentinstall-script.html

The post Connecting Oracle Management Cloud with Oracle Enterprise Manager 13c appeared first on AMIS Oracle and Java Blog.

Government Policy regarding Password Minimal age

Tom Kyte - Sun, 2017-02-12 03:46
I'm trying to look for the location on the Oracle database for the parameter that allows you to alter the minimal age for passwords in essence according to the Gov't policy I'm supposed to be able to limit users from changing passwords more than once...
Categories: DBA Blogs

Apache Kafka Streams – Running Top-N Aggregation grouped by Dimension – from and to Kafka Topic

Amis Blog - Sat, 2017-02-11 22:02

This article explains how to implement a streaming analytics application using Kafka Streams that performs a running Top N analysis on a Kafka Topic and produces the results to another Kafka Topic. Visualized, this looks like this:


Two previous articles are relevant as reference:

This GitHub Repo contains the sources for this article: https://github.com/lucasjellema/kafka-streams-running-topN.

Note that almost all aggregations are specializations of this top-N: min, max, sum, avg and count are all simple top-1 aggregations that can be implemented using a simplified version of this code.
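
For example, a running "largest country per continent" (a top-1 aggregation) could be sketched along the same lines, reusing the countriesStream, serdes and CountryMessage class from the application listed below; this sketch is mine and not part of the GitHub sources:

// hypothetical top-1 variant (not in the repo): keep only the largest country seen so far per continent
KTable<String, CountryMessage> largestPerContinent = countriesStream
    // continent becomes the key, just like in the Top-N application
    .selectKey((k, country) -> country.continent)
    .aggregateByKey(
        // start from an empty CountryMessage (all fields null/0)
        CountryMessage::new
        , // keep the incoming country if it is larger than the current maximum
        (continent, country, currentMax) ->
            (currentMax.name == null || country.size > currentMax.size) ? country : currentMax
        , stringSerde, countryMessageSerde
        , "LargestCountryPerContinent"
    );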

To get started, go through the steps described in my previous article. This will result in an App.java class that we can flesh out.

package nl.amis.streams.countries;

import nl.amis.streams.JsonPOJOSerializer;
import nl.amis.streams.JsonPOJODeserializer;

// generic Java imports
import java.util.Properties;
import java.util.HashMap;
import java.util.Map;
import java.util.Arrays;
// Kafka imports
import org.apache.kafka.common.serialization.Serde;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.common.serialization.Serializer;
import org.apache.kafka.common.serialization.Deserializer;
// Kafka Streams related imports
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.KTable;
import org.apache.kafka.streams.kstream.KStreamBuilder;
import org.apache.kafka.streams.processor.WallclockTimestampExtractor;

import org.apache.kafka.streams.kstream.Window;
import org.apache.kafka.streams.kstream.Windowed;
import org.apache.kafka.streams.kstream.Windows;
import org.apache.kafka.streams.kstream.TimeWindows;

public class App {
    static public class CountryMessage {
        /* the JSON messages produced to the countries Topic have this structure:
         { "name" : "The Netherlands"
         , "code" : "NL
         , "continent" : "Europe"
         , "population" : 17281811
         , "size" : 42001
         };

         this class needs to have at least the corresponding fields to deserialize the JSON messages into
        */

        public String code;
        public String name;
        public int population;
        public int size;
        public String continent;
    }

    static public class CountryTop3 {

        public CountryMessage[] nrs = new CountryMessage[4];
        public CountryTop3() {}
    }

    private static final String APP_ID = "countries-top3-kafka-streaming-analysis-app";

    public static void main(String[] args) {
        System.out.println("Kafka Streams Top 3 Demonstration");

        // Create an instance of StreamsConfig from the Properties instance
        StreamsConfig config = new StreamsConfig(getProperties());
        final Serde<String> stringSerde = Serdes.String();
        final Serde<Long> longSerde = Serdes.Long();

        // define countryMessageSerde
        Map<String, Object> serdeProps = new HashMap<String, Object>();
        final Serializer<CountryMessage> countryMessageSerializer = new JsonPOJOSerializer<>();
        serdeProps.put("JsonPOJOClass", CountryMessage.class);
        countryMessageSerializer.configure(serdeProps, false);

        final Deserializer<CountryMessage> countryMessageDeserializer = new JsonPOJODeserializer<>();
        serdeProps.put("JsonPOJOClass", CountryMessage.class);
        countryMessageDeserializer.configure(serdeProps, false);
        final Serde<CountryMessage> countryMessageSerde = Serdes.serdeFrom(countryMessageSerializer, countryMessageDeserializer);

        // define countryTop3Serde
        serdeProps = new HashMap<String, Object>();
        final Serializer<CountryTop3> countryTop3Serializer = new JsonPOJOSerializer<>();
        serdeProps.put("JsonPOJOClass", CountryTop3.class);
        countryTop3Serializer.configure(serdeProps, false);

        final Deserializer<CountryTop3> countryTop3Deserializer = new JsonPOJODeserializer<>();
        serdeProps.put("JsonPOJOClass", CountryTop3.class);
        countryTop3Deserializer.configure(serdeProps, false);
        final Serde<CountryTop3> countryTop3Serde = Serdes.serdeFrom(countryTop3Serializer, countryTop3Deserializer);

        // building Kafka Streams Model
        KStreamBuilder kStreamBuilder = new KStreamBuilder();
        // the source of the streaming analysis is the topic with country messages
        KStream<String, CountryMessage> countriesStream =
            kStreamBuilder.stream(stringSerde, countryMessageSerde, "countries");

        // A hopping time window with a size of 5 minutes and an advance interval of 1 minute.
        // The window's name -- the string parameter -- is used to e.g. name the backing state store.
        long windowSizeMs = 5 * 60 * 1000L;
        long advanceMs = 1 * 60 * 1000L;
        TimeWindows.of("hopping-window-example", windowSizeMs).advanceBy(advanceMs);

        // THIS IS THE CORE OF THE STREAMING ANALYTICS:
        // top 3 largest countries per continent, published to topic Top3CountrySizePerContinent
        KTable<String, CountryTop3> top3PerContinent = countriesStream
            // the dimension for aggregation is continent; assign the continent as the key for each message
            .selectKey((k, country) -> country.continent)
            // for each key value (every continent in the stream) perform an aggregation
            .aggregateByKey(
                // first initialize a new CountryTop3 object, initially empty
                CountryTop3::new
                , // for each country in the continent, invoke the aggregator, passing in the continent, the country element and the CountryTop3 object for the continent
                (continent, countryMsg, top3) -> {
                    // add the new country as the last element in the nrs array
                    top3.nrs[3] = countryMsg;
                    // sort the array by country size, largest first
                    Arrays.sort(
                        top3.nrs, (a, b) -> {
                            // in the initial cycles, not all nrs element contain a CountryMessage object
                            if (a == null) return 1;
                            if (b == null) return -1;
                            // with two proper CountryMessage objects, do the normal comparison
                            return Integer.compare(b.size, a.size);
                        }
                    );
                    // lose nr 4, only top 3 is relevant
                    top3.nrs[3] = null;
                    return (top3);
                }
                , stringSerde, countryTop3Serde
                , "Top3LargestCountriesPerContinent"
            );
        // publish the Top3 messages to Kafka Topic Top3CountrySizePerContinent
        top3PerContinent.to(stringSerde, countryTop3Serde, "Top3CountrySizePerContinent");

        // prepare Top3 messages to be printed to the console
        top3PerContinent.<String>mapValues((top3) -> {
            String rank = " 1. " + top3.nrs[0].name + " - " + top3.nrs[0].size
                + ((top3.nrs[1] != null) ? ", 2. " + top3.nrs[1].name + " - " + top3.nrs[1].size : "")
                + ((top3.nrs[2] != null) ? ", 3. " + top3.nrs[2].name + " - " + top3.nrs[2].size : "");
            return "List for " + top3.nrs[0].continent + rank;
        })
        .print(stringSerde, stringSerde);

        System.out.println("Starting Kafka Streams Countries Example");
        KafkaStreams kafkaStreams = new KafkaStreams(kStreamBuilder, config);
        kafkaStreams.start();
        System.out.println("Now started CountriesStreams Example");
    }

    private static Properties getProperties() {
        Properties settings = new Properties();
        // Set a few key parameters
        settings.put(StreamsConfig.APPLICATION_ID_CONFIG, APP_ID);
        // Kafka bootstrap server (broker to talk to); ubuntu is the host name for my VM running Kafka, port 9092 is where the (single) broker listens
        settings.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "ubuntu:9092");
        // Apache ZooKeeper instance keeping watch over the Kafka cluster; ubuntu is the host name for my VM running Kafka, port 2181 is where the ZooKeeper listens
        settings.put(StreamsConfig.ZOOKEEPER_CONNECT_CONFIG, "ubuntu:2181");
        // default serdes for serialzing and deserializing key and value from and to streams in case no specific Serde is specified
        settings.put(StreamsConfig.KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
        settings.put(StreamsConfig.VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
        settings.put(StreamsConfig.STATE_DIR_CONFIG, "C:\\temp");
        // to work around exception Exception in thread "StreamThread-1" java.lang.IllegalArgumentException: Invalid timestamp -1
        // at org.apache.kafka.clients.producer.ProducerRecord.<init>(ProducerRecord.java:60)
        // see: https://groups.google.com/forum/#!topic/confluent-platform/5oT0GRztPBo
        settings.put(StreamsConfig.TIMESTAMP_EXTRACTOR_CLASS_CONFIG, WallclockTimestampExtractor.class);
        return settings;
    }

}

Some points that deserve special attention:

  • static class CountryTop3 – custom class to hold the actual top 3 for a continent; objects based on this class are passed around in the aggregator and are produced to the output stream & topic (a sketch of this class and its serde follows after this list)
    image
  • countryTop3Serde – defined for serializing the CountryTop3 object to the Kafka Topic, using the JsonPOJOSerializer that can translate a Java POJO to a JSON representation (which is subsequently serialized as a String to the Kafka Topic)
  • aggregator implementation in a lambda that performs the actual aggregation – the operation aggregateByKey is invoked with an Initializer that returns the initial instance of the CountryTop3 object (per continent) on which the aggregate will be built, an Aggregator that receives the continent, the CountryTop3 object and the next CountryMessage and updates the CountryTop3 object to include the new CountryMessage, the Serdes (serializer/deserializer) for the key and the value, and a String that is the name of the resulting KTable.
    image
  • mapValues in order to create printable strings from the CountryTop3 object – the lambda expression used in the call to mapValues gets a CountryTop3 object as input (the value in the top3PerContinent KTable) and maps it to a String. As a result, the KTable<String,CountryTop3> is mapped to a KTable<String,String> that is streamed to the print operation, using the stringSerde for serializing the two String values.
    image
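The CountryTop3 class and the construction of countryTop3Serde are only shown in the screenshots, so here is a minimal sketch of what they look like, reconstructed from the JSON samples and the streaming code in this article. The field names are taken from the message payloads; the class name CountryTop3Sketch, the JsonPOJOSerializer/JsonPOJODeserializer helpers and their "JsonPOJOClass" configuration key follow the pattern of the standard Kafka Streams examples and are assumptions rather than a copy of the article's exact source.

import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.common.serialization.Deserializer;
import org.apache.kafka.common.serialization.Serde;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.common.serialization.Serializer;

public class CountryTop3Sketch {

    // POJO carried on the countries topic (field names taken from the JSON samples in this article)
    static class CountryMessage {
        public String code;
        public String name;
        public int population;
        public int size;        // surface area; the ranking criterion
        public String continent;
    }

    // holder for the running top 3 of one continent; nrs[3] is the scratch slot
    // into which the aggregator drops each new country before sorting
    static class CountryTop3 {
        public CountryMessage[] nrs = new CountryMessage[4];
    }

    // serde wiring, following the JsonPOJOSerializer/JsonPOJODeserializer pattern
    // from the Kafka Streams examples; the "JsonPOJOClass" configuration key is
    // part of that example pattern, not something defined in this article
    static Serde<CountryTop3> buildCountryTop3Serde() {
        Map<String, Object> serdeProps = new HashMap<>();
        serdeProps.put("JsonPOJOClass", CountryTop3.class);

        Serializer<CountryTop3> serializer = new JsonPOJOSerializer<>();
        serializer.configure(serdeProps, false);

        Deserializer<CountryTop3> deserializer = new JsonPOJODeserializer<>();
        deserializer.configure(serdeProps, false);

        return Serdes.serdeFrom(serializer, deserializer);
    }
}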

To run the application, go through these four command line steps:

  • (in an empty directory:)
    git clone https://github.com/lucasjellema/kafka-streams-running-topN
  • (navigate to directory kafka-streams-running-topN\Kafka-Streams-Country-TopN)
    mvn package
  • (in the same directory)
    mvn install dependency:copy-dependencies
  • (in the same directory)
    java -cp target/Kafka-Streams-Country-TopN-1.0-SNAPSHOT.jar;target/dependency/* nl.amis.streams.countries.App

image

Here is a screenshot of the Node.JS application busy producing country messages:

image

And here is some of the output produced by the Kafka Streams application:

image

Note how Mayotte enters at position one for the African continent, is quickly relegated by first Mozambique and then Namibia, and finally disappears from the running top 3 when the message for Niger is consumed from the stream.

You should also know that, instead of simply pushing every change to the destination topic, we can use timing control – to calculate aggregates over a time slice and/or to produce outcomes only once every so often. I will demonstrate this in a subsequent article.

Kafka Tool shows us the topics involved in this article:

image

  • countries is the source, produced to by the Node.js application
  • Top3CountrySizePerContinent is the destination topic for the Kafka Streams application, to which the running Top 3 messages are produced
  • countries-topn-streaming-analysis-app-Top3LargestCountriesPerContinent-changelog is a Topic created by Kafka Streams on the fly as a store for intermediate results; the name of this Topic is derived from the (intermediate) KTable created in the streaming application.

When the KTable is routed to a Topic, all change events on the table are produced to that Topic. What I would have liked to be able to do is have only the latest message for each key – in this case the most recent top 3 for each continent – on the Topic. That is not what Kafka Streams does for me, not even when I am producing a KTable as opposed to a KStream. One thing I can do in this case is enable Log Compaction for the topic – although that is more like a hint to the Kafka engine than a strict instruction for removing older messages on the Topic for a key.
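In case you want to try the log-compaction route: it amounts to setting the topic-level configuration cleanup.policy=compact on the destination topic. A hedged example using the standard Kafka command line tooling (script name and flags may differ slightly per Kafka version; the ZooKeeper address is the one used elsewhere in this article):

kafka-configs.sh --zookeeper ubuntu:2181 --alter \
  --entity-type topics --entity-name Top3CountrySizePerContinent \
  --add-config cleanup.policy=compact

Even with compaction enabled, older values for a key are only removed when the log cleaner runs, so consumers may still see more than one message per continent.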

Note: the Kafka Streams application makes use of RocksDB – an embedded local key-value store, accessed by Kafka Streams through its Java API – to hold intermediate results. RocksDB stores its data locally in a directory that can be configured (the STATE_DIR_CONFIG setting shown above). During development, when you run the same analysis on the same set of test data over and over again, you may get unexpected results, because RocksDB continues with the data it has retained from previous runs. It may be wise to delete the RocksDB local data repository regularly, by simply deleting the directory:

image
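In this setup that boils down to removing the application's subdirectory under the configured state directory. For example, assuming the Windows state directory C:\temp configured above and the application id that can be read from the changelog topic name:

rmdir /S /Q C:\temp\countries-topn-streaming-analysis-app
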
Resources

An interesting resource is the Kafka Streams example KafkaMusicExample.java on generating a running Top 5 of all songs being played.

A good read is the article Processing Tweets with Kafka Streams https://www.madewithtea.com/processing-tweets-with-kafka-streams.html

The post Apache Kafka Streams – Running Top-N Aggregation grouped by Dimension – from and to Kafka Topic appeared first on AMIS Oracle and Java Blog.

row pieces, 255 columns, intra-block row chaining in detail

XTended Oracle SQL - Sat, 2017-02-11 20:54

You may know about intra-block row chaining, which may occur when a table has more than 255 columns.
But did you know that intra-block chaining works with inserts only, not with updates?

Documentation says:

When a table has more than 255 columns, rows that have data after the 255th column are likely to be chained within the same block. This is called intra-block chaining. A chained row’s pieces are chained together using the rowids of the pieces. With intra-block chaining, users receive all the data in the same block. If the row fits in the block, users do not see an effect in I/O performance, because no extra I/O operation is required to retrieve the rest of the row.

A few more details:
1. One row piece can store up to 255 columns
2. Oracle splits the columns into row pieces in reverse order (starting from the tail of the row)
3. Oracle doesn't store trailing null fields of a row (of the whole row, that is, not of each row piece)
4. A subsequent row piece can be stored in the same block only on insert. When you run an update, Oracle places the new row piece into another block.

I’ll show this in examples with block dumps:

Example 1:
1. Create table TEST with 355 columns (c_1, c_2, …, c_355)
2. insert into test(c_300) values(2)
3. dump data blocks

test code

drop table test purge;
set serverout on;
alter session set tracefile_identifier='test1';
declare
   c varchar2(32000);
   v varchar2(32000);
   cols varchar2(32000):='c_1 number(1,0)';
   vals varchar2(32000):='null';
   
   ndf int;
   nbl int;
begin
   for i in 2..355 loop
      cols:=cols||',c_'||i||' number(1,0)';
      vals:=vals||','||case 
                         when i = 300 then '2'
                         else 'null'
                       end;
   end loop;
   c:='create table test('||cols||')';
   v:='insert into test values('||vals||')';
   dbms_output.put_line(c);
   dbms_output.put_line(v);
   execute immediate (c);
   execute immediate (v);
   
   for r in (select 'alter system dump datafile '||file#||' block '||block# cmd 
             from (
                   select distinct file#,block# 
                   from v$bh 
                   where class#=1 
                   and objd in (select o.data_object_id from user_objects o where object_name='TEST') 
                   order by 1,2
                  )
            )
   loop 
       execute immediate r.cmd;
   end loop;
end;
/
disc;

[collapse]
grep -P "^(bdba|block_row_dump|tl: |col )" test1.trc

bdba: 0x018019f3
block_row_dump:

bdba: 0x018019f4
block_row_dump:

bdba: 0x018019f5
block_row_dump:

bdba: 0x018019f6
block_row_dump:

bdba: 0x018019f7
    block_row_dump:
        tl: 260 fb: -----L-- lb: 0x1  cc: 255
            col  0: *NULL*
            col  1: *NULL*
            col  2: *NULL*
            col  3: *NULL*
               ...
            col 252: *NULL*
            col 253: *NULL*
            col 254: [ 2]  c1 03
        tl: 54 fb: --H-F--- lb: 0x1  cc: 45
            col  0: *NULL*
            col  1: *NULL*
            col  2: *NULL*
            col  3: *NULL*
               ...
            col 42: *NULL*
            col 43: *NULL*
            col 44: *NULL*

The resulting dump file shows us:
1. Both row pieces are in the same block 0x018019f7
2. They contain only the first 300 columns (the trailing 55 NULL columns are not stored)
3. The first row piece contains columns c_46 – c_300
4. The second row piece contains columns c_1 – c_45 (all of them NULL)

Example 2.
But let’s test an update with the same table:
1. Create table TEST with 355 columns (c_1, c_2, …, c_355)
2. insert into test(c_1) values(null)
3. update test set c_300=2
4. dump data blocks

Test code 2

drop table test purge;
set serverout on;
alter session set tracefile_identifier='test2';
declare
   c varchar2(32000);
   cols varchar2(32000):='c_1 number(1,0)';
begin
   for i in 2..355 loop
      cols:=cols||',c_'||i||' number(1,0)';
   end loop;
   c:='create table test('||cols||')';
   execute immediate (c);
   execute immediate ('insert into test(c_1) values(null)');
   execute immediate 'update test set c_300=3';
   commit;
   for r in (select 'alter system dump datafile '||file#||' block '||block# cmd 
             from (
                   select distinct file#,block# 
                   from v$bh 
                   where class#=1 
                   and objd in (select o.data_object_id from user_objects o where object_name='TEST') 
                   order by 1,2
                  )
            )
   loop 
       execute immediate r.cmd;
   end loop;
end;
/
disc;

[collapse]
Dump:

bdba: 0x018019f3
    block_row_dump:
        tl: 260 fb: -----L-- lb: 0x1  cc: 255
            col  0: *NULL*
            col  1: *NULL*
            col  2: *NULL*
               ...
            col 251: *NULL*
            col 252: *NULL*
            col 253: *NULL*
            col 254: [ 2]  c1 04

bdba: 0x018019f4
block_row_dump:

bdba: 0x018019f5
block_row_dump:

bdba: 0x018019f6
block_row_dump:

bdba: 0x018019f7
    block_row_dump:
        tl: 54 fb: --H-F--- lb: 0x1  cc: 45
            col  0: *NULL*
            col  1: *NULL*
            col  2: *NULL*
               ...
            col 42: *NULL*
            col 43: *NULL*
            col 44: *NULL*

As you can see, there is no intra-block chaining – the second row piece was created in another block.

Example 3.
Now I want to show how Oracle splits already chained rows:
1. Create table TEST with 355 columns (c_1, c_2, …, c_355)
2. insert into test(c_1) values(1)
3. update test set c_300=2
4. update test set c_301=3
5. update test set c_302=4
6. dump data blocks

Test code 3

drop table test purge;
set serverout on
alter session set tracefile_identifier='test3';

declare
   cols varchar2(32000):='c_1 number(1,0)';
   
   procedure print_and_exec(c varchar2) as
   begin
      dbms_output.put_line(c);
      execute immediate c;
   end;
begin
   for i in 2..355 loop
      cols:=cols||',c_'||i||' number(1,0)';
   end loop;
   print_and_exec ('create table test('||cols||')');
   print_and_exec ('insert into test(c_1) values(1)');
   print_and_exec ('update test set c_300=2');
   print_and_exec ('update test set c_301=3');
   print_and_exec ('update test set c_302=4');
   commit;
   execute immediate 'alter system flush buffer_cache';
   execute immediate 'select count(*) from test';
   for r in (select 'alter system dump datafile '||file#||' block '||block# cmd 
             from (
                   select distinct file#,block# 
                   from v$bh 
                   where class#=1 
                   and objd in (select o.data_object_id from user_objects o where object_name='TEST') 
                   order by 1,2
                  )
            )
   loop 
       execute immediate r.cmd;
   end loop;
end;
/
disc;

[collapse]

bdba: 0x018019f3
    block_row_dump:
        tl: 10 fb: -------- lb: 0x1  cc: 1
            col  0: *NULL*

bdba: 0x018019f4
    block_row_dump:
        tl: 264 fb: -----L-- lb: 0x1  cc: 255
            col  0: *NULL*
            col  1: *NULL*
               ...
            col 249: *NULL*
            col 250: *NULL*
            col 251: *NULL*
            col 252: [ 2]  c1 03
            col 253: [ 2]  c1 04
            col 254: [ 2]  c1 05

bdba: 0x018019f5
block_row_dump:

bdba: 0x018019f6
    block_row_dump:
        tl: 10 fb: -------- lb: 0x1  cc: 1
            col  0: *NULL*

bdba: 0x018019f7
    block_row_dump:
        tl: 56 fb: --H-F--- lb: 0x1  cc: 45
            col  0: [ 2]  c1 02
            col  1: *NULL*
            col  2: *NULL*
            col  3: *NULL*
               ...
            col 42: *NULL*
            col 43: *NULL*
            col 44: *NULL*

This dump shows us 4 row pieces: the first row piece contains 255 columns, the second contains 45, and the remaining 2 row pieces contain just one column each.
So we can analyze it step by step:
2. insert into test(c_1) values(1)
After the insert we have just one row piece with 1 field.

3. update test set c_300=2
After this update, we have 2 row pieces:
1) c_1-c_45
2) c_46-c_300

4. update test set c_301=3
This update splits row piece c_46-c_300 into 2 row pieces:
1) c_46
2) c_47-c_301
So we have 3 row pieces now: c_1-c_45, c_46, c_47-c_301

5. update test set c_302=4
This update splits row piece c_47-c_301 into 2 row pieces:
1) c_47
2) c_48-c_302
And we’ve got 4 row pieces: c_1-c_45, c_46, c_47, c_48-c_302

You can try Example 4 and see how many blocks you can get; all of them (except the last one) will have only 1 column each:

Test code 4

drop table test purge;
set serverout on
alter session set tracefile_identifier='test4';

declare
   cols varchar2(32000):='c_1 char(3)';
   
   procedure print_and_exec(c varchar2) as
   begin
      dbms_output.put_line(c);
      execute immediate c;
   end;
begin
   for i in 2..355 loop
      cols:=cols||',c_'||i||' char(3)';
   end loop;
   print_and_exec ('create table test('||cols||')');
   print_and_exec ('insert into test(c_1) values(null)');
   commit;
   for i in 256..355 loop
      execute immediate 'update test set c_'||i||'='||i;
   end loop;
   commit;
   execute immediate 'alter system flush buffer_cache';
   for r in (select 'alter system dump datafile '||file#||' block '||block# cmd 
             from (
                   select distinct file#,block# 
                   from v$bh 
                   where class#=1 
                   and objd in (select o.data_object_id from user_objects o where object_name='TEST') 
                   order by 1,2
                  )
            )
   loop 
       execute immediate r.cmd;
   end loop;
end;
/
disc;

[collapse]

grep

bdba: 0x01801281
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x01801282
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x01801283
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x01801284
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x01801285
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x01801286
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x01801287
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x01801288
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x01801289
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x0180128a
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x0180128b
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x0180128c
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x0180128d
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x0180128e
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x0180128f
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x01801291
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x01801292
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x01801293
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x01801294
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x01801295
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x01801296
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x01801297
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x01801298
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x01801299
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x0180129a
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x0180129b
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x0180129c
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x0180129d
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x0180129e
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x0180129f
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x018012a1
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x018012a2
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x018012a3
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x018012a4
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x018012a5
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x018012a6
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x018012a7
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x018012a8
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x018012a9
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x018012aa
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x018012ab
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x018012ac
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x018012ad
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x018012ae
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x018012af
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x018012b1
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x018012b2
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x018012b3
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x018012b4
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x018012b5
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x018012b6
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x018012b7
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x018012b8
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x018012b9
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x018012ba
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x018012bb
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x018012bc
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x018012bd
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x018012be
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x018012bf
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x018012c1
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x018012c2
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x018012c3
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x018012c4
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x018012c5
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x018012c6
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x018012c7
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x018012c8
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x018012c9
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x018012ca
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x018012cb
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x018012cc
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x018012cd
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x018012ce
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x018012cf
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x018012d1
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x018012d2
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x018012d3
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x018012d4
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x018012d5
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x018012d6
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x018012d7
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x018012d8
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x018012d9
block_row_dump:
bdba: 0x018012da
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x018012db
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x018012dc
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x018012dd
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x018012de
block_row_dump:
tl: 558 fb: -----L-- lb: 0x1  cc: 255
col  0: *NULL*
col  1: *NULL*
col  2: *NULL*
   ...
col 152: *NULL*
col 153: *NULL*
col 154: *NULL*
col 155: [ 3]  32 35 36
col 156: [ 3]  32 35 37
col 157: [ 3]  32 35 38
col 158: [ 3]  32 35 39
  ...
col 251: [ 3]  33 35 32
col 252: [ 3]  33 35 33
col 253: [ 3]  33 35 34
col 254: [ 3]  33 35 35
bdba: 0x018012df
block_row_dump:
bdba: 0x018019f3
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x018019f4
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x018019f5
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x018019f6
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x018019f7
block_row_dump:
tl: 10 fb: --H-F--- lb: 0x2  cc: 1
col  0: *NULL*
bdba: 0x018019f8
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x018019f9
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x018019fa
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x018019fb
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x018019fc
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x018019fd
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x018019fe
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*
bdba: 0x018019ff
block_row_dump:
tl: 10 fb: -------- lb: 0x1  cc: 1
col  0: *NULL*

[collapse]

Categories: Development

Review at amazon: Fundamentals of 5G mobile networks

Dietrich Schroff - Sat, 2017-02-11 16:02
Today I read the book "Fundamentals of 5G mobile networks":

This book is a collection of 10 articles around 5G, framed by an introduction and summary from the editor Jonathan Rodriguez.

More or less every article starts with the same motivation for 5G: mobile traffic will increase 1000 times from 2010 to 2020.
Here some findings from the articles:
  • 5G is not one radio access technology (RAT). It will be a collection of RATs like small cells, SON, ... (chapter 1)
  • Spectral efficiency is about 1 bps/Hz/cell - gains can be achieved with multi-antenna techniques and small cells (chapter 3)
  • 5G devices are expected to exploit spectrum opportunities efficiently on the fly (chapter 6)
  • new transceivers have to be built with excellent power saving characteristics and tunable RF frontend radio. (chapter 11)
If you are interested, take a look at my review at amazon.de (like all my reviews: written in German ;-) )

Data Pump or Data Pain

Michael Dinh - Sat, 2017-02-11 13:30

WARNING: Rants ahead.

Simple request: migrate a schema from one database to another, right?

Create a new database, then perform a schema export and import; this only works if the objects are self-contained.

The following objects, to name a few, are missing from a schema-level export:
DATABASE_EXPORT/PASSWORD_VERIFY_FUNCTION
DATABASE_EXPORT/SCHEMA/PUBLIC_SYNONYM/SYNONYM

Here's what was done, and hopefully nothing was missed. The tablespaces were pre-created.

$ cat impdp_full_public.par

directory=DATA_PUMP_DIR
userid="/ as sysdba"
metrics=Y
dumpfile=full.dmp
logfile=impdp_full_public.log
include=PASSWORD_VERIFY_FUNCTION
include=PROFILE
include=ROLE
include=PUBLIC_SYNONYM/SYNONYM

$ cat impdp_full_schema.par

directory=DATA_PUMP_DIR
userid="/ as sysdba"
metrics=Y
dumpfile=full.dmp
logfile=impdp_full_schema.log
include=SCHEMA:"IN ('DEMO')"

$ cat expdp_schema.par

directory=DATA_PUMP_DIR
userid="/ as sysdba"
flashback_time="TO_TIMESTAMP(TO_CHAR(systimestamp,'DD-MM-YYYY HH24:MI:SS'),'DD-MM-YYYY HH24:MI:SS')"
metrics=Y
reuse_dumpfiles=Y
dumpfile=schema.dmp
logfile=exp_schema.log
SCHEMAS=ggs_admin,demo

$ expdp parfile=expdp_schema.par

Export: Release 11.2.0.4.0 - Production on Sat Feb 11 10:47:22 2017

Copyright (c) 1982, 2011, Oracle and/or its affiliates.  All rights reserved.

Connected to: Oracle Database 11g Enterprise Edition Release 11.2.0.4.0 - 64bit Production
With the Partitioning and Real Application Testing options
FLASHBACK automatically enabled to preserve database integrity.
Starting "SYS"."SYS_EXPORT_SCHEMA_01":  /******** AS SYSDBA parfile=expdp_schema.par
Startup took 1 seconds
Estimate in progress using BLOCKS method...
Processing object type SCHEMA_EXPORT/TABLE/TABLE_DATA
     Estimated 12 TABLE_DATA objects in 0 seconds
Total estimation using BLOCKS method: 19 MB
Processing object type SCHEMA_EXPORT/USER
     Completed 2 USER objects in 0 seconds
Processing object type SCHEMA_EXPORT/SYSTEM_GRANT
     Completed 14 SYSTEM_GRANT objects in 0 seconds
Processing object type SCHEMA_EXPORT/ROLE_GRANT
     Completed 5 ROLE_GRANT objects in 0 seconds
Processing object type SCHEMA_EXPORT/DEFAULT_ROLE
     Completed 2 DEFAULT_ROLE objects in 0 seconds
Processing object type SCHEMA_EXPORT/TABLESPACE_QUOTA
     Completed 1 TABLESPACE_QUOTA objects in 0 seconds
Processing object type SCHEMA_EXPORT/PRE_SCHEMA/PROCACT_SCHEMA
     Completed 3 PROCACT_SCHEMA objects in 1 seconds
Processing object type SCHEMA_EXPORT/SYNONYM/SYNONYM
     Completed 2 SYNONYM objects in 0 seconds
Processing object type SCHEMA_EXPORT/TYPE/TYPE_SPEC
     Completed 2 TYPE objects in 0 seconds
Processing object type SCHEMA_EXPORT/SEQUENCE/SEQUENCE
     Completed 1 SEQUENCE objects in 0 seconds
Processing object type SCHEMA_EXPORT/TABLE/PROCACT_INSTANCE
     Completed 14 PROCACT_INSTANCE objects in 0 seconds
Processing object type SCHEMA_EXPORT/TABLE/TABLE
     Completed 12 TABLE objects in 0 seconds
Processing object type SCHEMA_EXPORT/FUNCTION/FUNCTION
     Completed 1 FUNCTION objects in 0 seconds
Processing object type SCHEMA_EXPORT/PROCEDURE/PROCEDURE
     Completed 2 PROCEDURE objects in 0 seconds
Processing object type SCHEMA_EXPORT/FUNCTION/ALTER_FUNCTION
     Completed 1 ALTER_FUNCTION objects in 0 seconds
Processing object type SCHEMA_EXPORT/PROCEDURE/ALTER_PROCEDURE
     Completed 2 ALTER_PROCEDURE objects in 0 seconds
Processing object type SCHEMA_EXPORT/TABLE/INDEX/INDEX
     Completed 1 INDEX objects in 1 seconds
Processing object type SCHEMA_EXPORT/TABLE/CONSTRAINT/CONSTRAINT
     Completed 4 CONSTRAINT objects in 1 seconds
Processing object type SCHEMA_EXPORT/TABLE/INDEX/STATISTICS/INDEX_STATISTICS
     Completed 7 INDEX_STATISTICS objects in 0 seconds
Processing object type SCHEMA_EXPORT/VIEW/VIEW
     Completed 5 VIEW objects in 0 seconds
Processing object type SCHEMA_EXPORT/TABLE/STATISTICS/TABLE_STATISTICS
     Completed 12 TABLE_STATISTICS objects in 0 seconds
Processing object type SCHEMA_EXPORT/TABLE/POST_INSTANCE/PROCACT_INSTANCE
     Completed 8 PROCACT_INSTANCE objects in 0 seconds
Processing object type SCHEMA_EXPORT/TABLE/POST_INSTANCE/PROCDEPOBJ
     Completed 8 PROCDEPOBJ objects in 1 seconds
Processing object type SCHEMA_EXPORT/POST_SCHEMA/PROCOBJ
     Completed 11 PROCOBJ objects in 0 seconds
Processing object type SCHEMA_EXPORT/POST_SCHEMA/PROCACT_SCHEMA
     Completed 1 PROCACT_SCHEMA objects in 0 seconds
. . exported "DEMO"."T"                                  6.087 MB   68540 rows
. . exported "GGS_ADMIN"."OGG$Q_TAB_E_HAWK"              16.98 KB       0 rows
. . exported "GGS_ADMIN"."AQ$_OGG$Q_TAB_E_HAWK_C"        5.804 KB       0 rows
. . exported "GGS_ADMIN"."AQ$_OGG$Q_TAB_E_HAWK_G"        13.16 KB       0 rows
. . exported "GGS_ADMIN"."AQ$_OGG$Q_TAB_E_HAWK_H"        9.039 KB       0 rows
. . exported "GGS_ADMIN"."AQ$_OGG$Q_TAB_E_HAWK_I"        9.007 KB       0 rows
. . exported "GGS_ADMIN"."AQ$_OGG$Q_TAB_E_HAWK_L"        7.828 KB       0 rows
. . exported "GGS_ADMIN"."AQ$_OGG$Q_TAB_E_HAWK_S"        11.31 KB       3 rows
. . exported "GGS_ADMIN"."AQ$_OGG$Q_TAB_E_HAWK_T"        6.218 KB       0 rows
. . exported "GGS_ADMIN"."GG_HEARTBEAT_SEED"             11.81 KB       1 rows
. . exported "GGS_ADMIN"."GG_HEARTBEAT"                      0 KB       0 rows
. . exported "GGS_ADMIN"."GG_HEARTBEAT_HISTORY"              0 KB       0 rows
     Completed 12 SCHEMA_EXPORT/TABLE/TABLE_DATA objects in 1 seconds
Master table "SYS"."SYS_EXPORT_SCHEMA_01" successfully loaded/unloaded
******************************************************************************
Dump file set for SYS.SYS_EXPORT_SCHEMA_01 is:
  /u01/app/oracle/product/11.2.0.4/db_1/rdbms/log/schema.dmp
Job "SYS"."SYS_EXPORT_SCHEMA_01" successfully completed at Sat Feb 11 10:47:29 2017 elapsed 0 00:00:07

$ cat expdp_full.par

directory=DATA_PUMP_DIR
userid="/ as sysdba"
flashback_time=systimestamp
metrics=Y
exclude=statistics
reuse_dumpfiles=Y
dumpfile=full.dmp
#PARALLEL=2
#DUMPFILE=full%U.dmp
logfile=expdp_full.log
FULL=Y

$ expdp parfile=expdp_full.par

Export: Release 11.2.0.4.0 - Production on Sat Feb 11 10:59:36 2017

Copyright (c) 1982, 2011, Oracle and/or its affiliates.  All rights reserved.

Connected to: Oracle Database 11g Enterprise Edition Release 11.2.0.4.0 - 64bit Production
With the Partitioning and Real Application Testing options
FLASHBACK automatically enabled to preserve database integrity.
Starting "SYS"."SYS_EXPORT_FULL_01":  /******** AS SYSDBA parfile=expdp_full.par
Startup took 1 seconds
Estimate in progress using BLOCKS method...
Processing object type DATABASE_EXPORT/SCHEMA/TABLE/TABLE_DATA
     Estimated 69 TABLE_DATA objects in 3 seconds
Total estimation using BLOCKS method: 23.31 MB
Processing object type DATABASE_EXPORT/TABLESPACE
     Completed 4 TABLESPACE objects in 1 seconds
Processing object type DATABASE_EXPORT/PASSWORD_VERIFY_FUNCTION
     Completed 1 PASSWORD_VERIFY_FUNCTION objects in 0 seconds
Processing object type DATABASE_EXPORT/PROFILE
     Completed 1 PROFILE objects in 0 seconds
Processing object type DATABASE_EXPORT/SYS_USER/USER
     Completed 1 USER objects in 0 seconds
Processing object type DATABASE_EXPORT/SCHEMA/USER
     Completed 4 USER objects in 0 seconds
Processing object type DATABASE_EXPORT/ROLE
     Completed 16 ROLE objects in 0 seconds
Processing object type DATABASE_EXPORT/GRANT/SYSTEM_GRANT/PROC_SYSTEM_GRANT
     Completed 4 PROC_SYSTEM_GRANT objects in 0 seconds
Processing object type DATABASE_EXPORT/SCHEMA/GRANT/SYSTEM_GRANT
     Completed 48 SYSTEM_GRANT objects in 0 seconds
Processing object type DATABASE_EXPORT/SCHEMA/ROLE_GRANT
     Completed 49 ROLE_GRANT objects in 0 seconds
Processing object type DATABASE_EXPORT/SCHEMA/DEFAULT_ROLE
     Completed 4 DEFAULT_ROLE objects in 0 seconds
Processing object type DATABASE_EXPORT/SCHEMA/TABLESPACE_QUOTA
     Completed 1 TABLESPACE_QUOTA objects in 0 seconds
Processing object type DATABASE_EXPORT/RESOURCE_COST
     Completed 1 RESOURCE_COST objects in 0 seconds
Processing object type DATABASE_EXPORT/TRUSTED_DB_LINK
     Completed 1 TRUSTED_DB_LINK objects in 0 seconds
Processing object type DATABASE_EXPORT/SCHEMA/SEQUENCE/SEQUENCE
     Completed 18 SEQUENCE objects in 0 seconds
Processing object type DATABASE_EXPORT/DIRECTORY/DIRECTORY
     Completed 1 DIRECTORY objects in 0 seconds
Processing object type DATABASE_EXPORT/DIRECTORY/GRANT/OWNER_GRANT/OBJECT_GRANT
     Completed 4 OBJECT_GRANT objects in 1 seconds
Processing object type DATABASE_EXPORT/CONTEXT
     Completed 3 CONTEXT objects in 0 seconds
Processing object type DATABASE_EXPORT/SCHEMA/PUBLIC_SYNONYM/SYNONYM
     Completed 6 SYNONYM objects in 0 seconds
Processing object type DATABASE_EXPORT/SCHEMA/SYNONYM
     Completed 10 SYNONYM objects in 0 seconds
Processing object type DATABASE_EXPORT/SCHEMA/TYPE/TYPE_SPEC
     Completed 11 TYPE objects in 0 seconds
Processing object type DATABASE_EXPORT/SYSTEM_PROCOBJACT/PRE_SYSTEM_ACTIONS/PROCACT_SYSTEM
     Completed 3 PROCACT_SYSTEM objects in 0 seconds
Processing object type DATABASE_EXPORT/SYSTEM_PROCOBJACT/PROCOBJ
     Completed 17 PROCOBJ objects in 0 seconds
Processing object type DATABASE_EXPORT/SYSTEM_PROCOBJACT/POST_SYSTEM_ACTIONS/PROCACT_SYSTEM
     Completed 4 PROCACT_SYSTEM objects in 1 seconds
Processing object type DATABASE_EXPORT/SCHEMA/PROCACT_SCHEMA
     Completed 7 PROCACT_SCHEMA objects in 0 seconds
Processing object type DATABASE_EXPORT/SCHEMA/TABLE/PROCACT_INSTANCE
     Completed 14 PROCACT_INSTANCE objects in 0 seconds
Processing object type DATABASE_EXPORT/SCHEMA/TABLE/TABLE
     Completed 72 TABLE objects in 1 seconds
Processing object type DATABASE_EXPORT/SCHEMA/TABLE/PRE_TABLE_ACTION
     Completed 6 PRE_TABLE_ACTION objects in 0 seconds
Processing object type DATABASE_EXPORT/SCHEMA/TABLE/GRANT/OWNER_GRANT/OBJECT_GRANT
     Completed 24 OBJECT_GRANT objects in 0 seconds
Processing object type DATABASE_EXPORT/SCHEMA/TABLE/COMMENT
     Completed 424 COMMENT objects in 0 seconds
Processing object type DATABASE_EXPORT/SCHEMA/PACKAGE/PACKAGE_SPEC
     Completed 1 PACKAGE objects in 0 seconds
Processing object type DATABASE_EXPORT/SCHEMA/FUNCTION/FUNCTION
     Completed 5 FUNCTION objects in 1 seconds
Processing object type DATABASE_EXPORT/SCHEMA/FUNCTION/GRANT/OWNER_GRANT/OBJECT_GRANT
     Completed 4 OBJECT_GRANT objects in 0 seconds
Processing object type DATABASE_EXPORT/SCHEMA/PROCEDURE/PROCEDURE
     Completed 4 PROCEDURE objects in 0 seconds
Processing object type DATABASE_EXPORT/SCHEMA/FUNCTION/ALTER_FUNCTION
     Completed 5 ALTER_FUNCTION objects in 0 seconds
Processing object type DATABASE_EXPORT/SCHEMA/PROCEDURE/ALTER_PROCEDURE
     Completed 4 ALTER_PROCEDURE objects in 0 seconds
Processing object type DATABASE_EXPORT/SCHEMA/TABLE/INDEX/INDEX
     Completed 106 INDEX objects in 1 seconds
Processing object type DATABASE_EXPORT/SCHEMA/TABLE/CONSTRAINT/CONSTRAINT
     Completed 89 CONSTRAINT objects in 0 seconds
Processing object type DATABASE_EXPORT/SCHEMA/VIEW/VIEW
     Completed 17 VIEW objects in 0 seconds
Processing object type DATABASE_EXPORT/SCHEMA/VIEW/GRANT/OWNER_GRANT/OBJECT_GRANT
     Completed 3 OBJECT_GRANT objects in 0 seconds
Processing object type DATABASE_EXPORT/SCHEMA/VIEW/COMMENT
     Completed 7 COMMENT objects in 1 seconds
Processing object type DATABASE_EXPORT/SCHEMA/PACKAGE_BODIES/PACKAGE/PACKAGE_BODY
     Completed 1 PACKAGE_BODY objects in 0 seconds
Processing object type DATABASE_EXPORT/SCHEMA/TABLE/CONSTRAINT/REF_CONSTRAINT
     Completed 36 REF_CONSTRAINT objects in 0 seconds
Processing object type DATABASE_EXPORT/SCHEMA/TABLE/POST_TABLE_ACTION
     Completed 4 POST_TABLE_ACTION objects in 0 seconds
Processing object type DATABASE_EXPORT/SCHEMA/TABLE/TRIGGER
     Completed 2 TRIGGER objects in 0 seconds
Processing object type DATABASE_EXPORT/SCHEMA/TABLE/POST_INSTANCE/PROCACT_INSTANCE
     Completed 8 PROCACT_INSTANCE objects in 0 seconds
Processing object type DATABASE_EXPORT/SCHEMA/TABLE/POST_INSTANCE/PROCDEPOBJ
     Completed 8 PROCDEPOBJ objects in 0 seconds
Processing object type DATABASE_EXPORT/SCHEMA/POST_SCHEMA/PROCOBJ
     Completed 11 PROCOBJ objects in 0 seconds
Processing object type DATABASE_EXPORT/SCHEMA/POST_SCHEMA/PROCACT_SCHEMA
     Completed 4 PROCACT_SCHEMA objects in 0 seconds
Processing object type DATABASE_EXPORT/AUDIT
     Completed 29 AUDIT objects in 1 seconds
. . exported "DEMO"."T"                                  6.087 MB   68540 rows
. . exported "GGS_ADMIN"."OGG$Q_TAB_E_HAWK"              16.98 KB       0 rows
. . exported "GGS_ADMIN"."AQ$_OGG$Q_TAB_E_HAWK_C"        5.804 KB       0 rows
. . exported "GGS_ADMIN"."AQ$_OGG$Q_TAB_E_HAWK_G"        13.16 KB       0 rows
. . exported "GGS_ADMIN"."AQ$_OGG$Q_TAB_E_HAWK_H"        9.039 KB       0 rows
. . exported "GGS_ADMIN"."AQ$_OGG$Q_TAB_E_HAWK_I"        9.007 KB       0 rows
. . exported "GGS_ADMIN"."AQ$_OGG$Q_TAB_E_HAWK_L"        7.828 KB       0 rows
. . exported "GGS_ADMIN"."AQ$_OGG$Q_TAB_E_HAWK_S"        11.31 KB       3 rows
. . exported "GGS_ADMIN"."AQ$_OGG$Q_TAB_E_HAWK_T"        6.218 KB       0 rows
. . exported "GGS_ADMIN"."GG_HEARTBEAT_SEED"             11.81 KB       1 rows
. . exported "SYSTEM"."DEF$_LOB"                         6.664 KB       0 rows
. . exported "OUTLN"."OL$HINTS"                          13.12 KB       0 rows
. . exported "SYSTEM"."DEF$_AQCALL"                      14.73 KB       0 rows
. . exported "SYSTEM"."DEF$_AQERROR"                     14.73 KB       0 rows
. . exported "SYSTEM"."REPCAT$_EXCEPTIONS"               7.843 KB       0 rows
. . exported "SYSTEM"."REPCAT$_INSTANTIATION_DDL"         6.25 KB       0 rows
. . exported "SYSTEM"."REPCAT$_RUNTIME_PARMS"            5.859 KB       0 rows
. . exported "SYSTEM"."REPCAT$_TEMPLATE_OBJECTS"         10.72 KB       0 rows
. . exported "SYSTEM"."REPCAT$_TEMPLATE_PARMS"           7.085 KB       0 rows
. . exported "SYSTEM"."REPCAT$_USER_PARM_VALUES"         6.257 KB       0 rows
. . exported "OUTLN"."OL$"                               10.17 KB       0 rows
. . exported "OUTLN"."OL$NODES"                          7.781 KB       0 rows
. . exported "SYSTEM"."DEF$_CALLDEST"                        7 KB       0 rows
. . exported "SYSTEM"."DEF$_DEFAULTDEST"                 5.007 KB       0 rows
. . exported "SYSTEM"."DEF$_DESTINATION"                 13.50 KB       0 rows
. . exported "SYSTEM"."DEF$_ERROR"                       8.210 KB       0 rows
. . exported "SYSTEM"."DEF$_ORIGIN"                      7.390 KB       0 rows
. . exported "SYSTEM"."DEF$_PROPAGATOR"                  5.796 KB       0 rows
. . exported "SYSTEM"."DEF$_PUSHED_TRANSACTIONS"         6.218 KB       0 rows
. . exported "SYSTEM"."REPCAT$_AUDIT_ATTRIBUTE"          6.328 KB       2 rows
. . exported "SYSTEM"."REPCAT$_AUDIT_COLUMN"             7.843 KB       0 rows
. . exported "SYSTEM"."REPCAT$_COLUMN_GROUP"             6.210 KB       0 rows
. . exported "SYSTEM"."REPCAT$_CONFLICT"                 6.226 KB       0 rows
. . exported "SYSTEM"."REPCAT$_DDL"                      7.406 KB       0 rows
. . exported "SYSTEM"."REPCAT$_EXTENSION"                9.890 KB       0 rows
. . exported "SYSTEM"."REPCAT$_FLAVORS"                  7.390 KB       0 rows
. . exported "SYSTEM"."REPCAT$_FLAVOR_OBJECTS"           8.187 KB       0 rows
. . exported "SYSTEM"."REPCAT$_GENERATED"                8.640 KB       0 rows
. . exported "SYSTEM"."REPCAT$_GROUPED_COLUMN"           6.601 KB       0 rows
. . exported "SYSTEM"."REPCAT$_KEY_COLUMNS"              6.203 KB       0 rows
. . exported "SYSTEM"."REPCAT$_OBJECT_PARMS"             5.429 KB       0 rows
. . exported "SYSTEM"."REPCAT$_OBJECT_TYPES"             6.882 KB      28 rows
. . exported "SYSTEM"."REPCAT$_PARAMETER_COLUMN"         8.679 KB       0 rows
. . exported "SYSTEM"."REPCAT$_PRIORITY"                 9.070 KB       0 rows
. . exported "SYSTEM"."REPCAT$_PRIORITY_GROUP"           6.656 KB       0 rows
. . exported "SYSTEM"."REPCAT$_REFRESH_TEMPLATES"        10.69 KB       0 rows
. . exported "SYSTEM"."REPCAT$_REPCAT"                   7.398 KB       0 rows
. . exported "SYSTEM"."REPCAT$_REPCATLOG"                13.09 KB       0 rows
. . exported "SYSTEM"."REPCAT$_REPCOLUMN"                13.79 KB       0 rows
. . exported "SYSTEM"."REPCAT$_REPGROUP_PRIVS"           7.390 KB       0 rows
. . exported "SYSTEM"."REPCAT$_REPOBJECT"                10.67 KB       0 rows
. . exported "SYSTEM"."REPCAT$_REPPROP"                  8.226 KB       0 rows
. . exported "SYSTEM"."REPCAT$_REPSCHEMA"                8.609 KB       0 rows
. . exported "SYSTEM"."REPCAT$_RESOLUTION"                8.25 KB       0 rows
. . exported "SYSTEM"."REPCAT$_RESOLUTION_METHOD"        5.835 KB      19 rows
. . exported "SYSTEM"."REPCAT$_RESOLUTION_STATISTICS"    8.265 KB       0 rows
. . exported "SYSTEM"."REPCAT$_RESOL_STATS_CONTROL"      7.835 KB       0 rows
. . exported "SYSTEM"."REPCAT$_SITES_NEW"                7.015 KB       0 rows
. . exported "SYSTEM"."REPCAT$_SITE_OBJECTS"             6.210 KB       0 rows
. . exported "SYSTEM"."REPCAT$_SNAPGROUP"                    7 KB       0 rows
. . exported "SYSTEM"."REPCAT$_TEMPLATE_REFGROUPS"       7.046 KB       0 rows
. . exported "SYSTEM"."REPCAT$_TEMPLATE_SITES"           9.062 KB       0 rows
. . exported "SYSTEM"."REPCAT$_TEMPLATE_STATUS"          5.484 KB       3 rows
. . exported "SYSTEM"."REPCAT$_TEMPLATE_TARGETS"         6.648 KB       0 rows
. . exported "SYSTEM"."REPCAT$_TEMPLATE_TYPES"           6.289 KB       2 rows
. . exported "SYSTEM"."REPCAT$_USER_AUTHORIZATIONS"      5.828 KB       0 rows
. . exported "SYSTEM"."SQLPLUS_PRODUCT_PROFILE"          7.867 KB       0 rows
. . exported "GGS_ADMIN"."GG_HEARTBEAT"                      0 KB       0 rows
. . exported "GGS_ADMIN"."GG_HEARTBEAT_HISTORY"              0 KB       0 rows
     Completed 69 DATABASE_EXPORT/SCHEMA/TABLE/TABLE_DATA objects in 2 seconds
Master table "SYS"."SYS_EXPORT_FULL_01" successfully loaded/unloaded
******************************************************************************
Dump file set for SYS.SYS_EXPORT_FULL_01 is:
  /u01/app/oracle/product/11.2.0.4/db_1/rdbms/log/full.dmp
Job "SYS"."SYS_EXPORT_FULL_01" successfully completed at Sat Feb 11 10:59:53 2017 elapsed 0 00:00:17
$ cat impdp_full_sql.par
directory=DATA_PUMP_DIR
userid="/ as sysdba"
metrics=Y
dumpfile=full.dmp
logfile=impdp_syn.log
include=PASSWORD_VERIFY_FUNCTION
include=PROFILE
include=ROLE
include=PUBLIC_SYNONYM/SYNONYM
sqlfile=impdp_full.sql

$ impdp parfile=impdp_full_sql.par

Import: Release 11.2.0.4.0 - Production on Sat Feb 11 11:09:06 2017

Copyright (c) 1982, 2011, Oracle and/or its affiliates.  All rights reserved.

Connected to: Oracle Database 11g Enterprise Edition Release 11.2.0.4.0 - 64bit Production
With the Partitioning and Real Application Testing options
Startup took 0 seconds
Master table "SYS"."SYS_SQL_FILE_FULL_01" successfully loaded/unloaded
Starting "SYS"."SYS_SQL_FILE_FULL_01":  /******** AS SYSDBA parfile=impdp_full_sql.par
Processing object type DATABASE_EXPORT/PASSWORD_VERIFY_FUNCTION
     Completed 1 PASSWORD_VERIFY_FUNCTION objects in 0 seconds
Processing object type DATABASE_EXPORT/PROFILE
     Completed 1 PROFILE objects in 0 seconds
Processing object type DATABASE_EXPORT/ROLE
     Completed 16 ROLE objects in 0 seconds
Processing object type DATABASE_EXPORT/SCHEMA/PUBLIC_SYNONYM/SYNONYM
     Completed 6 SYNONYM objects in 0 seconds
Job "SYS"."SYS_SQL_FILE_FULL_01" successfully completed at Sat Feb 11 11:09:09 2017 elapsed 0 00:00:02

$ cat $ORACLE_HOME/rdbms/log/impdp_full.sql

-- CONNECT SYS
ALTER SESSION SET EVENTS '10150 TRACE NAME CONTEXT FOREVER, LEVEL 1';
ALTER SESSION SET EVENTS '10904 TRACE NAME CONTEXT FOREVER, LEVEL 1';
ALTER SESSION SET EVENTS '25475 TRACE NAME CONTEXT FOREVER, LEVEL 1';
ALTER SESSION SET EVENTS '10407 TRACE NAME CONTEXT FOREVER, LEVEL 1';
ALTER SESSION SET EVENTS '10851 TRACE NAME CONTEXT FOREVER, LEVEL 1';
ALTER SESSION SET EVENTS '22830 TRACE NAME CONTEXT FOREVER, LEVEL 192 ';
-- new object type path: DATABASE_EXPORT/PASSWORD_VERIFY_FUNCTION
CREATE FUNCTION verify_function_11G
(username varchar2,
  password varchar2,
  old_password varchar2)
  RETURN boolean IS
   n boolean;
   m integer;
   differ integer;
   isdigit boolean;
   ischar  boolean;
   ispunct boolean;
   db_name varchar2(40);
   digitarray varchar2(20);
   punctarray varchar2(25);
   chararray varchar2(52);
   i_char varchar2(10);
   simple_password varchar2(10);
   reverse_user varchar2(32);

BEGIN
   digitarray:= '0123456789';
   chararray:= 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ';

   -- Check for the minimum length of the password
   IF length(password) < 8 THEN
      raise_application_error(-20001, 'Password length less than 8');
   END IF;


   -- Check if the password is same as the username or username(1-100)
   IF NLS_LOWER(password) = NLS_LOWER(username) THEN
     raise_application_error(-20002, 'Password same as or similar to user');
   END IF;
   FOR i IN 1..100 LOOP
      i_char := to_char(i);
      if NLS_LOWER(username)|| i_char = NLS_LOWER(password) THEN
        raise_application_error(-20005, 'Password same as or similar to user name ');
      END IF;
    END LOOP;

   -- Check if the password is same as the username reversed

   FOR i in REVERSE 1..length(username) LOOP
     reverse_user := reverse_user || substr(username, i, 1);
   END LOOP;
   IF NLS_LOWER(password) = NLS_LOWER(reverse_user) THEN
     raise_application_error(-20003, 'Password same as username reversed');
   END IF;

   -- Check if the password is the same as server name and or servername(1-100)
   select name into db_name from sys.v$database;
   if NLS_LOWER(db_name) = NLS_LOWER(password) THEN
      raise_application_error(-20004, 'Password same as or similar to server name');
   END IF;
   FOR i IN 1..100 LOOP
      i_char := to_char(i);
      if NLS_LOWER(db_name)|| i_char = NLS_LOWER(password) THEN
        raise_application_error(-20005, 'Password same as or similar to server name ');
      END IF;
    END LOOP;

   -- Check if the password is too simple. A dictionary of words may be
   -- maintained and a check may be made so as not to allow the words
   -- that are too simple for the password.
   IF NLS_LOWER(password) IN ('welcome1', 'database1', 'account1', 'user1234', 'password1', 'oracle123', 'computer1', 'abcdefg1', 'change_on_install') THEN
      raise_application_error(-20006, 'Password too simple');
   END IF;

   -- Check if the password is the same as oracle (1-100)
    simple_password := 'oracle';
    FOR i IN 1..100 LOOP
      i_char := to_char(i);
      if simple_password || i_char = NLS_LOWER(password) THEN
        raise_application_error(-20007, 'Password too simple ');
      END IF;
    END LOOP;

   -- Check if the password contains at least one letter, one digit
   -- 1. Check for the digit
   isdigit:=FALSE;
   m := length(password);
   FOR i IN 1..10 LOOP
      FOR j IN 1..m LOOP
         IF substr(password,j,1) = substr(digitarray,i,1) THEN
            isdigit:=TRUE;
             GOTO findchar;
         END IF;
      END LOOP;
   END LOOP;

   IF isdigit = FALSE THEN
      raise_application_error(-20008, 'Password must contain at least one digit, one character');
   END IF;
   -- 2. Check for the character
   <<findchar>>
   ischar:=FALSE;
   FOR i IN 1..length(chararray) LOOP
      FOR j IN 1..m LOOP
         IF substr(password,j,1) = substr(chararray,i,1) THEN
            ischar:=TRUE;
             GOTO endsearch;
         END IF;
      END LOOP;
   END LOOP;
   IF ischar = FALSE THEN
      raise_application_error(-20009, 'Password must contain at least one \
              digit, and one character');
   END IF;


   <<endsearch>>
   -- Check if the password differs from the previous password by at least
   -- 3 letters
   IF old_password IS NOT NULL THEN
     differ := length(old_password) - length(password);

     differ := abs(differ);
     IF differ < 3 THEN
       IF length(password) < length(old_password) THEN
         m := length(password);
       ELSE
         m := length(old_password);
       END IF;

       FOR i IN 1..m LOOP
         IF substr(password,i,1) != substr(old_password,i,1) THEN
           differ := differ + 1;
         END IF;
       END LOOP;

       IF differ < 3 THEN
         raise_application_error(-20011, 'Password should differ from the \
            old password by at least 3 characters');
       END IF;
     END IF;
   END IF;
   -- Everything is fine; return TRUE ;
   RETURN(TRUE);
END;
/
-- new object type path: DATABASE_EXPORT/PROFILE
 ALTER PROFILE "DEFAULT"
    LIMIT
         COMPOSITE_LIMIT UNLIMITED
         SESSIONS_PER_USER UNLIMITED
         CPU_PER_SESSION UNLIMITED
         CPU_PER_CALL UNLIMITED
         LOGICAL_READS_PER_SESSION UNLIMITED
         LOGICAL_READS_PER_CALL UNLIMITED
         IDLE_TIME UNLIMITED
         CONNECT_TIME UNLIMITED
         PRIVATE_SGA UNLIMITED
         FAILED_LOGIN_ATTEMPTS 10
         PASSWORD_LIFE_TIME 15552000/86400
         PASSWORD_REUSE_TIME UNLIMITED
         PASSWORD_REUSE_MAX UNLIMITED
         PASSWORD_VERIFY_FUNCTION "VERIFY_FUNCTION_11G"
         PASSWORD_LOCK_TIME 86400/86400
         PASSWORD_GRACE_TIME 604800/86400 ;
-- new object type path: DATABASE_EXPORT/ROLE
 CREATE ROLE "SELECT_CATALOG_ROLE";

 REVOKE "SELECT_CATALOG_ROLE" FROM SYS;
 CREATE ROLE "EXECUTE_CATALOG_ROLE";

 REVOKE "EXECUTE_CATALOG_ROLE" FROM SYS;
 CREATE ROLE "DELETE_CATALOG_ROLE";

 REVOKE "DELETE_CATALOG_ROLE" FROM SYS;
 CREATE ROLE "DBFS_ROLE";

 REVOKE "DBFS_ROLE" FROM SYS;
 CREATE ROLE "AQ_ADMINISTRATOR_ROLE";

 REVOKE "AQ_ADMINISTRATOR_ROLE" FROM SYS;
 CREATE ROLE "AQ_USER_ROLE";

 REVOKE "AQ_USER_ROLE" FROM SYS;
 CREATE ROLE "ADM_PARALLEL_EXECUTE_TASK";

 REVOKE "ADM_PARALLEL_EXECUTE_TASK" FROM SYS;
 CREATE ROLE "GATHER_SYSTEM_STATISTICS";

 REVOKE "GATHER_SYSTEM_STATISTICS" FROM SYS;
 CREATE ROLE "RECOVERY_CATALOG_OWNER";

 REVOKE "RECOVERY_CATALOG_OWNER" FROM SYS;
 CREATE ROLE "SCHEDULER_ADMIN";

 REVOKE "SCHEDULER_ADMIN" FROM SYS;
 CREATE ROLE "HS_ADMIN_SELECT_ROLE";

 REVOKE "HS_ADMIN_SELECT_ROLE" FROM SYS;
 CREATE ROLE "HS_ADMIN_EXECUTE_ROLE";

 REVOKE "HS_ADMIN_EXECUTE_ROLE" FROM SYS;
 CREATE ROLE "HS_ADMIN_ROLE";

 REVOKE "HS_ADMIN_ROLE" FROM SYS;
 CREATE ROLE "GLOBAL_AQ_USER_ROLE" IDENTIFIED GLOBALLY;
 CREATE ROLE "OEM_ADVISOR";

 REVOKE "OEM_ADVISOR" FROM SYS;
 CREATE ROLE "OEM_MONITOR";

 REVOKE "OEM_MONITOR" FROM SYS;
-- new object type path: DATABASE_EXPORT/SCHEMA/PUBLIC_SYNONYM/SYNONYM
CREATE  PUBLIC SYNONYM "OL$" FOR "SYSTEM"."OL$";
CREATE  PUBLIC SYNONYM "OL$HINTS" FOR "SYSTEM"."OL$HINTS";
CREATE  PUBLIC SYNONYM "OL$NODES" FOR "SYSTEM"."OL$NODES";
CREATE  PUBLIC SYNONYM "PRODUCT_PROFILE" FOR "SYSTEM"."PRODUCT_PRIVS";
CREATE  PUBLIC SYNONYM "PRODUCT_USER_PROFILE" FOR "SYSTEM"."PRODUCT_PRIVS";
CREATE  PUBLIC SYNONYM "PUBLIC_HAWK" FOR "GGS_ADMIN"."OGG$Q_TAB_E_HAWK";

In Oracle Trace, XCTEND indicates the end of a transaction, what indicates its beginning?

Tom Kyte - Sat, 2017-02-11 09:26
We know that a given Oracle session can contain many transactions; each transaction should have a beginning and an end, and each wraps many queries within it. While analyzing an Oracle trace file I wanted to make sure that queries are correctly wra...
Categories: DBA Blogs

Need input on Dynamic Update stored procedure with bind variables

Tom Kyte - Sat, 2017-02-11 09:26
Hi AskTom Team, Good day to you! I have some questions on best practices for implementing a dynamic update statement using a stored procedure. I have read many examples on this site alone, and also from other sources, but none seems to answer my specific ques...
Categories: DBA Blogs
