From 2bbb472016f52c5e282c83a988a5179f1079557a Mon Sep 17 00:00:00 2001 From: Anton Khirnov Date: Sun, 25 Dec 2011 13:48:06 +0100 Subject: [PATCH 01/12] FAQ: add an entry for a common error when using -profile --- doc/faq.texi | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/doc/faq.texi b/doc/faq.texi index f3ddbbe811..8044200987 100644 --- a/doc/faq.texi +++ b/doc/faq.texi @@ -266,6 +266,26 @@ avconv -f u16le -acodec pcm_s16le -ac 2 -ar 44100 -i all.a \ rm temp[12].[av] all.[av] @end example +@section -profile option fails when encoding H.264 video with AAC audio + +@command{avconv} prints an error like + +@example +Undefined constant or missing '(' in 'baseline' +Unable to parse option value "baseline" +Error setting option profile to value baseline. +@end example + +Short answer: write @option{-profile:v} instead of @option{-profile}. + +Long answer: this happens because the @option{-profile} option can apply to both +video and audio. Specifically, the AAC encoder also defines some profiles, none +of which are named @var{baseline}. + +The solution is to apply the @option{-profile} option to the video stream only +by using @url{http://libav.org/avconv.html#Stream-specifiers-1, Stream specifiers}. +Appending @code{:v} to it will do exactly that. + @chapter Development @section Are there examples illustrating how to use the Libav libraries, particularly libavcodec and libavformat? From 671005558a295945f5d4cfd1abca6832af479c0b Mon Sep 17 00:00:00 2001 From: Gautam Gupta Date: Sun, 11 Dec 2011 19:02:17 +0100 Subject: [PATCH 02/12] doxy: add website-alike style to the html output Signed-off-by: Luca Barbato --- Doxyfile | 14 +- doc/doxy/doxy_stylesheet.css | 1130 ++++++++++++++++++++++++++++++++++ doc/doxy/footer.html | 22 + doc/doxy/header.html | 17 + 4 files changed, 1179 insertions(+), 4 deletions(-) create mode 100644 doc/doxy/doxy_stylesheet.css create mode 100644 doc/doxy/footer.html create mode 100644 doc/doxy/header.html diff --git a/Doxyfile b/Doxyfile index 722abf9eee..f28fb707e6 100644 --- a/Doxyfile +++ b/Doxyfile @@ -33,6 +33,12 @@ PROJECT_NAME = Libav PROJECT_NUMBER = +# With the PROJECT_LOGO tag one can specify a logo or icon that is included +# in the documentation. The maximum height of the logo should not exceed 55 +# pixels and the maximum width should not exceed 200 pixels. Doxygen will +# copy the logo to the output directory. +PROJECT_LOGO = + # The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) # base path where the generated documentation will be put. # If a relative path is entered, it will be relative to the location @@ -760,7 +766,7 @@ ALPHABETICAL_INDEX = YES # the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns # in which this list will be split (can be a number in the range [1..20]) -COLS_IN_ALPHA_INDEX = 5 +COLS_IN_ALPHA_INDEX = 2 # In case all classes in a project start with a common prefix, all # classes will be put under the same header in the alphabetical index. @@ -794,13 +800,13 @@ HTML_FILE_EXTENSION = .html # each generated HTML page. If it is left blank doxygen will generate a # standard header. -HTML_HEADER = +HTML_HEADER = doc/doxy/header.html # The HTML_FOOTER tag can be used to specify a personal HTML footer for # each generated HTML page. If it is left blank doxygen will generate a # standard footer. -HTML_FOOTER = +HTML_FOOTER = doc/doxy/footer.html # The HTML_STYLESHEET tag can be used to specify a user-defined cascading # style sheet that is used by each HTML page.
It can be used to @@ -809,7 +815,7 @@ HTML_FOOTER = # the style sheet file to the HTML output directory, so don't put your own # stylesheet in the HTML output directory as well, or it will be erased! -HTML_STYLESHEET = +HTML_STYLESHEET = doc/doxy/doxy_stylesheet.css # The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. # Doxygen will adjust the colors in the stylesheet and background images diff --git a/doc/doxy/doxy_stylesheet.css b/doc/doxy/doxy_stylesheet.css new file mode 100644 index 0000000000..a5500b69af --- /dev/null +++ b/doc/doxy/doxy_stylesheet.css @@ -0,0 +1,1130 @@ +/* The standard CSS for doxygen */ + +/* @group Heading Levels */ + + +h1 { + font-size: 150%; +} + +.title { + font-weight: bold; + margin: 10px 2px; + background-color: #7BB37B; + border: 1px solid #6A996A; + color: #151515; + font-size: 1.2em; + padding-bottom: 0.2em; + padding-left: 0.4em; + padding-top: 0.2em; +} + +h2 { + font-size: 120%; +} + +h3 { + font-size: 100%; +} + +dt { + font-weight: bold; +} + +div.multicol { + -moz-column-gap: 1em; + -webkit-column-gap: 1em; + -moz-column-count: 3; + -webkit-column-count: 3; +} + +p.startli, p.startdd, p.starttd { + margin-top: 2px; +} + +p.endli { + margin-bottom: 0px; +} + +p.enddd { + margin-bottom: 4px; +} + +p.endtd { + margin-bottom: 2px; +} + +/* @end */ + +caption { + font-weight: bold; +} + +span.legend { + font-size: 70%; + text-align: center; +} + +h3.version { + font-size: 90%; + text-align: center; +} + +div.qindex, div.navtab{ + background-color: #EBF6EB; + border: 1px solid #A3D7A3; + text-align: center; +} + +div.qindex, div.navpath { + width: 100%; + line-height: 140%; +} + +div.navtab { + margin-right: 15px; +} + +/* @group Link Styling */ + +a { + color: #3D8C3D; + font-weight: normal; + text-decoration: none; +} + +.contents a:visited { + color: #46A246; +} + +a:hover { + text-decoration: underline; +} + +a.qindex { + font-weight: bold; +} + +a.qindexHL { + font-weight: bold; + background-color: #9CD49C; + color: #ffffff; + border: 1px double #86CA86; +} + +.contents a.qindexHL:visited { + color: #ffffff; +} + +a.el { + font-weight: bold; +} + +a.elRef { +} + +a.code { + color: #4665A2; +} + +a.codeRef { + color: #4665A2; +} + +/* @end */ + +dl.el { + margin-left: -1cm; +} + +.fragment { + font-family: monospace, fixed; + font-size: 105%; +} + +pre.fragment { + border: 1px solid #C4E5C4; + background-color: #FBFDFB; + padding: 4px 6px; + margin: 4px 8px 4px 2px; + overflow: auto; + word-wrap: break-word; + font-size: 9pt; + line-height: 125%; +} + +div.ah { + background-color: black; + font-weight: bold; + color: #ffffff; + margin-bottom: 3px; + margin-top: 3px; + padding: 0.2em; + border: solid thin #333; + border-radius: 0.5em; + -webkit-border-radius: .5em; + -moz-border-radius: .5em; + box-shadow: 2px 2px 3px #999; + -webkit-box-shadow: 2px 2px 3px #999; + -moz-box-shadow: rgba(0, 0, 0, 0.15) 2px 2px 2px; + background-image: -webkit-gradient(linear, left top, left bottom, from(#eee), to(#000),color-stop(0.3, #444)); + background-image: -moz-linear-gradient(center top, #eee 0%, #444 40%, #000); +} + +div.groupHeader { + margin-left: 16px; + margin-top: 12px; + font-weight: bold; +} + +div.groupText { + margin-left: 16px; + font-style: italic; +} + +div.contents { + margin-top: 10px; + margin-left: 8px; + margin-right: 8px; +} + +td.indexkey { + background-color: #EBF6EB; + font-weight: bold; + border: 1px solid #C4E5C4; + margin: 2px 0px 2px 0; + padding: 2px 10px; + white-space: nowrap; + vertical-align: top; +} + 
+td.indexvalue { + background-color: #EBF6EB; + border: 1px solid #C4E5C4; + padding: 2px 10px; + margin: 2px 0px; +} + +tr.memlist { + background-color: #EEF7EE; +} + +p.formulaDsp { + text-align: center; +} + +img.formulaDsp { + +} + +img.formulaInl { + vertical-align: middle; +} + +div.center { + text-align: center; + margin-top: 0px; + margin-bottom: 0px; + padding: 0px; +} + +div.center img { + border: 0px; +} + +#footer { + margin: -10px 1em 0; + padding-top: 20px; +} + +address.footer { + background-color: #ffffff; + text-align: center; +} + +img.footer { + border: 0px; + vertical-align: middle; +} + +/* @group Code Colorization */ + +span.keyword { + color: #008000 +} + +span.keywordtype { + color: #604020 +} + +span.keywordflow { + color: #e08000 +} + +span.comment { + color: #800000 +} + +span.preprocessor { + color: #806020 +} + +span.stringliteral { + color: #002080 +} + +span.charliteral { + color: #008080 +} + +span.vhdldigit { + color: #ff00ff +} + +span.vhdlchar { + color: #000000 +} + +span.vhdlkeyword { + color: #700070 +} + +span.vhdllogic { + color: #ff0000 +} + +/* @end */ + +/* +.search { + color: #003399; + font-weight: bold; +} + +form.search { + margin-bottom: 0px; + margin-top: 0px; +} + +input.search { + font-size: 75%; + color: #000080; + font-weight: normal; + background-color: #e8eef2; +} +*/ + +td.tiny { + font-size: 75%; +} + +.dirtab { + padding: 4px; + border-collapse: collapse; + border: 1px solid #A3D7A3; +} + +th.dirtab { + background: #EBF6EB; + font-weight: bold; +} + +hr { + height: 0px; + border: none; + border-top: 1px solid #4AAA4A; +} + +hr.footer { + height: 1px; +} + +/* @group Member Descriptions */ + +table.memberdecls { + border-spacing: 0px; + padding: 0px; +} + +.mdescLeft, .mdescRight, +.memItemLeft, .memItemRight, +.memTemplItemLeft, .memTemplItemRight, .memTemplParams { + background-color: #F9FCF9; + border: none; + margin: 4px; + padding: 1px 0 0 8px; +} + +.mdescLeft, .mdescRight { + padding: 0px 8px 4px 8px; + color: #555; +} + +.memItemLeft, .memItemRight, .memTemplParams { + border-top: 1px solid #C4E5C4; +} + +.memItemLeft, .memTemplItemLeft { + white-space: nowrap; +} + +.memItemRight { + width: 100%; +} + +.memTemplParams { + color: #46A246; + white-space: nowrap; +} + +/* @end */ + +/* @group Member Details */ + +/* Styles for detailed member documentation */ + +.memtemplate { + font-size: 80%; + color: #46A246; + font-weight: normal; + margin-left: 9px; +} + +.memnav { + background-color: #EBF6EB; + border: 1px solid #A3D7A3; + text-align: center; + margin: 2px; + margin-right: 15px; + padding: 2px; +} + +.mempage { + width: 100%; +} + +.memitem { + padding: 0; + margin-bottom: 10px; + margin-right: 5px; +} + +.memname { + white-space: nowrap; + font-weight: bold; + margin-left: 6px; +} + +.memproto, dl.reflist dt { + border-top: 1px solid #A8D9A8; + border-left: 1px solid #A8D9A8; + border-right: 1px solid #A8D9A8; + padding: 6px 0px 6px 0px; + color: #255525; + font-weight: bold; + text-shadow: 0px 1px 1px rgba(255, 255, 255, 0.9); + /* opera specific markup */ + box-shadow: 5px 5px 5px rgba(0, 0, 0, 0.15); + border-top-right-radius: 8px; + border-top-left-radius: 8px; + /* firefox specific markup */ + -moz-box-shadow: rgba(0, 0, 0, 0.15) 5px 5px 5px; + -moz-border-radius-topright: 8px; + -moz-border-radius-topleft: 8px; + /* webkit specific markup */ + -webkit-box-shadow: 5px 5px 5px rgba(0, 0, 0, 0.15); + -webkit-border-top-right-radius: 8px; + -webkit-border-top-left-radius: 8px; + background-image:url('nav_f.png'); + 
background-repeat:repeat-x; + background-color: #E2F2E2; + +} + +.memdoc, dl.reflist dd { + border-bottom: 1px solid #A8D9A8; + border-left: 1px solid #A8D9A8; + border-right: 1px solid #A8D9A8; + padding: 2px 5px; + background-color: #FBFDFB; + border-top-width: 0; + /* opera specific markup */ + border-bottom-left-radius: 8px; + border-bottom-right-radius: 8px; + box-shadow: 5px 5px 5px rgba(0, 0, 0, 0.15); + /* firefox specific markup */ + -moz-border-radius-bottomleft: 8px; + -moz-border-radius-bottomright: 8px; + -moz-box-shadow: rgba(0, 0, 0, 0.15) 5px 5px 5px; + background-image: -moz-linear-gradient(center top, #FFFFFF 0%, #FFFFFF 60%, #F7FBF7 95%, #EEF7EE); + /* webkit specific markup */ + -webkit-border-bottom-left-radius: 8px; + -webkit-border-bottom-right-radius: 8px; + -webkit-box-shadow: 5px 5px 5px rgba(0, 0, 0, 0.15); + background-image: -webkit-gradient(linear,center top,center bottom,from(#FFFFFF), color-stop(0.6,#FFFFFF), color-stop(0.60,#FFFFFF), color-stop(0.95,#F7FBF7), to(#EEF7EE)); +} + +dl.reflist dt { + padding: 5px; +} + +dl.reflist dd { + margin: 0px 0px 10px 0px; + padding: 5px; +} + +.paramkey { + text-align: right; +} + +.paramtype { + white-space: nowrap; +} + +.paramname { + color: #602020; + white-space: nowrap; +} +.paramname em { + font-style: normal; +} + +.params, .retval, .exception, .tparams { + border-spacing: 6px 2px; +} + +.params .paramname, .retval .paramname { + font-weight: bold; + vertical-align: top; +} + +.params .paramtype { + font-style: italic; + vertical-align: top; +} + +.params .paramdir { + font-family: "courier new",courier,monospace; + vertical-align: top; +} + + + + +/* @end */ + +/* @group Directory (tree) */ + +/* for the tree view */ + +.ftvtree { + font-family: sans-serif; + margin: 0px; +} + +/* these are for tree view when used as main index */ + +.directory { + font-size: 9pt; + font-weight: bold; + margin: 5px; +} + +.directory h3 { + margin: 0px; + margin-top: 1em; + font-size: 11pt; +} + +/* +The following two styles can be used to replace the root node title +with an image of your choice. Simply uncomment the next two styles, +specify the name of your image and be sure to set 'height' to the +proper pixel height of your image. 
+*/ + +/* +.directory h3.swap { + height: 61px; + background-repeat: no-repeat; + background-image: url("yourimage.gif"); +} +.directory h3.swap span { + display: none; +} +*/ + +.directory > h3 { + margin-top: 0; +} + +.directory p { + margin: 0px; + white-space: nowrap; +} + +.directory div { + display: none; + margin: 0px; +} + +.directory img { + vertical-align: -30%; +} + +/* these are for tree view when not used as main index */ + +.directory-alt { + font-size: 100%; + font-weight: bold; +} + +.directory-alt h3 { + margin: 0px; + margin-top: 1em; + font-size: 11pt; +} + +.directory-alt > h3 { + margin-top: 0; +} + +.directory-alt p { + margin: 0px; + white-space: nowrap; +} + +.directory-alt div { + display: none; + margin: 0px; +} + +.directory-alt img { + vertical-align: -30%; +} + +/* @end */ + +div.dynheader { + margin-top: 8px; +} + +address { + font-style: normal; + color: #2A612A; +} + +table.doxtable { + border-collapse:collapse; +} + +table.doxtable td, table.doxtable th { + border: 1px solid #2D682D; + padding: 3px 7px 2px; +} + +table.doxtable th { + background-color: #377F37; + color: #FFFFFF; + font-size: 110%; + padding-bottom: 4px; + padding-top: 5px; + text-align:left; +} + +table.fieldtable { + width: 100%; + margin-bottom: 10px; + border: 1px solid #A8D9A8; + border-spacing: 0px; + -moz-border-radius: 4px; + -webkit-border-radius: 4px; + border-radius: 4px; + -moz-box-shadow: rgba(0, 0, 0, 0.15) 2px 2px 2px; + -webkit-box-shadow: 2px 2px 2px rgba(0, 0, 0, 0.15); + box-shadow: 2px 2px 2px rgba(0, 0, 0, 0.15); +} + +.fieldtable td, .fieldtable th { + padding: 3px 7px 2px; +} + +.fieldtable td.fieldtype, .fieldtable td.fieldname { + white-space: nowrap; + border-right: 1px solid #A8D9A8; + border-bottom: 1px solid #A8D9A8; + vertical-align: top; +} + +.fieldtable td.fielddoc { + border-bottom: 1px solid #A8D9A8; + width: 100%; +} + +.fieldtable tr:last-child td { + border-bottom: none; +} + +.fieldtable th { + background-image:url('nav_f.png'); + background-repeat:repeat-x; + background-color: #E2F2E2; + font-size: 90%; + color: #255525; + padding-bottom: 4px; + padding-top: 5px; + text-align:left; + -moz-border-radius-topleft: 4px; + -moz-border-radius-topright: 4px; + -webkit-border-top-left-radius: 4px; + -webkit-border-top-right-radius: 4px; + border-top-left-radius: 4px; + border-top-right-radius: 4px; + border-bottom: 1px solid #A8D9A8; +} + + +.tabsearch { + top: 0px; + left: 10px; + height: 36px; + background-image: url('tab_b.png'); + z-index: 101; + overflow: hidden; + font-size: 13px; +} + +.navpath ul +{ + font-size: 11px; + background-image:url('tab_b.png'); + background-repeat:repeat-x; + height:30px; + line-height:30px; + color:#8ACC8A; + border:solid 1px #C2E4C2; + overflow:hidden; + margin:0px; + padding:0px; +} + +.navpath li +{ + list-style-type:none; + float:left; + padding-left:10px; + padding-right:15px; + background-image:url('bc_s.png'); + background-repeat:no-repeat; + background-position:right; + color:#367C36; +} + +.navpath li.navelem a +{ + height:32px; + display:block; + text-decoration: none; + outline: none; +} + +.navpath li.navelem a:hover +{ + color:#68BD68; +} + +.navpath li.footer +{ + list-style-type:none; + float:right; + padding-left:10px; + padding-right:15px; + background-image:none; + background-repeat:no-repeat; + background-position:right; + color:#367C36; + font-size: 8pt; +} + + +div.summary +{ + margin-top: 12px; + text-align: center; +} + +div.summary a +{ + white-space: nowrap; +} + +div.ingroups +{ + margin-left: 5px; 
+ font-size: 8pt; + padding-left: 5px; + width: 50%; + text-align: left; +} + +div.ingroups a +{ + white-space: nowrap; +} + +div.headertitle +{ + padding: 5px 5px 5px 7px; +} + +dl +{ + padding: 0 0 0 10px; +} + +dl.note, dl.warning, dl.attention, dl.pre, dl.post, dl.invariant, dl.deprecated, dl.todo, dl.test, dl.bug +{ + border-left:4px solid; + padding: 0 0 0 6px; +} + +dl.note +{ + border-color: #D0C000; +} + +dl.warning, dl.attention +{ + border-color: #FF0000; +} + +dl.pre, dl.post, dl.invariant +{ + border-color: #00D000; +} + +dl.deprecated +{ + border-color: #505050; +} + +dl.todo +{ + border-color: #00C0E0; +} + +dl.test +{ + border-color: #3030E0; +} + +dl.bug +{ + border-color: #C08050; +} + +#projectlogo +{ + text-align: center; + vertical-align: bottom; + border-collapse: separate; +} + +#projectlogo img +{ + border: 0px none; +} + +#projectname +{ + font: 300% Tahoma, Arial,sans-serif; + margin: 0px; + padding: 2px 0px; +} + +#projectbrief +{ + font: 120% Tahoma, Arial,sans-serif; + margin: 0px; + padding: 0px; +} + +#projectnumber +{ + font: 50% Tahoma, Arial,sans-serif; + margin: 0px; + padding: 0px; +} + +#titlearea +{ + padding: 0px; + margin: 0px; + width: 100%; + border-bottom: 1px solid #53B453; +} + +.image +{ + text-align: center; +} + +.dotgraph +{ + text-align: center; +} + +.mscgraph +{ + text-align: center; +} + +.caption +{ + font-weight: bold; +} + +div.zoom +{ + border: 1px solid #90CE90; +} + +dl.citelist { + margin-bottom:50px; +} + +dl.citelist dt { + color:#337533; + float:left; + font-weight:bold; + margin-right:10px; + padding:5px; +} + +dl.citelist dd { + margin:2px 0; + padding:5px 0; +} + +@media print +{ + #top { display: none; } + #side-nav { display: none; } + #nav-path { display: none; } + body { overflow:visible; } + h1, h2, h3, h4, h5, h6 { page-break-after: avoid; } + .summary { display: none; } + .memitem { page-break-inside: avoid; } + #doc-content + { + margin-left:0 !important; + height:auto !important; + width:auto !important; + overflow:inherit; + display:inline; + } + pre.fragment + { + overflow: visible; + text-wrap: unrestricted; + white-space: -moz-pre-wrap; /* Moz */ + white-space: -pre-wrap; /* Opera 4-6 */ + white-space: -o-pre-wrap; /* Opera 7 */ + white-space: pre-wrap; /* CSS3 */ + word-wrap: break-word; /* IE 5.5+ */ + } +} + +/* tabs */ + +.tabs, .tabs2, .tabs3 { + z-index: 101; +} + +.tablist { + margin: auto; + display: table; +} + +.tablist li { + float: left; + display: table-cell; + list-style: none; + margin:0 4px; +} + +.tablist a { + display: block; + padding: 0 0.3em; + color: #285D28; + text-decoration: none; + outline: none; +} + +.tabs3 .tablist a { + padding: 0 10px; +} + + +/* libav.org stylesheet */ + +a { + color: #2D6198; +} + +a:visited { + color: #884488; +} + +h1 a, h2 a, h3 a { + text-decoration: inherit; + color: inherit; +} + +#banner, #top { + background-color: #BBC9D8; + border-bottom: 1px solid #7A96B3; + border-top: 1px solid #7A96B3; + position: relative; + text-align: center; +} + +#banner img, #top img { + padding-bottom: 1px; + padding-top: 5px; +} + +#body { + margin: 0 1em; +} + +body { + background-color: #313131; + margin: 0; +} + +.center { + margin-left: auto; + margin-right: auto; + text-align: center; +} + +#container { + background-color: white; + color: #202020; + margin-left: 1em; + margin-right: 1em; +} + +h1 { + background-color: #7BB37B; + border: 1px solid #6A996A; + color: #151515; + font-size: 1.2em; + padding-bottom: 0.2em; + padding-left: 0.4em; + padding-top: 0.2em; +} + +h2 
{ + color: #313131; + font-size: 1.2em; +} + +h3 { + color: #313131; + font-size: 0.8em; + margin-bottom: -8px; +} + +img { + border: 0; +} + +#navrow1 { + margin-top: 12px; + border-top: 1px solid #5C665C; +} + +#navrow1, #navrow2, #navrow3, #navrow4 { + background-color: #738073; + border-bottom: 1px solid #5C665C; + border-left: 1px solid #5C665C; + border-right: 1px solid #5C665C; + position: relative; + text-align: center; +} + +#navrow1 a, #navrow2 a, #navrow3 a, #navrow4 a { + color: white; + padding: 0.3em; + text-decoration: none; +} + + +#navrow1 ul, #navrow2 ul, #navrow3 ul, #navrow4 ul { + padding: 0; +} + +#navrow1 li.current a, #navrow2 li.current a, #navrow3 li.current a, #navrow4 li.current a { + background-color: #414141; + color: white; + text-decoration: none; +} + +#navrow1 a:hover, #navrow2 a:hover, #navrow3 a:hover, #navrow4 a:hover { + background-color: #313131 !important; + color: white; + text-decoration: none; +} + +p { + margin-left: 1em; + margin-right: 1em; +} + +table { + margin-left: 2em; +} + +pre { + margin-left: 2em; +} + +#proj_desc { + font-size: 1.2em; +} + +#repos { + margin-left: 1em; + margin-right: 1em; + border-collapse: collapse; + border: solid 1px #6A996A; +} + +#repos th { + background-color: #7BB37B; + border: solid 1px #6A996A; +} + +#repos td { + padding: 0.2em; + border: solid 1px #6A996A; +} + +#distro_status { + margin-left: 1em; + margin-right: 1em; + border-collapse: collapse; + border: solid 1px #6A996A; +} + +#distro_status th { + background-color: #7BB37B; + border: solid 1px #6A996A; +} + +#distro_status td { + padding: 0.2em; + border: solid 1px #6A996A; +} diff --git a/doc/doxy/footer.html b/doc/doxy/footer.html new file mode 100644 index 0000000000..1bff6debe1 --- /dev/null +++ b/doc/doxy/footer.html @@ -0,0 +1,22 @@ + + + + + + + + diff --git a/doc/doxy/header.html b/doc/doxy/header.html new file mode 100644 index 0000000000..97b7234456 --- /dev/null +++ b/doc/doxy/header.html @@ -0,0 +1,17 @@ + + + + + +$projectname: $title +$title + +$treeview +$search +$mathjax + + +
+ +
+
From b58dbb5b031c33cdb88f13cc533f623e82cdbcbd Mon Sep 17 00:00:00 2001 From: Stefano Sabatini Date: Fri, 29 Apr 2011 13:04:47 +0200 Subject: [PATCH 03/12] lavc: add a sample_aspect_ratio field to AVFrame The sample aspect ratio is a per-frame property, so it makes sense to define it in AVFrame rather than in the codec/stream context. This simplifies extracting sample aspect ratio information at the application level, and allows further simplifications. --- avplay.c | 2 +- cmdutils.c | 1 + libavcodec/avcodec.h | 7 +++++++ libavcodec/pthread.c | 1 + libavcodec/utils.c | 2 ++ libavfilter/vsrc_movie.c | 4 ++-- 6 files changed, 14 insertions(+), 3 deletions(-) diff --git a/avplay.c b/avplay.c index bf1ac1b3f2..bf246f2382 100644 --- a/avplay.c +++ b/avplay.c @@ -1651,7 +1651,7 @@ static int input_request_frame(AVFilterLink *link) picref->pts = pts; picref->pos = pkt.pos; - picref->video->pixel_aspect = priv->is->video_st->codec->sample_aspect_ratio; + picref->video->pixel_aspect = priv->frame->sample_aspect_ratio; avfilter_start_frame(link, picref); avfilter_draw_slice(link, 0, link->h, 1); avfilter_end_frame(link); diff --git a/cmdutils.c b/cmdutils.c index 1c2bf4696b..c5c2c1ced0 100644 --- a/cmdutils.c +++ b/cmdutils.c @@ -1047,6 +1047,7 @@ int get_filtered_video_frame(AVFilterContext *ctx, AVFrame *frame, frame->top_field_first = picref->video->top_field_first; frame->key_frame = picref->video->key_frame; frame->pict_type = picref->video->pict_type; + frame->sample_aspect_ratio = picref->video->pixel_aspect; return 1; } diff --git a/libavcodec/avcodec.h b/libavcodec/avcodec.h index 5d39b98123..eb6826bcb0 100644 --- a/libavcodec/avcodec.h +++ b/libavcodec/avcodec.h @@ -1258,6 +1258,13 @@ typedef struct AVFrame { * decoding: set by AVCodecContext.get_buffer() */ uint8_t **extended_data; + + /** + * sample aspect ratio for the video frame, 0/1 if unknown/unspecified + * - encoding: unused + * - decoding: Read by user. + */ + AVRational sample_aspect_ratio; } AVFrame; struct AVCodecInternal; diff --git a/libavcodec/pthread.c b/libavcodec/pthread.c index f842edf861..17dd12da6c 100644 --- a/libavcodec/pthread.c +++ b/libavcodec/pthread.c @@ -599,6 +599,7 @@ int ff_thread_decode_frame(AVCodecContext *avctx, *picture = p->frame; *got_picture_ptr = p->got_frame; picture->pkt_dts = p->avpkt.dts; + picture->sample_aspect_ratio = avctx->sample_aspect_ratio; /* * A later call with avkpt->size == 0 may loop over all threads, diff --git a/libavcodec/utils.c b/libavcodec/utils.c index 7e9ddb2e35..7c1a7aea2a 100644 --- a/libavcodec/utils.c +++ b/libavcodec/utils.c @@ -584,6 +584,7 @@ void avcodec_get_frame_defaults(AVFrame *pic){ pic->pts= AV_NOPTS_VALUE; pic->key_frame= 1; + pic->sample_aspect_ratio = (AVRational){0, 1}; } AVFrame *avcodec_alloc_frame(void){ @@ -858,6 +859,7 @@ int attribute_align_arg avcodec_decode_video2(AVCodecContext *avctx, AVFrame *pi ret = avctx->codec->decode(avctx, picture, got_picture_ptr, avpkt); picture->pkt_dts= avpkt->dts; + picture->sample_aspect_ratio = avctx->sample_aspect_ratio; } emms_c(); //needed to avoid an emms_c() call before every return; diff --git a/libavfilter/vsrc_movie.c b/libavfilter/vsrc_movie.c index dec499904e..a1764732b6 100644 --- a/libavfilter/vsrc_movie.c +++ b/libavfilter/vsrc_movie.c @@ -248,8 +248,8 @@ static int movie_get_frame(AVFilterLink *outlink) movie->frame->pkt_dts : movie->frame->pkt_pts; movie->picref->pos = movie->frame->reordered_opaque; - movie->picref->video->pixel_aspect = st->sample_aspect_ratio.num ?
- st->sample_aspect_ratio : movie->codec_ctx->sample_aspect_ratio; + if (!movie->frame->sample_aspect_ratio.num) + movie->picref->video->pixel_aspect = st->sample_aspect_ratio; movie->picref->video->interlaced = movie->frame->interlaced_frame; movie->picref->video->top_field_first = movie->frame->top_field_first; movie->picref->video->key_frame = movie->frame->key_frame; From 3a2ddf7c2c4d27b595a601c55af23129a5a8be0d Mon Sep 17 00:00:00 2001 From: Stefano Sabatini Date: Sun, 1 May 2011 14:02:08 +0200 Subject: [PATCH 04/12] lavc: add width and height fields to AVFrame width and height are per-frame properties; setting these values in AVFrame simplifies extracting that information, since it avoids the need to check the codec/stream context. --- libavcodec/avcodec.h | 7 +++++++ libavcodec/pthread.c | 2 ++ libavcodec/utils.c | 2 ++ 3 files changed, 11 insertions(+) diff --git a/libavcodec/avcodec.h b/libavcodec/avcodec.h index eb6826bcb0..0204af7df0 100644 --- a/libavcodec/avcodec.h +++ b/libavcodec/avcodec.h @@ -1265,6 +1265,13 @@ typedef struct AVFrame { * - decoding: Read by user. */ AVRational sample_aspect_ratio; + + /** + * width and height of the video frame + * - encoding: unused + * - decoding: Read by user. + */ + int width, height; } AVFrame; struct AVCodecInternal; diff --git a/libavcodec/pthread.c b/libavcodec/pthread.c index 17dd12da6c..1ec2d1a1f9 100644 --- a/libavcodec/pthread.c +++ b/libavcodec/pthread.c @@ -600,6 +600,8 @@ int ff_thread_decode_frame(AVCodecContext *avctx, *got_picture_ptr = p->got_frame; picture->pkt_dts = p->avpkt.dts; picture->sample_aspect_ratio = avctx->sample_aspect_ratio; + picture->width = avctx->width; + picture->height = avctx->height; /* * A later call with avkpt->size == 0 may loop over all threads, diff --git a/libavcodec/utils.c b/libavcodec/utils.c index 7c1a7aea2a..74932ed71d 100644 --- a/libavcodec/utils.c +++ b/libavcodec/utils.c @@ -860,6 +860,8 @@ int attribute_align_arg avcodec_decode_video2(AVCodecContext *avctx, AVFrame *pi avpkt); picture->pkt_dts= avpkt->dts; picture->sample_aspect_ratio = avctx->sample_aspect_ratio; + picture->width = avctx->width; + picture->height = avctx->height; } emms_c(); //needed to avoid an emms_c() call before every return; From 8a4a5f6ff756fdba44254015c714f173b2db6f64 Mon Sep 17 00:00:00 2001 From: Stefano Sabatini Date: Sun, 1 May 2011 14:10:20 +0200 Subject: [PATCH 05/12] lavc: add format field to AVFrame The format is a per-frame property; having it in AVFrame simplifies extracting that information, since it avoids the need to access the codec/stream context. --- libavcodec/avcodec.h | 9 +++++++++ libavcodec/pthread.c | 1 + libavcodec/utils.c | 4 ++++ 3 files changed, 14 insertions(+) diff --git a/libavcodec/avcodec.h b/libavcodec/avcodec.h index 0204af7df0..4f24f729ac 100644 --- a/libavcodec/avcodec.h +++ b/libavcodec/avcodec.h @@ -1272,6 +1272,15 @@ typedef struct AVFrame { * - decoding: Read by user. */ int width, height; + + /** + * format of the frame, -1 if unknown or unset + * Values correspond to enum PixelFormat for video frames, + * enum AVSampleFormat for audio + * - encoding: unused + * - decoding: Read by user.
+ */ + int format; } AVFrame; struct AVCodecInternal; diff --git a/libavcodec/pthread.c b/libavcodec/pthread.c index 1ec2d1a1f9..a44500b036 100644 --- a/libavcodec/pthread.c +++ b/libavcodec/pthread.c @@ -602,6 +602,7 @@ int ff_thread_decode_frame(AVCodecContext *avctx, picture->sample_aspect_ratio = avctx->sample_aspect_ratio; picture->width = avctx->width; picture->height = avctx->height; + picture->format = avctx->pix_fmt; /* * A later call with avkpt->size == 0 may loop over all threads, diff --git a/libavcodec/utils.c b/libavcodec/utils.c index 74932ed71d..a88d1a7229 100644 --- a/libavcodec/utils.c +++ b/libavcodec/utils.c @@ -585,6 +585,7 @@ void avcodec_get_frame_defaults(AVFrame *pic){ pic->pts= AV_NOPTS_VALUE; pic->key_frame= 1; pic->sample_aspect_ratio = (AVRational){0, 1}; + pic->format = -1; /* unknown */ } AVFrame *avcodec_alloc_frame(void){ @@ -862,6 +863,7 @@ int attribute_align_arg avcodec_decode_video2(AVCodecContext *avctx, AVFrame *pi picture->sample_aspect_ratio = avctx->sample_aspect_ratio; picture->width = avctx->width; picture->height = avctx->height; + picture->format = avctx->pix_fmt; } emms_c(); //needed to avoid an emms_c() call before every return; @@ -983,6 +985,8 @@ int attribute_align_arg avcodec_decode_audio4(AVCodecContext *avctx, if (ret >= 0 && *got_frame_ptr) { avctx->frame_number++; frame->pkt_dts = avpkt->dts; + if (frame->format == AV_SAMPLE_FMT_NONE) + frame->format = avctx->sample_fmt; } } return ret; From 1c9e340d35351858907f11c45b2691db708f3903 Mon Sep 17 00:00:00 2001 From: Stefano Sabatini Date: Sun, 1 May 2011 14:47:05 +0200 Subject: [PATCH 06/12] lavfi: add avfilter_copy_frame_props() avfilter_copy_frame_props() avoids code duplication and increases robustness. --- avplay.c | 4 ++-- libavfilter/avfilter.c | 19 +++++++++++++++++++ libavfilter/avfilter.h | 9 +++++++++ libavfilter/vsrc_buffer.c | 5 +---- libavfilter/vsrc_movie.c | 5 +---- 5 files changed, 32 insertions(+), 10 deletions(-) diff --git a/avplay.c b/avplay.c index bf246f2382..c21701081d 100644 --- a/avplay.c +++ b/avplay.c @@ -1649,9 +1649,9 @@ static int input_request_frame(AVFilterLink *link) } av_free_packet(&pkt); + avfilter_copy_frame_props(picref, priv->frame); picref->pts = pts; - picref->pos = pkt.pos; - picref->video->pixel_aspect = priv->frame->sample_aspect_ratio; + avfilter_start_frame(link, picref); avfilter_draw_slice(link, 0, link->h, 1); avfilter_end_frame(link); diff --git a/libavfilter/avfilter.c b/libavfilter/avfilter.c index b0304d64d5..d42659112a 100644 --- a/libavfilter/avfilter.c +++ b/libavfilter/avfilter.c @@ -25,6 +25,7 @@ #include "libavutil/rational.h" #include "libavutil/audioconvert.h" #include "libavutil/imgutils.h" +#include "libavcodec/avcodec.h" #include "avfilter.h" #include "internal.h" @@ -681,3 +682,21 @@ int avfilter_init_filter(AVFilterContext *filter, const char *args, void *opaque return ret; } +int avfilter_copy_frame_props(AVFilterBufferRef *dst, const AVFrame *src) +{ + if (dst->type != AVMEDIA_TYPE_VIDEO) + return AVERROR(EINVAL); + + dst->pts = src->pts; + dst->format = src->format; + + dst->video->w = src->width; + dst->video->h = src->height; + dst->video->pixel_aspect = src->sample_aspect_ratio; + dst->video->interlaced = src->interlaced_frame; + dst->video->top_field_first = src->top_field_first; + dst->video->key_frame = src->key_frame; + dst->video->pict_type = src->pict_type; + + return 0; +} diff --git a/libavfilter/avfilter.h b/libavfilter/avfilter.h index e0c664dd29..a5cc8b2569 100644 --- a/libavfilter/avfilter.h +++ 
b/libavfilter/avfilter.h @@ -27,6 +27,7 @@ #include "libavutil/samplefmt.h" #include "libavutil/pixfmt.h" #include "libavutil/rational.h" +#include "libavcodec/avcodec.h" #define LIBAVFILTER_VERSION_MAJOR 2 #define LIBAVFILTER_VERSION_MINOR 13 @@ -862,4 +863,12 @@ static inline void avfilter_insert_outpad(AVFilterContext *f, unsigned index, &f->output_pads, &f->outputs, p); } +/** + * Copy the frame properties of src to dst, without copying the actual + * image data. + * + * @return 0 on success, a negative number on error. + */ +int avfilter_copy_frame_props(AVFilterBufferRef *dst, const AVFrame *src); + #endif /* AVFILTER_AVFILTER_H */ diff --git a/libavfilter/vsrc_buffer.c b/libavfilter/vsrc_buffer.c index 1f0233e3e3..982fed5f7a 100644 --- a/libavfilter/vsrc_buffer.c +++ b/libavfilter/vsrc_buffer.c @@ -131,12 +131,9 @@ static int request_frame(AVFilterLink *link) c->frame.data, c->frame.linesize, picref->format, link->w, link->h); + avfilter_copy_frame_props(picref, &c->frame); picref->pts = c->pts; picref->video->pixel_aspect = c->pixel_aspect; - picref->video->interlaced = c->frame.interlaced_frame; - picref->video->top_field_first = c->frame.top_field_first; - picref->video->key_frame = c->frame.key_frame; - picref->video->pict_type = c->frame.pict_type; avfilter_start_frame(link, avfilter_ref_buffer(picref, ~0)); avfilter_draw_slice(link, 0, link->h, 1); avfilter_end_frame(link); diff --git a/libavfilter/vsrc_movie.c b/libavfilter/vsrc_movie.c index a1764732b6..4ac079c5a2 100644 --- a/libavfilter/vsrc_movie.c +++ b/libavfilter/vsrc_movie.c @@ -240,6 +240,7 @@ static int movie_get_frame(AVFilterLink *outlink) av_image_copy(movie->picref->data, movie->picref->linesize, movie->frame->data, movie->frame->linesize, movie->picref->format, outlink->w, outlink->h); + avfilter_copy_frame_props(movie->picref, movie->frame); /* FIXME: use a PTS correction mechanism as that in * ffplay.c when some API will be available for that */ @@ -250,10 +251,6 @@ static int movie_get_frame(AVFilterLink *outlink) movie->picref->pos = movie->frame->reordered_opaque; if (!movie->frame->sample_aspect_ratio.num) movie->picref->video->pixel_aspect = st->sample_aspect_ratio; - movie->picref->video->interlaced = movie->frame->interlaced_frame; - movie->picref->video->top_field_first = movie->frame->top_field_first; - movie->picref->video->key_frame = movie->frame->key_frame; - movie->picref->video->pict_type = movie->frame->pict_type; av_dlog(outlink->src, "movie_get_frame(): file:'%s' pts:%"PRId64" time:%lf pos:%"PRId64" aspect:%d/%d\n", movie->file_name, movie->picref->pts, From e1d9dbf2d465448028bf396d7b37dbb642794678 Mon Sep 17 00:00:00 2001 From: Anton Khirnov Date: Wed, 21 Dec 2011 20:39:18 +0100 Subject: [PATCH 07/12] lavfi: add a new function av_buffersrc_buffer(). It can be used to directly pass an AVFilterBufferRef to lavfi, avoiding a memcpy.
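For illustration only, a minimal sketch of the intended call pattern; here "frame" is assumed to be an already-decoded AVFrame and "buffersrc_ctx" a configured buffer source filter instance (both names are hypothetical), and the helpers are the ones added earlier in this series:

    /* wrap the existing data planes without copying the pixels */
    AVFilterBufferRef *fb =
        avfilter_get_video_buffer_ref_from_arrays(frame->data, frame->linesize,
                                                  AV_PERM_READ,
                                                  frame->width, frame->height,
                                                  frame->format);
    avfilter_copy_frame_props(fb, frame); /* per-frame props from PATCH 06 */
    fb->pts = frame->pts;
    av_buffersrc_buffer(buffersrc_ctx, fb); /* lavfi takes ownership of fb;
                                             * the caller must not free it */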
--- libavfilter/Makefile | 2 +- libavfilter/buffersrc.h | 38 ++++++++++++++++++++++ libavfilter/vsrc_buffer.c | 66 +++++++++++++++++++-------------------- 3 files changed, 72 insertions(+), 34 deletions(-) create mode 100644 libavfilter/buffersrc.h diff --git a/libavfilter/Makefile b/libavfilter/Makefile index c269db5797..78c4692614 100644 --- a/libavfilter/Makefile +++ b/libavfilter/Makefile @@ -3,7 +3,7 @@ FFLIBS = avutil FFLIBS-$(CONFIG_MOVIE_FILTER) += avformat avcodec FFLIBS-$(CONFIG_SCALE_FILTER) += swscale -HEADERS = avfilter.h avfiltergraph.h vsrc_buffer.h +HEADERS = avfilter.h avfiltergraph.h buffersrc.h vsrc_buffer.h OBJS = allfilters.o \ avfilter.o \ diff --git a/libavfilter/buffersrc.h b/libavfilter/buffersrc.h new file mode 100644 index 0000000000..bd82c065e4 --- /dev/null +++ b/libavfilter/buffersrc.h @@ -0,0 +1,38 @@ +/* + * + * This file is part of Libav. + * + * Libav is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * Libav is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with Libav; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVFILTER_BUFFERSRC_H +#define AVFILTER_BUFFERSRC_H + +/** + * @file + * Memory buffer source API. + */ + +#include "avfilter.h" + +/** + * Add a buffer to the filtergraph s. + * + * @param buf buffer containing frame data to be passed down the filtergraph. + * This function will take ownership of buf, the user must not free it. + */ +int av_buffersrc_buffer(AVFilterContext *s, AVFilterBufferRef *buf); + +#endif /* AVFILTER_BUFFERSRC_H */ diff --git a/libavfilter/vsrc_buffer.c b/libavfilter/vsrc_buffer.c index 982fed5f7a..7ef19a1140 100644 --- a/libavfilter/vsrc_buffer.c +++ b/libavfilter/vsrc_buffer.c @@ -24,13 +24,12 @@ */ #include "avfilter.h" +#include "buffersrc.h" #include "vsrc_buffer.h" #include "libavutil/imgutils.h" typedef struct { - int64_t pts; - AVFrame frame; - int has_frame; + AVFilterBufferRef *buf; int h, w; enum PixelFormat pix_fmt; AVRational time_base; ///< time_base to set in the output link @@ -42,7 +41,7 @@ int av_vsrc_buffer_add_frame(AVFilterContext *buffer_filter, AVFrame *frame, { BufferSourceContext *c = buffer_filter->priv; - if (c->has_frame) { + if (c->buf) { av_log(buffer_filter, AV_LOG_ERROR, "Buffering several frames is not supported. 
" "Please consume all available frames before adding a new one.\n" @@ -50,15 +49,31 @@ int av_vsrc_buffer_add_frame(AVFilterContext *buffer_filter, AVFrame *frame, //return -1; } - memcpy(c->frame.data , frame->data , sizeof(frame->data)); - memcpy(c->frame.linesize, frame->linesize, sizeof(frame->linesize)); - c->frame.interlaced_frame= frame->interlaced_frame; - c->frame.top_field_first = frame->top_field_first; - c->frame.key_frame = frame->key_frame; - c->frame.pict_type = frame->pict_type; - c->pts = pts; - c->pixel_aspect = pixel_aspect; - c->has_frame = 1; + c->buf = avfilter_get_video_buffer(buffer_filter->outputs[0], AV_PERM_WRITE, + c->w, c->h); + av_image_copy(c->buf->data, c->buf->linesize, frame->data, frame->linesize, + c->pix_fmt, c->w, c->h); + + avfilter_copy_frame_props(c->buf, frame); + c->buf->pts = pts; + c->buf->video->pixel_aspect = pixel_aspect; + + return 0; +} + +int av_buffersrc_buffer(AVFilterContext *s, AVFilterBufferRef *buf) +{ + BufferSourceContext *c = s->priv; + + if (c->buf) { + av_log(s, AV_LOG_ERROR, + "Buffering several frames is not supported. " + "Please consume all available frames before adding a new one.\n" + ); + return AVERROR(EINVAL); + } + + c->buf = buf; return 0; } @@ -113,33 +128,18 @@ static int config_props(AVFilterLink *link) static int request_frame(AVFilterLink *link) { BufferSourceContext *c = link->src->priv; - AVFilterBufferRef *picref; - if (!c->has_frame) { + if (!c->buf) { av_log(link->src, AV_LOG_ERROR, "request_frame() called with no available frame!\n"); //return -1; } - /* This picture will be needed unmodified later for decoding the next - * frame */ - picref = avfilter_get_video_buffer(link, AV_PERM_WRITE | AV_PERM_PRESERVE | - AV_PERM_REUSE2, - link->w, link->h); - - av_image_copy(picref->data, picref->linesize, - c->frame.data, c->frame.linesize, - picref->format, link->w, link->h); - - avfilter_copy_frame_props(picref, &c->frame); - picref->pts = c->pts; - picref->video->pixel_aspect = c->pixel_aspect; - avfilter_start_frame(link, avfilter_ref_buffer(picref, ~0)); + avfilter_start_frame(link, avfilter_ref_buffer(c->buf, ~0)); avfilter_draw_slice(link, 0, link->h, 1); avfilter_end_frame(link); - avfilter_unref_buffer(picref); - - c->has_frame = 0; + avfilter_unref_buffer(c->buf); + c->buf = NULL; return 0; } @@ -147,7 +147,7 @@ static int request_frame(AVFilterLink *link) static int poll_frame(AVFilterLink *link) { BufferSourceContext *c = link->src->priv; - return !!(c->has_frame); + return !!(c->buf); } AVFilter avfilter_vsrc_buffer = { From 64dca32cdf925b7f6b1308479924b53a1681597b Mon Sep 17 00:00:00 2001 From: Anton Khirnov Date: Mon, 19 Dec 2011 15:44:58 +0100 Subject: [PATCH 08/12] avconv: implement get_buffer()/release_buffer(). This will allow memcpy-free passing frames to lavfi. 
--- avconv.c | 142 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 142 insertions(+) diff --git a/avconv.c b/avconv.c index dfef98f51f..d0895129ac 100644 --- a/avconv.c +++ b/avconv.c @@ -44,6 +44,7 @@ #include "libavutil/pixdesc.h" #include "libavutil/avstring.h" #include "libavutil/libm.h" +#include "libavutil/imgutils.h" #include "libavformat/os_support.h" #if CONFIG_AVFILTER @@ -139,6 +140,19 @@ static unsigned int allocated_audio_out_size, allocated_audio_buf_size; #define DEFAULT_PASS_LOGFILENAME_PREFIX "av2pass" +typedef struct FrameBuffer { + uint8_t *base[4]; + uint8_t *data[4]; + int linesize[4]; + + int h, w; + enum PixelFormat pix_fmt; + + int refcount; + struct InputStream *ist; + struct FrameBuffer *next; +} FrameBuffer; + typedef struct InputStream { int file_index; AVStream *st; @@ -157,6 +171,9 @@ typedef struct InputStream { int is_start; /* is 1 at the start and after a discontinuity */ int showed_multi_packet_warning; AVDictionary *opts; + + /* a pool of free buffers for decoded data */ + FrameBuffer *buffer_pool; } InputStream; typedef struct InputFile { @@ -394,6 +411,124 @@ static void reset_options(OptionsContext *o) init_opts(); } +static int alloc_buffer(InputStream *ist, FrameBuffer **pbuf) +{ + AVCodecContext *s = ist->st->codec; + FrameBuffer *buf = av_mallocz(sizeof(*buf)); + int ret; + const int pixel_size = av_pix_fmt_descriptors[s->pix_fmt].comp[0].step_minus1+1; + int h_chroma_shift, v_chroma_shift; + int edge = 32; // XXX should be avcodec_get_edge_width(), but that fails on svq1 + int w = s->width, h = s->height; + + if (!buf) + return AVERROR(ENOMEM); + + if (!(s->flags & CODEC_FLAG_EMU_EDGE)) { + w += 2*edge; + h += 2*edge; + } + + avcodec_align_dimensions(s, &w, &h); + if ((ret = av_image_alloc(buf->base, buf->linesize, w, h, + s->pix_fmt, 32)) < 0) { + av_freep(&buf); + return ret; + } + /* XXX this shouldn't be needed, but some tests break without this line + * those decoders are buggy and need to be fixed. + * the following tests fail: + * bethsoft-vid, cdgraphics, ansi, aasc, fraps-v1, qtrle-1bit + */ + memset(buf->base[0], 128, ret); + + avcodec_get_chroma_sub_sample(s->pix_fmt, &h_chroma_shift, &v_chroma_shift); + for (int i = 0; i < FF_ARRAY_ELEMS(buf->data); i++) { + const int h_shift = i==0 ? 0 : h_chroma_shift; + const int v_shift = i==0 ? 
0 : v_chroma_shift; + if (s->flags & CODEC_FLAG_EMU_EDGE) + buf->data[i] = buf->base[i]; + else + buf->data[i] = buf->base[i] + + FFALIGN((buf->linesize[i]*edge >> v_shift) + + (pixel_size*edge >> h_shift), 32); + } + buf->w = s->width; + buf->h = s->height; + buf->pix_fmt = s->pix_fmt; + buf->ist = ist; + + *pbuf = buf; + return 0; +} + +static void free_buffer_pool(InputStream *ist) +{ + FrameBuffer *buf = ist->buffer_pool; + while (buf) { + ist->buffer_pool = buf->next; + av_freep(&buf->base[0]); + av_free(buf); + buf = ist->buffer_pool; + } +} + +static void unref_buffer(InputStream *ist, FrameBuffer *buf) +{ + av_assert0(buf->refcount); + buf->refcount--; + if (!buf->refcount) { + buf->next = ist->buffer_pool; + ist->buffer_pool = buf; + } +} + +static int codec_get_buffer(AVCodecContext *s, AVFrame *frame) +{ + InputStream *ist = s->opaque; + FrameBuffer *buf; + int ret, i; + + if (!ist->buffer_pool && (ret = alloc_buffer(ist, &ist->buffer_pool)) < 0) + return ret; + + buf = ist->buffer_pool; + ist->buffer_pool = buf->next; + buf->next = NULL; + if (buf->w != s->width || buf->h != s->height || buf->pix_fmt != s->pix_fmt) { + av_freep(&buf->base[0]); + av_free(buf); + if ((ret = alloc_buffer(ist, &buf)) < 0) + return ret; + } + buf->refcount++; + + frame->opaque = buf; + frame->type = FF_BUFFER_TYPE_USER; + frame->extended_data = frame->data; + frame->pkt_pts = s->pkt ? s->pkt->pts : AV_NOPTS_VALUE; + + for (i = 0; i < FF_ARRAY_ELEMS(buf->data); i++) { + frame->base[i] = buf->base[i]; // XXX h264.c uses base though it shouldn't + frame->data[i] = buf->data[i]; + frame->linesize[i] = buf->linesize[i]; + } + + return 0; +} + +static void codec_release_buffer(AVCodecContext *s, AVFrame *frame) +{ + InputStream *ist = s->opaque; + FrameBuffer *buf = frame->opaque; + int i; + + for (i = 0; i < FF_ARRAY_ELEMS(frame->data); i++) + frame->data[i] = NULL; + + unref_buffer(ist, buf); +} + #if CONFIG_AVFILTER static int configure_video_filters(InputStream *ist, OutputStream *ost) @@ -531,6 +666,7 @@ void exit_program(int ret) av_freep(&input_streams[i].decoded_frame); av_freep(&input_streams[i].filtered_frame); av_dict_free(&input_streams[i].opts); + free_buffer_pool(&input_streams[i]); } if (vstats_file) @@ -1985,6 +2121,12 @@ static int init_input_stream(int ist_index, OutputStream *output_streams, int nb } } + if (codec->type == AVMEDIA_TYPE_VIDEO && codec->capabilities & CODEC_CAP_DR1) { + ist->st->codec->get_buffer = codec_get_buffer; + ist->st->codec->release_buffer = codec_release_buffer; + ist->st->codec->opaque = ist; + } + if (avcodec_open2(ist->st->codec, codec, &ist->opts) < 0) { snprintf(error, error_len, "Error while opening decoder for input stream #%d:%d", ist->file_index, ist->st->index); From 04a14d4d25517eccd0a4ae91c42a34edbdc4be6f Mon Sep 17 00:00:00 2001 From: Anton Khirnov Date: Wed, 21 Dec 2011 21:04:05 +0100 Subject: [PATCH 09/12] avconv: avoid memcpy in vsrc_buffer when possible. Decoding Sintel.2010.1080p.mkv with 4 threads on an i7 2600K goes from 260s to 244s. 
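In outline, the new path wraps the decoder's pooled buffer instead of copying it (a condensed sketch of the hand-off implemented below; FrameBuffer, codec_get_buffer() and filter_release_buffer() are the helpers from this and the previous patch):

    FrameBuffer *buf = decoded_frame->opaque;   /* attached by codec_get_buffer() */
    AVFilterBufferRef *fb =
        avfilter_get_video_buffer_ref_from_arrays(decoded_frame->data,
                                                  decoded_frame->linesize,
                                                  AV_PERM_READ | AV_PERM_PRESERVE,
                                                  ist->st->codec->width,
                                                  ist->st->codec->height,
                                                  ist->st->codec->pix_fmt);
    fb->buf->priv = buf;                   /* tie the lavfi buffer to the pool entry */
    fb->buf->free = filter_release_buffer; /* unref into the pool instead of freeing */
    buf->refcount++;                       /* lavfi now holds a reference */
    av_buffersrc_buffer(ost->input_video_filter, fb);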
--- avconv.c | 52 +++++++++++++++++++++++++++++++++++++--------------- 1 file changed, 37 insertions(+), 15 deletions(-) diff --git a/avconv.c b/avconv.c index d0895129ac..4ac7072ed6 100644 --- a/avconv.c +++ b/avconv.c @@ -50,6 +50,7 @@ #if CONFIG_AVFILTER # include "libavfilter/avfilter.h" # include "libavfilter/avfiltergraph.h" +# include "libavfilter/buffersrc.h" # include "libavfilter/vsrc_buffer.h" #endif @@ -529,6 +530,13 @@ static void codec_release_buffer(AVCodecContext *s, AVFrame *frame) unref_buffer(ist, buf); } +static void filter_release_buffer(AVFilterBuffer *fb) +{ + FrameBuffer *buf = fb->priv; + av_free(fb); + unref_buffer(buf->ist, buf); +} + #if CONFIG_AVFILTER static int configure_video_filters(InputStream *ist, OutputStream *ost) @@ -1915,21 +1923,35 @@ static int transcode_video(InputStream *ist, AVPacket *pkt, int *got_output, int continue; #if CONFIG_AVFILTER - if (ost->input_video_filter) { - AVRational sar; - if (ist->st->sample_aspect_ratio.num) - sar = ist->st->sample_aspect_ratio; - else - sar = ist->st->codec->sample_aspect_ratio; - av_vsrc_buffer_add_frame(ost->input_video_filter, decoded_frame, ist->pts, sar); - if (!ist->filtered_frame && !(ist->filtered_frame = avcodec_alloc_frame())) { - av_free(buffer_to_free); - return AVERROR(ENOMEM); - } else - avcodec_get_frame_defaults(ist->filtered_frame); - filtered_frame = ist->filtered_frame; - frame_available = avfilter_poll_frame(ost->output_video_filter->inputs[0]); - } + if (ist->st->sample_aspect_ratio.num) + decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio; + if (ist->st->codec->codec->capabilities & CODEC_CAP_DR1) { + FrameBuffer *buf = decoded_frame->opaque; + AVFilterBufferRef *fb = avfilter_get_video_buffer_ref_from_arrays( + decoded_frame->data, decoded_frame->linesize, + AV_PERM_READ | AV_PERM_PRESERVE, + ist->st->codec->width, ist->st->codec->height, + ist->st->codec->pix_fmt); + + avfilter_copy_frame_props(fb, decoded_frame); + fb->pts = ist->pts; + fb->buf->priv = buf; + fb->buf->free = filter_release_buffer; + + buf->refcount++; + av_buffersrc_buffer(ost->input_video_filter, fb); + } else + av_vsrc_buffer_add_frame(ost->input_video_filter, decoded_frame, + ist->pts, decoded_frame->sample_aspect_ratio); + + if (!ist->filtered_frame && !(ist->filtered_frame = avcodec_alloc_frame())) { + av_free(buffer_to_free); + return AVERROR(ENOMEM); + } else + avcodec_get_frame_defaults(ist->filtered_frame); + filtered_frame = ist->filtered_frame; + + frame_available = avfilter_poll_frame(ost->output_video_filter->inputs[0]); while (frame_available) { AVRational ist_pts_tb; if (ost->output_video_filter) From c65dfac466c2248a5a099a4fb9d421a9f647da03 Mon Sep 17 00:00:00 2001 From: Konstantin Todorov Date: Sun, 25 Dec 2011 09:58:01 -0800 Subject: [PATCH 10/12] mpegvideo.c: K&R formatting and cosmetics. Signed-off-by: Ronald S. 
Bultje --- libavcodec/mpegvideo.c | 1207 ++++++++++++++++++++++------------------ 1 file changed, 665 insertions(+), 542 deletions(-) diff --git a/libavcodec/mpegvideo.c b/libavcodec/mpegvideo.c index d190606eff..214b64ec3b 100644 --- a/libavcodec/mpegvideo.c +++ b/libavcodec/mpegvideo.c @@ -1009,7 +1009,7 @@ void init_rl(RLTable *rl, uint8_t index_run[MAX_RUN + 1]; int last, run, level, start, end, i; - /* If table is static, we can quit if rl->max_level[0] is not NULL */ + /* If table is static, we can quit if rl->max_level[0] is not NULL */ if (static_store && rl->max_level[0]) return; @@ -1132,25 +1132,30 @@ int ff_find_unused_picture(MpegEncContext *s, int shared) return AVERROR_INVALIDDATA; } -static void update_noise_reduction(MpegEncContext *s){ +static void update_noise_reduction(MpegEncContext *s) +{ int intra, i; - for(intra=0; intra<2; intra++){ - if(s->dct_count[intra] > (1<<16)){ - for(i=0; i<64; i++){ - s->dct_error_sum[intra][i] >>=1; + for (intra = 0; intra < 2; intra++) { + if (s->dct_count[intra] > (1 << 16)) { + for (i = 0; i < 64; i++) { + s->dct_error_sum[intra][i] >>= 1; } s->dct_count[intra] >>= 1; } - for(i=0; i<64; i++){ - s->dct_offset[intra][i]= (s->avctx->noise_reduction * s->dct_count[intra] + s->dct_error_sum[intra][i]/2) / (s->dct_error_sum[intra][i]+1); + for (i = 0; i < 64; i++) { + s->dct_offset[intra][i] = (s->avctx->noise_reduction * + s->dct_count[intra] + + s->dct_error_sum[intra][i] / 2) / + (s->dct_error_sum[intra][i] + 1); } } } /** - * generic function for encode/decode called after coding/decoding the header and before a frame is coded/decoded + * generic function for encode/decode called after coding/decoding + * the header and before a frame is coded/decoded. */ int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx) { @@ -1158,42 +1163,49 @@ int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx) Picture *pic; s->mb_skipped = 0; - assert(s->last_picture_ptr==NULL || s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3); - - /* mark&release old frames */ - if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr && s->last_picture_ptr != s->next_picture_ptr && s->last_picture_ptr->f.data[0]) { - if(s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3){ - if (s->last_picture_ptr->owner2 == s) - free_frame_buffer(s, s->last_picture_ptr); - - /* release forgotten pictures */ - /* if(mpeg124/h263) */ - if(!s->encoding){ - for(i=0; ipicture_count; i++){ - if (s->picture[i].owner2 == s && s->picture[i].f.data[0] && &s->picture[i] != s->next_picture_ptr && s->picture[i].f.reference) { - if (!(avctx->active_thread_type & FF_THREAD_FRAME)) - av_log(avctx, AV_LOG_ERROR, "releasing zombie picture\n"); - free_frame_buffer(s, &s->picture[i]); + assert(s->last_picture_ptr == NULL || s->out_format != FMT_H264 || + s->codec_id == CODEC_ID_SVQ3); + + /* mark & release old frames */ + if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr && + s->last_picture_ptr != s->next_picture_ptr && + s->last_picture_ptr->f.data[0]) { + if (s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3) { + if (s->last_picture_ptr->owner2 == s) + free_frame_buffer(s, s->last_picture_ptr); + + /* release forgotten pictures */ + /* if (mpeg124/h263) */ + if (!s->encoding) { + for (i = 0; i < s->picture_count; i++) { + if (s->picture[i].owner2 == s && s->picture[i].f.data[0] && + &s->picture[i] != s->next_picture_ptr && + s->picture[i].f.reference) { + if (!(avctx->active_thread_type & FF_THREAD_FRAME)) + av_log(avctx, AV_LOG_ERROR, + "releasing 
zombie picture\n"); + free_frame_buffer(s, &s->picture[i]); + } } } } - } } - if(!s->encoding){ + if (!s->encoding) { ff_release_unused_pictures(s, 1); - if (s->current_picture_ptr && s->current_picture_ptr->f.data[0] == NULL) - pic= s->current_picture_ptr; //we already have a unused image (maybe it was set before reading the header) - else{ - i= ff_find_unused_picture(s, 0); - if (i < 0) - return i; - pic= &s->picture[i]; + if (s->current_picture_ptr && + s->current_picture_ptr->f.data[0] == NULL) { + // we already have a unused image + // (maybe it was set before reading the header) + pic = s->current_picture_ptr; + } else { + i = ff_find_unused_picture(s, 0); + pic = &s->picture[i]; } pic->f.reference = 0; - if (!s->dropable){ + if (!s->dropable) { if (s->codec_id == CODEC_ID_H264) pic->f.reference = s->picture_structure; else if (s->pict_type != AV_PICTURE_TYPE_B) @@ -1202,79 +1214,93 @@ int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx) pic->f.coded_picture_number = s->coded_picture_number++; - if(ff_alloc_picture(s, pic, 0) < 0) + if (ff_alloc_picture(s, pic, 0) < 0) return -1; - s->current_picture_ptr= pic; - //FIXME use only the vars from current_pic + s->current_picture_ptr = pic; + // FIXME use only the vars from current_pic s->current_picture_ptr->f.top_field_first = s->top_field_first; - if(s->codec_id == CODEC_ID_MPEG1VIDEO || s->codec_id == CODEC_ID_MPEG2VIDEO) { - if(s->picture_structure != PICT_FRAME) - s->current_picture_ptr->f.top_field_first = (s->picture_structure == PICT_TOP_FIELD) == s->first_field; + if (s->codec_id == CODEC_ID_MPEG1VIDEO || + s->codec_id == CODEC_ID_MPEG2VIDEO) { + if (s->picture_structure != PICT_FRAME) + s->current_picture_ptr->f.top_field_first = + (s->picture_structure == PICT_TOP_FIELD) == s->first_field; } - s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame && !s->progressive_sequence; - s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME; + s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame && + !s->progressive_sequence; + s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME; } s->current_picture_ptr->f.pict_type = s->pict_type; -// if(s->flags && CODEC_FLAG_QSCALE) - // s->current_picture_ptr->quality= s->new_picture_ptr->quality; + // if (s->flags && CODEC_FLAG_QSCALE) + // s->current_picture_ptr->quality = s->new_picture_ptr->quality; s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I; ff_copy_picture(&s->current_picture, s->current_picture_ptr); if (s->pict_type != AV_PICTURE_TYPE_B) { - s->last_picture_ptr= s->next_picture_ptr; - if(!s->dropable) - s->next_picture_ptr= s->current_picture_ptr; - } -/* av_log(s->avctx, AV_LOG_DEBUG, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n", s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr, - s->last_picture_ptr ? s->last_picture_ptr->f.data[0] : NULL, - s->next_picture_ptr ? s->next_picture_ptr->f.data[0] : NULL, - s->current_picture_ptr ? 
s->current_picture_ptr->f.data[0] : NULL, - s->pict_type, s->dropable);*/ - - if(s->codec_id != CODEC_ID_H264){ - if ((s->last_picture_ptr == NULL || s->last_picture_ptr->f.data[0] == NULL) && - (s->pict_type!=AV_PICTURE_TYPE_I || s->picture_structure != PICT_FRAME)){ + s->last_picture_ptr = s->next_picture_ptr; + if (!s->dropable) + s->next_picture_ptr = s->current_picture_ptr; + } + /* av_log(s->avctx, AV_LOG_DEBUG, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n", + s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr, + s->last_picture_ptr ? s->last_picture_ptr->f.data[0] : NULL, + s->next_picture_ptr ? s->next_picture_ptr->f.data[0] : NULL, + s->current_picture_ptr ? s->current_picture_ptr->f.data[0] : NULL, + s->pict_type, s->dropable); */ + + if (s->codec_id != CODEC_ID_H264) { + if ((s->last_picture_ptr == NULL || + s->last_picture_ptr->f.data[0] == NULL) && + (s->pict_type != AV_PICTURE_TYPE_I || + s->picture_structure != PICT_FRAME)) { if (s->pict_type != AV_PICTURE_TYPE_I) - av_log(avctx, AV_LOG_ERROR, "warning: first frame is no keyframe\n"); + av_log(avctx, AV_LOG_ERROR, + "warning: first frame is no keyframe\n"); else if (s->picture_structure != PICT_FRAME) - av_log(avctx, AV_LOG_INFO, "allocate dummy last picture for field based first keyframe\n"); + av_log(avctx, AV_LOG_INFO, + "allocate dummy last picture for field based first keyframe\n"); /* Allocate a dummy frame */ - i= ff_find_unused_picture(s, 0); - if (i < 0) - return i; - s->last_picture_ptr= &s->picture[i]; - if(ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) + i = ff_find_unused_picture(s, 0); + s->last_picture_ptr = &s->picture[i]; + if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) return -1; - ff_thread_report_progress((AVFrame*)s->last_picture_ptr, INT_MAX, 0); - ff_thread_report_progress((AVFrame*)s->last_picture_ptr, INT_MAX, 1); + ff_thread_report_progress((AVFrame *) s->last_picture_ptr, + INT_MAX, 0); + ff_thread_report_progress((AVFrame *) s->last_picture_ptr, + INT_MAX, 1); } - if ((s->next_picture_ptr == NULL || s->next_picture_ptr->f.data[0] == NULL) && s->pict_type == AV_PICTURE_TYPE_B) { + if ((s->next_picture_ptr == NULL || + s->next_picture_ptr->f.data[0] == NULL) && + s->pict_type == AV_PICTURE_TYPE_B) { /* Allocate a dummy frame */ - i= ff_find_unused_picture(s, 0); - if (i < 0) - return i; - s->next_picture_ptr= &s->picture[i]; - if(ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) + i = ff_find_unused_picture(s, 0); + s->next_picture_ptr = &s->picture[i]; + if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) return -1; - ff_thread_report_progress((AVFrame*)s->next_picture_ptr, INT_MAX, 0); - ff_thread_report_progress((AVFrame*)s->next_picture_ptr, INT_MAX, 1); + ff_thread_report_progress((AVFrame *) s->next_picture_ptr, + INT_MAX, 0); + ff_thread_report_progress((AVFrame *) s->next_picture_ptr, + INT_MAX, 1); } } - if(s->last_picture_ptr) ff_copy_picture(&s->last_picture, s->last_picture_ptr); - if(s->next_picture_ptr) ff_copy_picture(&s->next_picture, s->next_picture_ptr); + if (s->last_picture_ptr) + ff_copy_picture(&s->last_picture, s->last_picture_ptr); + if (s->next_picture_ptr) + ff_copy_picture(&s->next_picture, s->next_picture_ptr); - assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr && s->last_picture_ptr->f.data[0])); + assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr && + s->last_picture_ptr->f.data[0])); - if(s->picture_structure!=PICT_FRAME && s->out_format != FMT_H264){ + if (s->picture_structure!= PICT_FRAME && s->out_format != FMT_H264) 
         int i;
-        for(i=0; i<4; i++){
-            if(s->picture_structure == PICT_BOTTOM_FIELD){
-                s->current_picture.f.data[i] += s->current_picture.f.linesize[i];
+        for (i = 0; i < 4; i++) {
+            if (s->picture_structure == PICT_BOTTOM_FIELD) {
+                s->current_picture.f.data[i] +=
+                    s->current_picture.f.linesize[i];
             }
             s->current_picture.f.linesize[i] *= 2;
             s->last_picture.f.linesize[i]    *= 2;
@@ -1284,95 +1310,101 @@ int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)

     s->err_recognition = avctx->err_recognition;

-    /* set dequantizer, we can't do it during init as it might change for mpeg4
-       and we can't do it in the header decode as init is not called for mpeg4 there yet */
-    if(s->mpeg_quant || s->codec_id == CODEC_ID_MPEG2VIDEO){
+    /* set dequantizer, we can't do it during init as
+     * it might change for mpeg4 and we can't do it in the header
+     * decode as init is not called for mpeg4 there yet */
+    if (s->mpeg_quant || s->codec_id == CODEC_ID_MPEG2VIDEO) {
         s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
         s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
-    }else if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
+    } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
         s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
         s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
-    }else{
+    } else {
         s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
         s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
     }

-    if(s->dct_error_sum){
+    if (s->dct_error_sum) {
         assert(s->avctx->noise_reduction && s->encoding);
-
         update_noise_reduction(s);
     }

-    if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
+    if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
         return ff_xvmc_field_start(s, avctx);

     return 0;
 }

-/* generic function for encode/decode called after a frame has been coded/decoded */
+/* generic function for encode/decode called after a
+ * frame has been coded/decoded. */
 void MPV_frame_end(MpegEncContext *s)
 {
     int i;
     /* redraw edges for the frame if decoding didn't complete */
-    //just to make sure that all data is rendered.
+    // just to make sure that all data is rendered.
+    if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration) {
         ff_xvmc_field_end(s);
-    }else if((s->error_count || s->encoding)
-             && !s->avctx->hwaccel
-             && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
-             && s->unrestricted_mv
-             && s->current_picture.f.reference
-             && !s->intra_only
-             && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
-            int hshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_w;
-            int vshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_h;
-            s->dsp.draw_edges(s->current_picture.f.data[0], s->linesize,
-                              s->h_edge_pos, s->v_edge_pos,
-                              EDGE_WIDTH, EDGE_WIDTH, EDGE_TOP | EDGE_BOTTOM);
-            s->dsp.draw_edges(s->current_picture.f.data[1], s->uvlinesize,
-                              s->h_edge_pos>>hshift, s->v_edge_pos>>vshift,
-                              EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, EDGE_TOP | EDGE_BOTTOM);
-            s->dsp.draw_edges(s->current_picture.f.data[2], s->uvlinesize,
-                              s->h_edge_pos>>hshift, s->v_edge_pos>>vshift,
-                              EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, EDGE_TOP | EDGE_BOTTOM);
+    } else if ((s->error_count || s->encoding) &&
+               !s->avctx->hwaccel &&
+               !(s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) &&
+               s->unrestricted_mv &&
+               s->current_picture.f.reference &&
+               !s->intra_only &&
+               !(s->flags & CODEC_FLAG_EMU_EDGE)) {
+        int hshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_w;
+        int vshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_h;
+        s->dsp.draw_edges(s->current_picture.f.data[0], s->linesize,
+                          s->h_edge_pos, s->v_edge_pos,
+                          EDGE_WIDTH, EDGE_WIDTH,
+                          EDGE_TOP | EDGE_BOTTOM);
+        s->dsp.draw_edges(s->current_picture.f.data[1], s->uvlinesize,
+                          s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
+                          EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
+                          EDGE_TOP | EDGE_BOTTOM);
+        s->dsp.draw_edges(s->current_picture.f.data[2], s->uvlinesize,
+                          s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
+                          EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
+                          EDGE_TOP | EDGE_BOTTOM);
     }

     emms_c();

-    s->last_pict_type = s->pict_type;
-    s->last_lambda_for[s->pict_type] = s->current_picture_ptr->f.quality;
-    if(s->pict_type!=AV_PICTURE_TYPE_B){
-        s->last_non_b_pict_type= s->pict_type;
+    s->last_pict_type                = s->pict_type;
+    s->last_lambda_for[s->pict_type] = s->current_picture_ptr->f.quality;
+    if (s->pict_type != AV_PICTURE_TYPE_B) {
+        s->last_non_b_pict_type = s->pict_type;
     }
 #if 0
-    /* copy back current_picture variables */
-    for(i=0; i<MAX_PICTURE_COUNT; i++){
-        if(s->picture[i].f.data[0] == s->current_picture.f.data[0]){
-            s->picture[i]= s->current_picture;
+    /* copy back current_picture variables */
+    for (i = 0; i < MAX_PICTURE_COUNT; i++) {
+        if (s->picture[i].f.data[0] == s->current_picture.f.data[0]) {
+            s->picture[i] = s->current_picture;
             break;
         }
     }
-    assert(i<MAX_PICTURE_COUNT);
+    assert(i < MAX_PICTURE_COUNT);
 #endif

-    if(s->encoding){
+    if (s->encoding) {
         /* release non-reference frames */
-        for(i=0; i<s->picture_count; i++){
-            if (s->picture[i].f.data[0] && !s->picture[i].f.reference /*&& s->picture[i].type != FF_BUFFER_TYPE_SHARED*/) {
+        for (i = 0; i < s->picture_count; i++) {
+            if (s->picture[i].f.data[0] && !s->picture[i].f.reference
+                /* && s->picture[i].type != FF_BUFFER_TYPE_SHARED */) {
                 free_frame_buffer(s, &s->picture[i]);
             }
         }
     }
     // clear copies, to avoid confusion
 #if 0
-    memset(&s->last_picture, 0, sizeof(Picture));
-    memset(&s->next_picture, 0, sizeof(Picture));
+    memset(&s->last_picture,    0, sizeof(Picture));
+    memset(&s->next_picture,    0, sizeof(Picture));
     memset(&s->current_picture, 0, sizeof(Picture));
 #endif
-    s->avctx->coded_frame= (AVFrame*)s->current_picture_ptr;
+    s->avctx->coded_frame = (AVFrame *) s->current_picture_ptr;

     if (s->codec_id != CODEC_ID_H264 &&
         s->current_picture.f.reference) {
-        ff_thread_report_progress((AVFrame*)s->current_picture_ptr, s->mb_height-1, 0);
+        ff_thread_report_progress((AVFrame *) s->current_picture_ptr,
+                                  s->mb_height - 1, 0);
     }
 }

@@ -1383,44 +1415,48 @@ void MPV_frame_end(MpegEncContext *s)
  * @param stride stride/linesize of the image
  * @param color color of the arrow
  */
-static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
+static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey,
+                      int w, int h, int stride, int color)
+{
     int x, y, fr, f;

-    sx= av_clip(sx, 0, w-1);
-    sy= av_clip(sy, 0, h-1);
-    ex= av_clip(ex, 0, w-1);
-    ey= av_clip(ey, 0, h-1);
+    sx = av_clip(sx, 0, w - 1);
+    sy = av_clip(sy, 0, h - 1);
+    ex = av_clip(ex, 0, w - 1);
+    ey = av_clip(ey, 0, h - 1);

-    buf[sy*stride + sx]+= color;
+    buf[sy * stride + sx] += color;

-    if(FFABS(ex - sx) > FFABS(ey - sy)){
-        if(sx > ex){
+    if (FFABS(ex - sx) > FFABS(ey - sy)) {
+        if (sx > ex) {
             FFSWAP(int, sx, ex);
             FFSWAP(int, sy, ey);
         }
-        buf+= sx + sy*stride;
-        ex-= sx;
-        f= ((ey-sy)<<16)/ex;
-        for(x= 0; x <= ex; x++){
-            y = (x*f)>>16;
-            fr= (x*f)&0xFFFF;
-            buf[ y   *stride + x]+= (color*(0x10000-fr))>>16;
-            buf[(y+1)*stride + x]+= (color*         fr )>>16;
+        buf += sx + sy * stride;
+        ex  -= sx;
+        f    = ((ey - sy) << 16) / ex;
+        for (x = 0; x <= ex; x++) {
+            y  = (x * f) >> 16;
+            fr = (x * f) & 0xFFFF;
+            buf[y * stride + x]       += (color * (0x10000 - fr)) >> 16;
+            buf[(y + 1) * stride + x] += (color *            fr ) >> 16;
         }
-    }else{
-        if(sy > ey){
+    } else {
+        if (sy > ey) {
             FFSWAP(int, sx, ex);
             FFSWAP(int, sy, ey);
         }
-        buf+= sx + sy*stride;
-        ey-= sy;
-        if(ey) f= ((ex-sx)<<16)/ey;
-        else   f= 0;
-        for(y= 0; y <= ey; y++){
-            x = (y*f)>>16;
-            fr= (y*f)&0xFFFF;
-            buf[y*stride + x  ]+= (color*(0x10000-fr))>>16;
-            buf[y*stride + x+1]+= (color*         fr )>>16;
+        buf += sx + sy * stride;
+        ey  -= sy;
+        if (ey)
+            f = ((ex - sx) << 16) / ey;
+        else
+            f = 0;
+        for (y = 0; y <= ey; y++) {
+            x  = (y * f) >> 16;
+            fr = (y * f) & 0xFFFF;
+            buf[y * stride + x]     += (color * (0x10000 - fr)) >> 16;
+            buf[y * stride + x + 1] += (color *            fr ) >> 16;
         }
     }
 }

@@ -1432,25 +1468,27 @@ static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h
  * @param stride stride/linesize of the image
  * @param color color of the arrow
  */
-static void draw_arrow(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
+static void draw_arrow(uint8_t *buf, int sx, int sy, int ex,
+                       int ey, int w, int h, int stride, int color)
+{
    int dx,dy;

-    sx= av_clip(sx, -100, w+100);
-    sy= av_clip(sy, -100, h+100);
-    ex= av_clip(ex, -100, w+100);
-    ey= av_clip(ey, -100, h+100);
+    sx = av_clip(sx, -100, w + 100);
+    sy = av_clip(sy, -100, h + 100);
+    ex = av_clip(ex, -100, w + 100);
+    ey = av_clip(ey, -100, h + 100);

-    dx= ex - sx;
-    dy= ey - sy;
+    dx = ex - sx;
+    dy = ey - sy;

-    if(dx*dx + dy*dy > 3*3){
-        int rx=  dx + dy;
-        int ry= -dx + dy;
-        int length= ff_sqrt((rx*rx + ry*ry)<<8);
+    if (dx * dx + dy * dy > 3 * 3) {
+        int rx =  dx + dy;
+        int ry = -dx + dy;
+        int length = ff_sqrt((rx * rx + ry * ry) << 8);

-        //FIXME subpixel accuracy
-        rx= ROUNDED_DIV(rx*3<<4, length);
-        ry= ROUNDED_DIV(ry*3<<4, length);
+        // FIXME subpixel accuracy
+        rx = ROUNDED_DIV(rx * 3 << 4, length);
+        ry = ROUNDED_DIV(ry * 3 << 4, length);

         draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
         draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
@@ -1459,306 +1497,354 @@ static void draw_arrow(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int
 }

 /**
- * Print debuging info for the given picture.
+ * Print debugging info for the given picture.
  */
-void ff_print_debug_info(MpegEncContext *s, AVFrame *pict){
-
-    if(s->avctx->hwaccel || !pict || !pict->mb_type) return;
+void ff_print_debug_info(MpegEncContext *s, AVFrame *pict)
+{
+    if (s->avctx->hwaccel || !pict || !pict->mb_type)
+        return;

-    if(s->avctx->debug&(FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)){
+    if (s->avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
         int x,y;

         av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
         switch (pict->pict_type) {
-            case AV_PICTURE_TYPE_I: av_log(s->avctx,AV_LOG_DEBUG,"I\n"); break;
-            case AV_PICTURE_TYPE_P: av_log(s->avctx,AV_LOG_DEBUG,"P\n"); break;
-            case AV_PICTURE_TYPE_B: av_log(s->avctx,AV_LOG_DEBUG,"B\n"); break;
-            case AV_PICTURE_TYPE_S: av_log(s->avctx,AV_LOG_DEBUG,"S\n"); break;
-            case AV_PICTURE_TYPE_SI: av_log(s->avctx,AV_LOG_DEBUG,"SI\n"); break;
-            case AV_PICTURE_TYPE_SP: av_log(s->avctx,AV_LOG_DEBUG,"SP\n"); break;
-        }
-        for(y=0; y<s->mb_height; y++){
-            for(x=0; x<s->mb_width; x++){
-                if(s->avctx->debug&FF_DEBUG_SKIP){
-                    int count= s->mbskip_table[x + y*s->mb_stride];
-                    if(count>9) count=9;
+        case AV_PICTURE_TYPE_I:
+            av_log(s->avctx, AV_LOG_DEBUG, "I\n");
+            break;
+        case AV_PICTURE_TYPE_P:
+            av_log(s->avctx, AV_LOG_DEBUG, "P\n");
+            break;
+        case AV_PICTURE_TYPE_B:
+            av_log(s->avctx, AV_LOG_DEBUG, "B\n");
+            break;
+        case AV_PICTURE_TYPE_S:
+            av_log(s->avctx, AV_LOG_DEBUG, "S\n");
+            break;
+        case AV_PICTURE_TYPE_SI:
+            av_log(s->avctx, AV_LOG_DEBUG, "SI\n");
+            break;
+        case AV_PICTURE_TYPE_SP:
+            av_log(s->avctx, AV_LOG_DEBUG, "SP\n");
+            break;
+        }
+        for (y = 0; y < s->mb_height; y++) {
+            for (x = 0; x < s->mb_width; x++) {
+                if (s->avctx->debug & FF_DEBUG_SKIP) {
+                    int count = s->mbskip_table[x + y * s->mb_stride];
+                    if (count > 9)
+                        count = 9;
                     av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
                 }
-                if(s->avctx->debug&FF_DEBUG_QP){
-                    av_log(s->avctx, AV_LOG_DEBUG, "%2d", pict->qscale_table[x + y*s->mb_stride]);
+                if (s->avctx->debug & FF_DEBUG_QP) {
+                    av_log(s->avctx, AV_LOG_DEBUG, "%2d",
+                           pict->qscale_table[x + y * s->mb_stride]);
                 }
-                if(s->avctx->debug&FF_DEBUG_MB_TYPE){
-                    int mb_type= pict->mb_type[x + y*s->mb_stride];
-                    //Type & MV direction
-                    if(IS_PCM(mb_type))
+                if (s->avctx->debug & FF_DEBUG_MB_TYPE) {
+                    int mb_type = pict->mb_type[x + y * s->mb_stride];
+                    // Type & MV direction
+                    if (IS_PCM(mb_type))
                         av_log(s->avctx, AV_LOG_DEBUG, "P");
-                    else if(IS_INTRA(mb_type) && IS_ACPRED(mb_type))
+                    else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
                         av_log(s->avctx, AV_LOG_DEBUG, "A");
-                    else if(IS_INTRA4x4(mb_type))
+                    else if (IS_INTRA4x4(mb_type))
                         av_log(s->avctx, AV_LOG_DEBUG, "i");
-                    else if(IS_INTRA16x16(mb_type))
+                    else if (IS_INTRA16x16(mb_type))
                         av_log(s->avctx, AV_LOG_DEBUG, "I");
-                    else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type))
+                    else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
                         av_log(s->avctx, AV_LOG_DEBUG, "d");
-                    else if(IS_DIRECT(mb_type))
+                    else if (IS_DIRECT(mb_type))
                         av_log(s->avctx, AV_LOG_DEBUG, "D");
-                    else if(IS_GMC(mb_type) && IS_SKIP(mb_type))
+                    else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
                         av_log(s->avctx, AV_LOG_DEBUG, "g");
-                    else if(IS_GMC(mb_type))
+                    else if (IS_GMC(mb_type))
                         av_log(s->avctx, AV_LOG_DEBUG, "G");
-                    else if(IS_SKIP(mb_type))
+                    else if (IS_SKIP(mb_type))
                         av_log(s->avctx, AV_LOG_DEBUG, "S");
-                    else if(!USES_LIST(mb_type, 1))
+                    else if (!USES_LIST(mb_type, 1))
                         av_log(s->avctx, AV_LOG_DEBUG, ">");
-                    else if(!USES_LIST(mb_type, 0))
+                    else if (!USES_LIST(mb_type, 0))
                         av_log(s->avctx, AV_LOG_DEBUG, "<");
-                    else{
+                    else {
                         assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
                         av_log(s->avctx, AV_LOG_DEBUG, "X");
                     }

-                    //segmentation
-                    if(IS_8X8(mb_type))
+                    // segmentation
+                    if (IS_8X8(mb_type))
                         av_log(s->avctx, AV_LOG_DEBUG, "+");
-                    else if(IS_16X8(mb_type))
+                    else if (IS_16X8(mb_type))
                         av_log(s->avctx, AV_LOG_DEBUG, "-");
-                    else if(IS_8X16(mb_type))
+                    else if (IS_8X16(mb_type))
                         av_log(s->avctx, AV_LOG_DEBUG, "|");
-                    else if(IS_INTRA(mb_type) || IS_16X16(mb_type))
+                    else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
                         av_log(s->avctx, AV_LOG_DEBUG, " ");
                     else
                         av_log(s->avctx, AV_LOG_DEBUG, "?");

-                    if(IS_INTERLACED(mb_type))
+                    if (IS_INTERLACED(mb_type))
                         av_log(s->avctx, AV_LOG_DEBUG, "=");
                     else
                         av_log(s->avctx, AV_LOG_DEBUG, " ");
                 }
-//                av_log(s->avctx, AV_LOG_DEBUG, " ");
+                // av_log(s->avctx, AV_LOG_DEBUG, " ");
             }
             av_log(s->avctx, AV_LOG_DEBUG, "\n");
         }
     }

     if ((s->avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
-        s->avctx->debug_mv) {
-        const int shift= 1 + s->quarter_sample;
+        (s->avctx->debug_mv)) {
+        const int shift = 1 + s->quarter_sample;
         int mb_y;
         uint8_t *ptr;
         int i;
         int h_chroma_shift, v_chroma_shift, block_height;
-        const int width = s->avctx->width;
-        const int height= s->avctx->height;
-        const int mv_sample_log2= 4 - pict->motion_subsample_log2;
-        const int mv_stride= (s->mb_width << mv_sample_log2) + (s->codec_id == CODEC_ID_H264 ? 0 : 1);
-        s->low_delay=0; //needed to see the vectors without trashing the buffers
-
-        avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
-        for(i=0; i<3; i++){
-            memcpy(s->visualization_buffer[i], pict->data[i], (i==0) ? pict->linesize[i]*height:pict->linesize[i]*height >> v_chroma_shift);
-            pict->data[i]= s->visualization_buffer[i];
-        }
-        pict->type= FF_BUFFER_TYPE_COPY;
-        ptr= pict->data[0];
-        block_height = 16>>v_chroma_shift;
-
-        for(mb_y=0; mb_y<s->mb_height; mb_y++){
+        const int width          = s->avctx->width;
+        const int height         = s->avctx->height;
+        const int mv_sample_log2 = 4 - pict->motion_subsample_log2;
+        const int mv_stride      = (s->mb_width << mv_sample_log2) +
+                                   (s->codec_id == CODEC_ID_H264 ? 0 : 1);
+        s->low_delay = 0; // needed to see the vectors without trashing the buffers
+
+        avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,
+                                      &h_chroma_shift, &v_chroma_shift);
+        for (i = 0; i < 3; i++) {
+            memcpy(s->visualization_buffer[i], pict->data[i],
+                   (i == 0) ? pict->linesize[i] * height :
+                              pict->linesize[i] * height >> v_chroma_shift);
+            pict->data[i] = s->visualization_buffer[i];
+        }
+        pict->type   = FF_BUFFER_TYPE_COPY;
+        ptr          = pict->data[0];
+        block_height = 16 >> v_chroma_shift;
+
+        for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
             int mb_x;
-            for(mb_x=0; mb_x<s->mb_width; mb_x++){
-                const int mb_index= mb_x + mb_y*s->mb_stride;
-                if (s->avctx->debug_mv && pict->motion_val) {
-                    int type;
-                    for(type=0; type<3; type++){
-                        int direction = 0;
-                        switch (type) {
-                            case 0: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_P_FOR)) || (pict->pict_type!=AV_PICTURE_TYPE_P))
+            for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
+                const int mb_index = mb_x + mb_y * s->mb_stride;
+                if ((s->avctx->debug_mv) && pict->motion_val) {
+                    int type;
+                    for (type = 0; type < 3; type++) {
+                        int direction = 0;
+                        switch (type) {
+                        case 0:
+                            if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_P_FOR)) ||
+                                (pict->pict_type != AV_PICTURE_TYPE_P))
                                 continue;
-                                    direction = 0;
-                                    break;
-                            case 1: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_FOR)) || (pict->pict_type!=AV_PICTURE_TYPE_B))
+                            direction = 0;
+                            break;
+                        case 1:
+                            if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_B_FOR)) ||
+                                (pict->pict_type != AV_PICTURE_TYPE_B))
                                 continue;
-                                    direction = 0;
-                                    break;
-                            case 2: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_BACK)) || (pict->pict_type!=AV_PICTURE_TYPE_B))
+                            direction = 0;
+                            break;
+                        case 2:
+                            if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_B_BACK)) ||
+                                (pict->pict_type != AV_PICTURE_TYPE_B))
                                 continue;
-                                    direction = 1;
-                                    break;
-                        }
-                        if(!USES_LIST(pict->mb_type[mb_index], direction))
-                            continue;
-
-                        if(IS_8X8(pict->mb_type[mb_index])){
-                            int i;
-                            for(i=0; i<4; i++){
-                                int sx= mb_x*16 + 4 + 8*(i&1);
-                                int sy= mb_y*16 + 4 + 8*(i>>1);
-                                int xy= (mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*mv_stride) << (mv_sample_log2-1);
-                                int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
-                                int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
-                                draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
-                            }
-                        }else if(IS_16X8(pict->mb_type[mb_index])){
-                            int i;
-                            for(i=0; i<2; i++){
-                                int sx=mb_x*16 + 8;
-                                int sy=mb_y*16 + 4 + 8*i;
-                                int xy= (mb_x*2 + (mb_y*2 + i)*mv_stride) << (mv_sample_log2-1);
-                                int mx=(pict->motion_val[direction][xy][0]>>shift);
-                                int my=(pict->motion_val[direction][xy][1]>>shift);
-
-                                if(IS_INTERLACED(pict->mb_type[mb_index]))
-                                    my*=2;
-
-                                draw_arrow(ptr, sx, sy, mx+sx, my+sy, width, height, s->linesize, 100);
-                            }
-                        }else if(IS_8X16(pict->mb_type[mb_index])){
-                            int i;
-                            for(i=0; i<2; i++){
-                                int sx=mb_x*16 + 4 + 8*i;
-                                int sy=mb_y*16 + 8;
-                                int xy= (mb_x*2 + i + mb_y*2*mv_stride) << (mv_sample_log2-1);
-                                int mx=(pict->motion_val[direction][xy][0]>>shift);
-                                int my=(pict->motion_val[direction][xy][1]>>shift);
-
-                                if(IS_INTERLACED(pict->mb_type[mb_index]))
-                                    my*=2;
-
-                                draw_arrow(ptr, sx, sy, mx+sx, my+sy, width, height, s->linesize, 100);
-                            }
-                        }else{
-                            int sx= mb_x*16 + 8;
-                            int sy= mb_y*16 + 8;
-                            int xy= (mb_x + mb_y*mv_stride) << mv_sample_log2;
-                            int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
-                            int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
-                            draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
+                            direction = 1;
+                            break;
+                        }
+                        if (!USES_LIST(pict->mb_type[mb_index], direction))
+                            continue;
+
+                        if (IS_8X8(pict->mb_type[mb_index])) {
+                            int i;
+                            for (i = 0; i < 4; i++) {
+                                int sx = mb_x * 16 + 4 + 8 * (i & 1);
+                                int sy = mb_y * 16 + 4 + 8 * (i >> 1);
+                                int xy = (mb_x * 2 + (i & 1) +
+                                          (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
+                                int mx = (pict->motion_val[direction][xy][0] >> shift) + sx;
+                                int my = (pict->motion_val[direction][xy][1] >> shift) + sy;
+                                draw_arrow(ptr, sx, sy, mx, my, width,
+                                           height, s->linesize, 100);
+                            }
+                        } else if (IS_16X8(pict->mb_type[mb_index])) {
+                            int i;
+                            for (i = 0; i < 2; i++) {
+                                int sx = mb_x * 16 + 8;
+                                int sy = mb_y * 16 + 4 + 8 * i;
+                                int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
+                                int mx = (pict->motion_val[direction][xy][0] >> shift);
+                                int my = (pict->motion_val[direction][xy][1] >> shift);

+                                if (IS_INTERLACED(pict->mb_type[mb_index]))
+                                    my *= 2;

+                                draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
+                                           height, s->linesize, 100);
+                            }
+                        } else if (IS_8X16(pict->mb_type[mb_index])) {
+                            int i;
+                            for (i = 0; i < 2; i++) {
+                                int sx = mb_x * 16 + 4 + 8 * i;
+                                int sy = mb_y * 16 + 8;
+                                int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
+                                int mx = pict->motion_val[direction][xy][0] >> shift;
+                                int my = pict->motion_val[direction][xy][1] >> shift;

+                                if (IS_INTERLACED(pict->mb_type[mb_index]))
+                                    my *= 2;

+                                draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
+                                           height, s->linesize, 100);
+                            }
+                        } else {
+                            int sx = mb_x * 16 + 8;
+                            int sy = mb_y * 16 + 8;
+                            int xy = (mb_x + mb_y * mv_stride) << mv_sample_log2;
+                            int mx = (pict->motion_val[direction][xy][0] >> shift) + sx;
+                            int my = (pict->motion_val[direction][xy][1] >> shift) + sy;
+                            draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
+                        }
+                    }
                 }
-                if((s->avctx->debug&FF_DEBUG_VIS_QP) && pict->motion_val){
-                    uint64_t c= (pict->qscale_table[mb_index]*128/31) * 0x0101010101010101ULL;
+                if ((s->avctx->debug & FF_DEBUG_VIS_QP) && pict->motion_val) {
+                    uint64_t c = (pict->qscale_table[mb_index] * 128 / 31) *
+                                 0x0101010101010101ULL;
                     int y;
-                    for(y=0; y<block_height; y++){
-                        *(uint64_t*)(pict->data[1] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[1])= c;
-                        *(uint64_t*)(pict->data[2] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[2])= c;
+                    for (y = 0; y < block_height; y++) {
+                        *(uint64_t *)(pict->data[1] + 8 * mb_x +
+                                      (block_height * mb_y + y) *
+                                      pict->linesize[1]) = c;
+                        *(uint64_t *)(pict->data[2] + 8 * mb_x +
+                                      (block_height * mb_y + y) *
+                                      pict->linesize[2]) = c;
                     }
                 }
-                if((s->avctx->debug&FF_DEBUG_VIS_MB_TYPE) && pict->motion_val){
-                    int mb_type= pict->mb_type[mb_index];
+                if ((s->avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
+                    pict->motion_val) {
+                    int mb_type = pict->mb_type[mb_index];
                     uint64_t u,v;
                     int y;
-#define COLOR(theta, r)\
-u= (int)(128 + r*cos(theta*3.141592/180));\
-v= (int)(128 + r*sin(theta*3.141592/180));
-
-
-                    u=v=128;
-                    if(IS_PCM(mb_type)){
-                        COLOR(120,48)
-                    }else if((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) || IS_INTRA16x16(mb_type)){
-                        COLOR(30,48)
-                    }else if(IS_INTRA4x4(mb_type)){
-                        COLOR(90,48)
-                    }else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type)){
-//                        COLOR(120,48)
-                    }else if(IS_DIRECT(mb_type)){
-                        COLOR(150,48)
-                    }else if(IS_GMC(mb_type) && IS_SKIP(mb_type)){
-                        COLOR(170,48)
-                    }else if(IS_GMC(mb_type)){
-                        COLOR(190,48)
-                    }else if(IS_SKIP(mb_type)){
-//                        COLOR(180,48)
-                    }else if(!USES_LIST(mb_type, 1)){
-                        COLOR(240,48)
-                    }else if(!USES_LIST(mb_type, 0)){
-                        COLOR(0,48)
-                    }else{
+#define COLOR(theta, r) \
+    u = (int)(128 + r * cos(theta * 3.141592 / 180)); \
+    v = (int)(128 + r * sin(theta * 3.141592 / 180));


+                    u = v = 128;
+                    if (IS_PCM(mb_type)) {
+                        COLOR(120, 48)
+                    } else if ((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) ||
+                               IS_INTRA16x16(mb_type)) {
+                        COLOR(30, 48)
+                    } else if (IS_INTRA4x4(mb_type)) {
+                        COLOR(90, 48)
+                    } else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type)) {
+                        // COLOR(120, 48)
+                    } else if (IS_DIRECT(mb_type)) {
+                        COLOR(150, 48)
+                    } else if (IS_GMC(mb_type) && IS_SKIP(mb_type)) {
+                        COLOR(170, 48)
+                    } else if (IS_GMC(mb_type)) {
+                        COLOR(190, 48)
+                    } else if (IS_SKIP(mb_type)) {
+                        // COLOR(180, 48)
+                    } else if (!USES_LIST(mb_type, 1)) {
+                        COLOR(240, 48)
+                    } else if (!USES_LIST(mb_type, 0)) {
+                        COLOR(0, 48)
+                    } else {
                         assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
                         COLOR(300,48)
                     }

-                    u*= 0x0101010101010101ULL;
-                    v*= 0x0101010101010101ULL;
-                    for(y=0; y<block_height; y++){
-                        *(uint64_t*)(pict->data[1] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[1])= u;
-                        *(uint64_t*)(pict->data[2] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[2])= v;
+                    u *= 0x0101010101010101ULL;
+                    v *= 0x0101010101010101ULL;
+                    for (y = 0; y < block_height; y++) {
+                        *(uint64_t *)(pict->data[1] + 8 * mb_x +
+                                      (block_height * mb_y + y) * pict->linesize[1]) = u;
+                        *(uint64_t *)(pict->data[2] + 8 * mb_x +
+                                      (block_height * mb_y + y) * pict->linesize[2]) = v;
                     }

-                    //segmentation
-                    if(IS_8X8(mb_type) || IS_16X8(mb_type)){
-                        *(uint64_t*)(pict->data[0] + 16*mb_x + 0 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
-                        *(uint64_t*)(pict->data[0] + 16*mb_x + 8 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
+                    // segmentation
+                    if (IS_8X8(mb_type) || IS_16X8(mb_type)) {
+                        *(uint64_t *)(pict->data[0] + 16 * mb_x + 0 +
+                                      (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
+                        *(uint64_t *)(pict->data[0] + 16 * mb_x + 8 +
+                                      (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
                     }
-                    if(IS_8X8(mb_type) || IS_8X16(mb_type)){
-                        for(y=0; y<16; y++)
-                            pict->data[0][16*mb_x + 8 + (16*mb_y + y)*pict->linesize[0]]^= 0x80;
+                    if (IS_8X8(mb_type) || IS_8X16(mb_type)) {
+                        for (y = 0; y < 16; y++)
+                            pict->data[0][16 * mb_x + 8 + (16 * mb_y + y) *
+                                          pict->linesize[0]] ^= 0x80;
                     }
-                    if(IS_8X8(mb_type) && mv_sample_log2 >= 2){
-                        int dm= 1 << (mv_sample_log2-2);
-                        for(i=0; i<4; i++){
-                            int sx= mb_x*16 + 8*(i&1);
-                            int sy= mb_y*16 + 8*(i>>1);
-                            int xy= (mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*mv_stride) << (mv_sample_log2-1);
-                            //FIXME bidir
-                            int32_t *mv = (int32_t*)&pict->motion_val[0][xy];
-                            if(mv[0] != mv[dm] || mv[dm*mv_stride] != mv[dm*(mv_stride+1)])
-                                for(y=0; y<8; y++)
-                                    pict->data[0][sx + 4 + (sy + y)*pict->linesize[0]]^= 0x80;
-                            if(mv[0] != mv[dm*mv_stride] || mv[dm] != mv[dm*(mv_stride+1)])
-                                *(uint64_t*)(pict->data[0] + sx + (sy + 4)*pict->linesize[0])^= 0x8080808080808080ULL;
+                    if (IS_8X8(mb_type) && mv_sample_log2 >= 2) {
+                        int dm = 1 << (mv_sample_log2 - 2);
+                        for (i = 0; i < 4; i++) {
+                            int sx = mb_x * 16 + 8 * (i & 1);
+                            int sy = mb_y * 16 + 8 * (i >> 1);
+                            int xy = (mb_x * 2 + (i & 1) +
+                                      (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
+                            // FIXME bidir
+                            int32_t *mv = (int32_t *) &pict->motion_val[0][xy];
+                            if (mv[0] != mv[dm] ||
+                                mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
+                                for (y = 0; y < 8; y++)
+                                    pict->data[0][sx + 4 + (sy + y) * pict->linesize[0]] ^= 0x80;
+                            if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
+                                *(uint64_t *)(pict->data[0] + sx + (sy + 4) *
+                                              pict->linesize[0]) ^= 0x8080808080808080ULL;
                         }
                     }

-                    if(IS_INTERLACED(mb_type) && s->codec_id == CODEC_ID_H264){
+                    if (IS_INTERLACED(mb_type) &&
+                        s->codec_id == CODEC_ID_H264) {
                         // hmm
                     }
                 }
-                s->mbskip_table[mb_index]=0;
+                s->mbskip_table[mb_index] = 0;
             }
         }
     }
 }

 static inline int hpel_motion_lowres(MpegEncContext *s,
-                                     uint8_t *dest, uint8_t *src,
-                                     int field_based, int field_select,
-                                     int src_x, int src_y,
-                                     int width, int height, int stride,
-                                     int h_edge_pos, int v_edge_pos,
-                                     int w, int h, h264_chroma_mc_func *pix_op,
-                                     int motion_x, int motion_y)
+                                    uint8_t *dest, uint8_t *src,
+                                    int field_based, int field_select,
+                                    int src_x, int src_y,
+                                    int width, int height, int stride,
+                                    int h_edge_pos, int v_edge_pos,
+                                    int w, int h, h264_chroma_mc_func *pix_op,
+                                    int motion_x, int motion_y)
 {
-    const int lowres= s->avctx->lowres;
-    const int op_index= FFMIN(lowres, 2);
-    const int s_mask= (2<<lowres)-1;
-    int emu=0;
+    const int lowres   = s->avctx->lowres;
+    const int op_index = FFMIN(lowres, 2);
+    const int s_mask   = (2 << lowres) - 1;
+    int emu = 0;
     int sx, sy;

-    if(s->quarter_sample){
-        motion_x/=2;
-        motion_y/=2;
+    if (s->quarter_sample) {
+        motion_x /= 2;
+        motion_y /= 2;
    }

-    sx= motion_x & s_mask;
-    sy= motion_y & s_mask;
-    src_x += motion_x >> (lowres+1);
-    src_y += motion_y >> (lowres+1);
+    sx     = motion_x & s_mask;
+    sy     = motion_y & s_mask;
+    src_x += motion_x >> (lowres + 1);
+    src_y += motion_y >> (lowres + 1);

-    src += src_y * stride + src_x;
+    src   += src_y * stride + src_x;

-    if(   (unsigned)src_x > h_edge_pos - (!!sx) - w
-       || (unsigned)src_y >(v_edge_pos >> field_based) - (!!sy) - h){
-        s->dsp.emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w+1, (h+1)<<field_based,
-                                src_x, src_y<<field_based, h_edge_pos, v_edge_pos);
-        src= s->edge_emu_buffer;
-        emu=1;
+    if ((unsigned)src_x > h_edge_pos - (!!sx) - w ||
+        (unsigned)src_y > (v_edge_pos >> field_based) - (!!sy) - h) {
+        s->dsp.emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w + 1,
+                                (h + 1) << field_based, src_x,
+                                src_y << field_based,
+                                h_edge_pos,
+                                v_edge_pos);
+        src = s->edge_emu_buffer;
+        emu = 1;
     }

-    sx= (sx << 2) >> lowres;
-    sy= (sy << 2) >> lowres;
-    if(field_select)
+    sx = (sx << 2) >> lowres;
+    sy = (sy << 2) >> lowres;
+    if (field_select)
         src += s->linesize;
     pix_op[op_index](dest, src, stride, h, sx, sy);
     return emu;
 }
@@ -1766,149 +1852,170 @@ static inline int hpel_motion_lowres(MpegEncContext *s,

 /* apply one mpeg motion vector to the three components */
 static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
-                               uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
-                               int field_based, int bottom_field, int field_select,
-                               uint8_t **ref_picture, h264_chroma_mc_func *pix_op,
-                               int motion_x, int motion_y, int h, int mb_y)
+                                                uint8_t *dest_y,
+                                                uint8_t *dest_cb,
+                                                uint8_t *dest_cr,
+                                                int field_based,
+                                                int bottom_field,
+                                                int field_select,
+                                                uint8_t **ref_picture,
+                                                h264_chroma_mc_func *pix_op,
+                                                int motion_x, int motion_y,
+                                                int h, int mb_y)
 {
     uint8_t *ptr_y, *ptr_cb, *ptr_cr;
-    int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, uvlinesize, linesize, sx, sy, uvsx, uvsy;
-    const int lowres= s->avctx->lowres;
-    const int op_index= FFMIN(lowres, 2);
-    const int block_s= 8>>lowres;
-    const int s_mask= (2<<lowres)-1;
+    int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, uvlinesize, linesize,
+        sx, sy, uvsx, uvsy;
+    const int lowres     = s->avctx->lowres;
+    const int op_index   = FFMIN(lowres, 2);
+    const int block_s    = 8 >> lowres;
+    const int s_mask     = (2 << lowres) - 1;
     const int h_edge_pos = s->h_edge_pos >> lowres;
     const int v_edge_pos = s->v_edge_pos >> lowres;

     linesize   = s->current_picture.f.linesize[0] << field_based;
     uvlinesize = s->current_picture.f.linesize[1] << field_based;

-    if(s->quarter_sample){ //FIXME obviously not perfect but qpel will not work in lowres anyway
-        motion_x/=2;
-        motion_y/=2;
+    // FIXME obviously not perfect but qpel will not work in lowres anyway
+    if (s->quarter_sample) {
+        motion_x /= 2;
+        motion_y /= 2;
     }

-    if(field_based){
-        motion_y += (bottom_field - field_select)*((1<<lowres)-1);
+    if (field_based) {
+        motion_y += (bottom_field - field_select) * ((1 << lowres) - 1);
     }

-    sx= motion_x & s_mask;
-    sy= motion_y & s_mask;
-    src_x = s->mb_x*2*block_s + (motion_x >> (lowres+1));
-    src_y =( mb_y*2*block_s>>field_based) + (motion_y >> (lowres+1));
+    sx    = motion_x & s_mask;
+    sy    = motion_y & s_mask;
+    src_x = s->mb_x * 2 * block_s + (motion_x >> (lowres + 1));
+    src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> (lowres + 1));

     if (s->out_format == FMT_H263) {
-        uvsx = ((motion_x>>1) & s_mask) | (sx&1);
-        uvsy = ((motion_y>>1) & s_mask) | (sy&1);
-        uvsrc_x = src_x>>1;
-        uvsrc_y = src_y>>1;
-    }else if(s->out_format == FMT_H261){//even chroma mv's are full pel in H261
-        mx = motion_x / 4;
-        my = motion_y / 4;
-        uvsx = (2*mx) & s_mask;
-        uvsy = (2*my) & s_mask;
-        uvsrc_x = s->mb_x*block_s + (mx >> lowres);
-        uvsrc_y = mb_y*block_s + (my >> lowres);
+        uvsx    = ((motion_x >> 1) & s_mask) | (sx & 1);
+        uvsy    = ((motion_y >> 1) & s_mask) | (sy & 1);
+        uvsrc_x = src_x >> 1;
+        uvsrc_y = src_y >> 1;
+    } else if (s->out_format == FMT_H261) {
+        // even chroma mv's are full pel in H261
+        mx      = motion_x / 4;
+        my      = motion_y / 4;
+        uvsx    = (2 * mx) & s_mask;
+        uvsy    = (2 * my) & s_mask;
+        uvsrc_x = s->mb_x * block_s + (mx >> lowres);
+        uvsrc_y = mb_y * block_s + (my >> lowres);
     } else {
-        mx = motion_x / 2;
-        my = motion_y / 2;
-        uvsx = mx & s_mask;
-        uvsy = my & s_mask;
-        uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
-        uvsrc_y =( mb_y*block_s>>field_based) + (my >> (lowres+1));
+        mx      = motion_x / 2;
+        my      = motion_y / 2;
+        uvsx    = mx & s_mask;
+        uvsy    = my & s_mask;
+        uvsrc_x = s->mb_x * block_s + (mx >> (lowres + 1));
+        uvsrc_y = (mb_y * block_s >> field_based) + (my >> (lowres + 1));
     }

-    ptr_y = ref_picture[0] + src_y * linesize + src_x;
+    ptr_y  = ref_picture[0] + src_y * linesize + src_x;
     ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
     ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;

-    if(   (unsigned)src_x > h_edge_pos - (!!sx) - 2*block_s
-       || (unsigned)src_y >(v_edge_pos >> field_based) - (!!sy) - h){
-        s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y, s->linesize, 17, 17+field_based,
-                                src_x, src_y<<field_based, h_edge_pos, v_edge_pos);
-        ptr_y= s->edge_emu_buffer;
-        if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
-            uint8_t *uvbuf= s->edge_emu_buffer+18*s->linesize;
-            s->dsp.emulated_edge_mc(uvbuf , ptr_cb, s->uvlinesize, 9, 9+field_based,
-                                    uvsrc_x, uvsrc_y<<field_based, h_edge_pos>>1, v_edge_pos>>1);
-            s->dsp.emulated_edge_mc(uvbuf+16, ptr_cr, s->uvlinesize, 9, 9+field_based,
-                                    uvsrc_x, uvsrc_y<<field_based, h_edge_pos>>1, v_edge_pos>>1);
-            ptr_cb= uvbuf;
-            ptr_cr= uvbuf+16;
-        }
+    if ((unsigned) src_x > h_edge_pos - (!!sx) - 2 * block_s ||
+        (unsigned) src_y > (v_edge_pos >> field_based) - (!!sy) - h) {
+        s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y,
+                                s->linesize, 17, 17 + field_based,
+                                src_x, src_y << field_based, h_edge_pos,
+                                v_edge_pos);
+        ptr_y = s->edge_emu_buffer;
+        if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
+            uint8_t *uvbuf = s->edge_emu_buffer + 18 * s->linesize;
+            s->dsp.emulated_edge_mc(uvbuf , ptr_cb, s->uvlinesize, 9,
+                                    9 + field_based,
+                                    uvsrc_x, uvsrc_y << field_based,
+                                    h_edge_pos >> 1, v_edge_pos >> 1);
+            s->dsp.emulated_edge_mc(uvbuf + 16, ptr_cr, s->uvlinesize, 9,
+                                    9 + field_based,
+                                    uvsrc_x, uvsrc_y << field_based,
+                                    h_edge_pos >> 1, v_edge_pos >> 1);
+            ptr_cb = uvbuf;
+            ptr_cr = uvbuf + 16;
+        }
     }

-    if(bottom_field){ //FIXME use this for field pix too instead of the obnoxious hack which changes picture.f.data
-        dest_y += s->linesize;
-        dest_cb+= s->uvlinesize;
-        dest_cr+= s->uvlinesize;
+    // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f.data
+    if (bottom_field) {
+        dest_y  += s->linesize;
+        dest_cb += s->uvlinesize;
+        dest_cr += s->uvlinesize;
     }

-    if(field_select){
-        ptr_y += s->linesize;
-        ptr_cb+= s->uvlinesize;
-        ptr_cr+= s->uvlinesize;
+    if (field_select) {
+        ptr_y  += s->linesize;
+        ptr_cb += s->uvlinesize;
+        ptr_cr += s->uvlinesize;
     }

-    sx= (sx << 2) >> lowres;
-    sy= (sy << 2) >> lowres;
-    pix_op[lowres-1](dest_y, ptr_y, linesize, h, sx, sy);
+    sx = (sx << 2) >> lowres;
+    sy = (sy << 2) >> lowres;
+    pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);

-    if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
-        uvsx= (uvsx << 2) >> lowres;
-        uvsy= (uvsy << 2) >> lowres;
-        pix_op[op_index](dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
-        pix_op[op_index](dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
+    if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
+        uvsx = (uvsx << 2) >> lowres;
+        uvsy = (uvsy << 2) >> lowres;
+        pix_op[op_index](dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift,
+                         uvsx, uvsy);
+        pix_op[op_index](dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift,
+                         uvsx, uvsy);
     }
-    //FIXME h261 lowres loop filter
+    // FIXME h261 lowres loop filter
 }

 static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
-                                     uint8_t *dest_cb, uint8_t *dest_cr,
-                                     uint8_t **ref_picture,
-                                     h264_chroma_mc_func *pix_op,
-                                     int mx, int my){
-    const int lowres= s->avctx->lowres;
-    const int op_index= FFMIN(lowres, 2);
-    const int block_s= 8>>lowres;
-    const int s_mask= (2<<lowres)-1;
-    const int h_edge_pos = s->h_edge_pos >> (lowres+1);
-    const int v_edge_pos = s->v_edge_pos >> (lowres+1);
-    int emu=0, src_x, src_y, offset, sx, sy;
+                                            uint8_t *dest_cb, uint8_t *dest_cr,
+                                            uint8_t **ref_picture,
+                                            h264_chroma_mc_func *pix_op,
+                                            int mx, int my)
+{
+    const int lowres     = s->avctx->lowres;
+    const int op_index   = FFMIN(lowres, 2);
+    const int block_s    = 8 >> lowres;
+    const int s_mask     = (2 << lowres) - 1;
+    const int h_edge_pos = s->h_edge_pos >> (lowres + 1);
+    const int v_edge_pos = s->v_edge_pos >> (lowres + 1);
+    int emu = 0, src_x, src_y, offset, sx, sy;
     uint8_t *ptr;

-    if(s->quarter_sample){
-        mx/=2;
-        my/=2;
+    if (s->quarter_sample) {
+        mx /= 2;
+        my /= 2;
     }

     /* In case of 8X8, we construct a single chroma motion vector
        with a special rounding */
-    mx= ff_h263_round_chroma(mx);
-    my= ff_h263_round_chroma(my);
+    mx = ff_h263_round_chroma(mx);
+    my = ff_h263_round_chroma(my);

-    sx= mx & s_mask;
-    sy= my & s_mask;
-    src_x = s->mb_x*block_s + (mx >> (lowres+1));
-    src_y = s->mb_y*block_s + (my >> (lowres+1));
+    sx    = mx & s_mask;
+    sy    = my & s_mask;
+    src_x = s->mb_x * block_s + (mx >> (lowres + 1));
+    src_y = s->mb_y * block_s + (my >> (lowres + 1));

     offset = src_y * s->uvlinesize + src_x;
     ptr = ref_picture[1] + offset;
-    if(s->flags&CODEC_FLAG_EMU_EDGE){
-        if(   (unsigned)src_x > h_edge_pos - (!!sx) - block_s
-           || (unsigned)src_y > v_edge_pos - (!!sy) - block_s){
-            s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
-            ptr= s->edge_emu_buffer;
-            emu=1;
+    if (s->flags & CODEC_FLAG_EMU_EDGE) {
+        if ((unsigned) src_x > h_edge_pos - (!!sx) - block_s ||
+            (unsigned) src_y > v_edge_pos - (!!sy) - block_s) {
+            s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize,
+                                    9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
+            ptr = s->edge_emu_buffer;
+            emu = 1;
         }
     }
-    sx= (sx << 2) >> lowres;
-    sy= (sy << 2) >> lowres;
+    sx = (sx << 2) >> lowres;
+    sy = (sy << 2) >> lowres;
     pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);

     ptr = ref_picture[2] + offset;
-    if(emu){
-        s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
-        ptr= s->edge_emu_buffer;
+    if (emu) {
+        s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9,
+                                src_x, src_y, h_edge_pos, v_edge_pos);
+        ptr = s->edge_emu_buffer;
     }
     pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
 }
@@ -1925,117 +2032,133 @@ static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
  * the motion vectors are taken from s->mv and the MV type from s->mv_type
  */
 static inline void MPV_motion_lowres(MpegEncContext *s,
-                              uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
-                              int dir, uint8_t **ref_picture,
-                              h264_chroma_mc_func *pix_op)
+                                     uint8_t *dest_y, uint8_t *dest_cb,
+                                     uint8_t *dest_cr,
+                                     int dir, uint8_t **ref_picture,
+                                     h264_chroma_mc_func *pix_op)
 {
     int mx, my;
     int mb_x, mb_y, i;
-    const int lowres= s->avctx->lowres;
-    const int block_s= 8>>lowres;
+    const int lowres  = s->avctx->lowres;
+    const int block_s = 8 >> lowres;

     mb_x = s->mb_x;
     mb_y = s->mb_y;

-    switch(s->mv_type) {
+    switch (s->mv_type) {
     case MV_TYPE_16X16:
         mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
-                    0, 0, 0,
-                    ref_picture, pix_op,
-                    s->mv[dir][0][0], s->mv[dir][0][1], 2*block_s, mb_y);
+                           0, 0, 0,
+                           ref_picture, pix_op,
+                           s->mv[dir][0][0], s->mv[dir][0][1],
+                           2 * block_s, mb_y);
         break;
     case MV_TYPE_8X8:
         mx = 0;
         my = 0;
-        for(i=0;i<4;i++) {
-            hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) * s->linesize)*block_s,
-                        ref_picture[0], 0, 0,
-                        (2*mb_x + (i & 1))*block_s, (2*mb_y + (i >>1))*block_s,
-                        s->width, s->height, s->linesize,
-                        s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
-                        block_s, block_s, pix_op,
-                        s->mv[dir][i][0], s->mv[dir][i][1]);

-            mx += s->mv[dir][i][0];
-            my += s->mv[dir][i][1];
-        }
+        for (i = 0; i < 4; i++) {
+            hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
+                               s->linesize) * block_s,
+                               ref_picture[0], 0, 0,
+                               (2 * mb_x + (i & 1)) * block_s,
+                               (2 * mb_y + (i >> 1)) * block_s,
+                               s->width, s->height, s->linesize,
+                               s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
+                               block_s, block_s, pix_op,
+                               s->mv[dir][i][0], s->mv[dir][i][1]);
+
+            mx += s->mv[dir][i][0];
+            my += s->mv[dir][i][1];
+        }

-        if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY))
-            chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture, pix_op, mx, my);
+        if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY))
+            chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
+                                     pix_op, mx, my);
         break;
     case MV_TYPE_FIELD:
         if (s->picture_structure == PICT_FRAME) {
             /* top field */
             mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
-                        1, 0, s->field_select[dir][0],
-                        ref_picture, pix_op,
-                        s->mv[dir][0][0], s->mv[dir][0][1], block_s, mb_y);
+                               1, 0, s->field_select[dir][0],
+                               ref_picture, pix_op,
+                               s->mv[dir][0][0], s->mv[dir][0][1],
+                               block_s, mb_y);
             /* bottom field */
             mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
-                        1, 1, s->field_select[dir][1],
-                        ref_picture, pix_op,
-                        s->mv[dir][1][0], s->mv[dir][1][1], block_s, mb_y);
+                               1, 1, s->field_select[dir][1],
+                               ref_picture, pix_op,
+                               s->mv[dir][1][0], s->mv[dir][1][1],
+                               block_s, mb_y);
         } else {
-            if(s->picture_structure != s->field_select[dir][0] + 1 && s->pict_type != AV_PICTURE_TYPE_B && !s->first_field){
+            if (s->picture_structure != s->field_select[dir][0] + 1 &&
+                s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
                 ref_picture = s->current_picture_ptr->f.data;
-            }
+            }
             mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
-                        0, 0, s->field_select[dir][0],
-                        ref_picture, pix_op,
-                        s->mv[dir][0][0], s->mv[dir][0][1], 2*block_s, mb_y>>1);
-        }
+                               0, 0, s->field_select[dir][0],
+                               ref_picture, pix_op,
+                               s->mv[dir][0][0],
+                               s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
+        }
         break;
     case MV_TYPE_16X8:
-        for(i=0; i<2; i++){
-            uint8_t ** ref2picture;
+        for (i = 0; i < 2; i++) {
+            uint8_t **ref2picture;

-            if(s->picture_structure == s->field_select[dir][i] + 1 || s->pict_type == AV_PICTURE_TYPE_B || s->first_field){
-                ref2picture= ref_picture;
-            }else{
+            if (s->picture_structure == s->field_select[dir][i] + 1 ||
+                s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
+                ref2picture = ref_picture;
+            } else {
                 ref2picture = s->current_picture_ptr->f.data;
             }

             mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
-                        0, 0, s->field_select[dir][i],
-                        ref2picture, pix_op,
-                        s->mv[dir][i][0], s->mv[dir][i][1] + 2*block_s*i, block_s, mb_y>>1);
-
-            dest_y += 2*block_s*s->linesize;
-            dest_cb+= (2*block_s>>s->chroma_y_shift)*s->uvlinesize;
-            dest_cr+= (2*block_s>>s->chroma_y_shift)*s->uvlinesize;
+                               0, 0, s->field_select[dir][i],
+                               ref2picture, pix_op,
+                               s->mv[dir][i][0], s->mv[dir][i][1] +
+                               2 * block_s * i, block_s, mb_y >> 1);
+
+            dest_y  += 2 * block_s * s->linesize;
+            dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
+            dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
         }
         break;
     case MV_TYPE_DMV:
-        if(s->picture_structure == PICT_FRAME){
-            for(i=0; i<2; i++){
+        if (s->picture_structure == PICT_FRAME) {
+            for (i = 0; i < 2; i++) {
                 int j;
-                for(j=0; j<2; j++){
+                for (j = 0; j < 2; j++) {
                     mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
-                                1, j, j^i,
-                                ref_picture, pix_op,
-                                s->mv[dir][2*i + j][0], s->mv[dir][2*i + j][1], block_s, mb_y);
+                                       1, j, j ^ i,
+                                       ref_picture, pix_op,
+                                       s->mv[dir][2 * i + j][0],
+                                       s->mv[dir][2 * i + j][1],
+                                       block_s, mb_y);
                 }
                 pix_op = s->dsp.avg_h264_chroma_pixels_tab;
             }
-        }else{
-            for(i=0; i<2; i++){
+        } else {
+            for (i = 0; i < 2; i++) {
                 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
-                            0, 0, s->picture_structure != i+1,
-                            ref_picture, pix_op,
-                            s->mv[dir][2*i][0],s->mv[dir][2*i][1],2*block_s, mb_y>>1);
+                                   0, 0, s->picture_structure != i + 1,
+                                   ref_picture, pix_op,
+                                   s->mv[dir][2 * i][0], s->mv[dir][2 * i][1],
+                                   2 * block_s, mb_y >> 1);

                 // after put we make avg of the same block
                 pix_op = s->dsp.avg_h264_chroma_pixels_tab;

-                //opposite parity is always in the same frame if this is second field
-                if(!s->first_field){
+                // opposite parity is always in the same
+                // frame if this is second field
+                if (!s->first_field) {
                     ref_picture = s->current_picture_ptr->f.data;
                 }
             }
         }
-    break;
-    default: assert(0);
+        break;
+    default:
+        assert(0);
     }
 }

From 06e8d38f2bebfb3c79c9f5e907f975ed02841f94 Mon Sep 17 00:00:00 2001
From: Anton Khirnov
Date: Sun, 25 Dec 2011 18:24:14 +0100
Subject: [PATCH 11/12] Add minor bumps and APIchanges entries for lavc/lavfi
 changes.

---
 doc/APIchanges         | 14 ++++++++++++++
 libavcodec/version.h   |  2 +-
 libavfilter/avfilter.h |  4 ++--
 3 files changed, 17 insertions(+), 3 deletions(-)

diff --git a/doc/APIchanges b/doc/APIchanges
index fa4e75e8e2..29a537d095 100644
--- a/doc/APIchanges
+++ b/doc/APIchanges
@@ -13,6 +13,20 @@ libavutil:   2011-04-18

 API changes, most recent first:

+2011-12-25 - lavfi 2.14.0
+  e1d9dbf Add a new installed header - buffersrc.h
+          It contains a new function av_buffersrc_buffer() that allows passing
+          frames to the 'buffer' filter, but unlike av_vsrc_buffer_add_frame()
+          it allows for direct rendering.
+  1c9e340 Add avfilter_copy_frame_props() for copying properties from
+          AVFrame to AVFilterBufferRef.
+
+2011-12-25 - lavc 53.31.0
+  Add the following new fields to AVFrame:
+    b58dbb5 sample_aspect_ratio
+    3a2ddf7 width, height
+    8a4a5f6 format
+
 2011-12-18 - 8400b12 - lavc 53.28.1
   Deprecate AVFrame.age. The field is unused.
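A minimal sketch of how the two lavfi 2.14.0 additions above are meant to
combine -- hypothetical caller code under assumed names (push_decoded_frame,
buffersrc_ctx), not part of this patch; the configured 'buffer' filter
instance and the frame geometry are assumed to come from elsewhere:

    /* Hypothetical usage sketch, not part of the patch series: wrap a
     * decoded AVFrame without copying its planes, carry its properties
     * over, then hand it to the 'buffer' source filter. */
    #include "libavfilter/avfilter.h"
    #include "libavfilter/buffersrc.h"

    static int push_decoded_frame(AVFilterContext *buffersrc_ctx,
                                  AVFrame *frame, int w, int h,
                                  enum PixelFormat fmt)
    {
        AVFilterBufferRef *ref;
        int ret;

        /* reuse the decoder's planes directly -- the direct rendering
         * that av_vsrc_buffer_add_frame() could not offer */
        ref = avfilter_get_video_buffer_ref_from_arrays(frame->data,
                                                        frame->linesize,
                                                        AV_PERM_READ,
                                                        w, h, fmt);
        if (!ref)
            return AVERROR(ENOMEM);

        /* copy pts, sample aspect ratio, interlacing flags, ... */
        ret = avfilter_copy_frame_props(ref, frame);
        if (ret < 0)
            return ret;

        return av_buffersrc_buffer(buffersrc_ctx, ref);
    }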
diff --git a/libavcodec/version.h b/libavcodec/version.h
index 80e12c14ea..f664795824 100644
--- a/libavcodec/version.h
+++ b/libavcodec/version.h
@@ -21,7 +21,7 @@
 #define AVCODEC_VERSION_H

 #define LIBAVCODEC_VERSION_MAJOR 53
-#define LIBAVCODEC_VERSION_MINOR 30
+#define LIBAVCODEC_VERSION_MINOR 31
 #define LIBAVCODEC_VERSION_MICRO  0

 #define LIBAVCODEC_VERSION_INT  AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \
diff --git a/libavfilter/avfilter.h b/libavfilter/avfilter.h
index a5cc8b2569..70efc5cd3c 100644
--- a/libavfilter/avfilter.h
+++ b/libavfilter/avfilter.h
@@ -30,8 +30,8 @@
 #include "libavcodec/avcodec.h"

 #define LIBAVFILTER_VERSION_MAJOR  2
-#define LIBAVFILTER_VERSION_MINOR 13
-#define LIBAVFILTER_VERSION_MICRO  1
+#define LIBAVFILTER_VERSION_MINOR 14
+#define LIBAVFILTER_VERSION_MICRO  0

 #define LIBAVFILTER_VERSION_INT AV_VERSION_INT(LIBAVFILTER_VERSION_MAJOR, \
                                                LIBAVFILTER_VERSION_MINOR, \

From 80dc7c0160fb27e46fc0caae8af10b3d63730c7c Mon Sep 17 00:00:00 2001
From: Anton Khirnov
Date: Sun, 25 Dec 2011 19:35:19 +0100
Subject: [PATCH 12/12] avs: call release_buffer() at the end.

Fixes a memleak.
---
 libavcodec/avs.c | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/libavcodec/avs.c b/libavcodec/avs.c
index 3ccded3f8c..b3cd5b1478 100644
--- a/libavcodec/avs.c
+++ b/libavcodec/avs.c
@@ -161,6 +161,15 @@ static av_cold int avs_decode_init(AVCodecContext * avctx)
     return 0;
 }

+static av_cold int avs_decode_end(AVCodecContext *avctx)
+{
+    AvsContext *s = avctx->priv_data;
+    if (s->picture.data[0])
+        avctx->release_buffer(avctx, &s->picture);
+    return 0;
+}
+
+
 AVCodec ff_avs_decoder = {
     .name           = "avs",
     .type           = AVMEDIA_TYPE_VIDEO,
@@ -168,6 +177,7 @@ AVCodec ff_avs_decoder = {
     .priv_data_size = sizeof(AvsContext),
     .init           = avs_decode_init,
     .decode         = avs_decode_frame,
+    .close          = avs_decode_end,
     .capabilities   = CODEC_CAP_DR1,
     .long_name      = NULL_IF_CONFIG_SMALL("AVS (Audio Video Standard) video"),
 };
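The close pattern added above generalizes to any decoder that keeps a
reusable AVFrame in its private context; a short sketch of the same idea for
a hypothetical decoder (assumed names, not from the patch):

    /* Hypothetical decoder close callback -- a sketch of the pattern the
     * avs fix applies: get_buffer()/release_buffer() come in pairs, so a
     * frame still held at close time must be released or it leaks. */
    #include "libavcodec/avcodec.h"

    typedef struct HypoContext {
        AVFrame picture;
    } HypoContext;

    static av_cold int hypo_decode_end(AVCodecContext *avctx)
    {
        HypoContext *s = avctx->priv_data;

        /* release the frame still referenced from the last decode call */
        if (s->picture.data[0])
            avctx->release_buffer(avctx, &s->picture);
        return 0;
    }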