3
0
mirror of https://github.com/Qortal/Brooklyn.git synced 2025-01-30 14:52:17 +00:00

accelerated rdp client for RPI

This commit is contained in:
Scare Crowe 2021-05-27 19:57:17 +05:00
parent 70b307f8f1
commit 85f03b1a17
34 changed files with 7949 additions and 0 deletions

21
rdp-acceleraed/LICENSE Normal file
View File

@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2014 jean343
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

71
rdp-acceleraed/README.md Normal file
View File

@ -0,0 +1,71 @@
RPI-GPU-rdpClient
=================
Video on youtube: http://youtu.be/3HJuHhiXxuE
Hardware accelerated raspberry pi client for windows PC.
It is more a proof-of-concept to show that OpenMAX can be used as a RDP viewer rather than a finished product.
There is no authentication, use at your own risk.
It uses a NVIDIA graphic card to encode H.264 video, and OpenMAX to display the video. It can achieve 1080P 60FPS RDP on a RPI with a relatively low latency of ~200ms on two monitors.
When the GPU is not accessible on the server, it falls back to CPU encoding at a lower FPS, around 10FPS depending on the CPU.
It uses DXGI for accelerated desktop capture in Windows 8
It can work in a Virtual machine in order to be a true thin client.
### To compile the client on the Raspberry PI ###
It needs the following packages. I started on a clean version of the Raspbian OS.
```
sudo apt-get install cmake
sudo apt-get install libboost-thread-dev libboost-system-dev
sudo apt-get install libx11-dev
```
To compile ilclient:
```
cd /opt/vc/src/hello_pi
sudo ./rebuild.sh
```
To compile the RDP client:
```
git clone https://github.com/jean343/RPI-GPU-rdpClient.git
cd RPI-GPU-rdpClient/RPI-Client
mkdir build && cd build/
cmake ..
make
```
### To run the client ###
./client <host> <port>
### To compile the server in Windows ###
- See the guide at https://github.com/jean343/RPI-GPU-rdpClient/blob/master/WindowsCompileGuide.md
- Optional, FFMPEG for a CPU fallback if the graphic card is unavailable
- Download FFMPEG from http://ffmpeg.zeranoe.com/builds/, need the dev and shared
- Set FFMPEG_ROOT to the root of FFMPEG dev folder
- Add the bin folder of the shared zip to your path, or copy the DLLs
### To run the server ###
./server monitor 0 port 8080
### Contribute ###
Want to be part of the project? Great! All are welcome! We will get there quicker together :)
Whether you find a bug or have a great feature request, feel free to get in touch.
### Known issues and limitations ###
- There is no audio
- There is no authentication, use only in a local LAN or under a VPN.
- The software falls back to CPU encoding in a Virtual Machine, it is fast as it uses the x264 superfast preset, but the H.264 quality is reduced.
### NOTES ###
From https://github.com/Hexxeh/rpi-update, update your pi:
```
sudo rpi-update
```
Update software:
```
sudo apt-get update && sudo apt-get upgrade
```

View File

@ -0,0 +1,11 @@
# Build script for the Raspberry Pi client. Links directly against the
# firmware-provided VideoCore libraries under /opt/vc and the ilclient helper
# library from the hello_pi samples (build it first — see the README).
cmake_minimum_required(VERSION 2.8)
project( client )
# Boost.Thread drives the mouse/keyboard forwarding thread in client.cpp.
FIND_PACKAGE( Boost REQUIRED COMPONENTS thread )
include_directories("/opt/vc/include/interface/vcos/pthreads/;/opt/vc/include/interface/vmcs_host/linux")
add_executable( client client.cpp )
include_directories(/opt/vc/include /opt/vc/src/hello_pi/libs/ilclient/ ${Boost_INCLUDE_DIR})
target_link_libraries( client X11 /opt/vc/src/hello_pi/libs/ilclient/libilclient.a boost_system /opt/vc/lib/libbcm_host.so /opt/vc/lib/libopenmaxil.so /opt/vc/lib/libvcos.so /opt/vc/lib/libGLESv2.so /opt/vc/lib/libEGL.so ${Boost_LIBRARIES} )

View File

@ -0,0 +1,371 @@
/*
Copyright (c) 2012, Broadcom Europe Ltd
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
//
// Copyright (c) 2003-2013 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "bcm_host.h"
extern "C" {
#include "ilclient.h"
}
#include <cstdlib>
#include <cstring>
#include <iostream>
#include <boost/asio.hpp>
#include <boost/thread.hpp>
#include <stdio.h>
#include <X11/Xlib.h>
#include <X11/XKBlib.h>
#include "mouse.h"
using boost::asio::ip::tcp;
using namespace std;
// Generic receive-buffer cap (boost::asio example boilerplate); not
// referenced in the code shown in this file.
enum {
    max_length = 1024
};
// Connects to <host>:<port>, identifies itself as the video channel ("a"),
// and feeds the raw H.264 stream from the socket into the OpenMAX IL
// pipeline video_decode -> video_scheduler -> video_render, paced by clock.
// Blocks until the stream ends or an error occurs.
// Returns 0 on success, a negative ilclient/OMX error code otherwise.
static int start_decode_video(char* host, char* port) {
    OMX_VIDEO_PARAM_PORTFORMATTYPE format;
    OMX_TIME_CONFIG_CLOCKSTATETYPE cstate;
    COMPONENT_T *video_decode = NULL, *video_scheduler = NULL, *video_render = NULL, *clock = NULL;
    COMPONENT_T * list[5];
    TUNNEL_T tunnel[4];
    ILCLIENT_T *client;
    int status = 0;
    unsigned int data_len = 0;
    memset(list, 0, sizeof (list));
    memset(tunnel, 0, sizeof (tunnel));
    if ((client = ilclient_init()) == NULL) {
        return -3;
    }
    if (OMX_Init() != OMX_ErrorNone) {
        ilclient_destroy(client);
        return -4;
    }
    // create video_decode
    if (ilclient_create_component(client, &video_decode, "video_decode", (ILCLIENT_CREATE_FLAGS_T)(ILCLIENT_DISABLE_ALL_PORTS | ILCLIENT_ENABLE_INPUT_BUFFERS)) != 0)
        status = -14;
    list[0] = video_decode;
    // create video_render
    if (status == 0 && ilclient_create_component(client, &video_render, "video_render", ILCLIENT_DISABLE_ALL_PORTS) != 0)
        status = -14;
    list[1] = video_render;
    // create clock
    if (status == 0 && ilclient_create_component(client, &clock, "clock", ILCLIENT_DISABLE_ALL_PORTS) != 0)
        status = -14;
    list[2] = clock;
    // Arm the clock: it starts running on the first timestamped buffer.
    memset(&cstate, 0, sizeof (cstate));
    cstate.nSize = sizeof (cstate);
    cstate.nVersion.nVersion = OMX_VERSION;
    cstate.eState = OMX_TIME_ClockStateWaitingForStartTime;
    cstate.nWaitMask = 1;
    if (clock != NULL && OMX_SetParameter(ILC_GET_HANDLE(clock), OMX_IndexConfigTimeClockState, &cstate) != OMX_ErrorNone)
        status = -13;
    // create video_scheduler
    if (status == 0 && ilclient_create_component(client, &video_scheduler, "video_scheduler", ILCLIENT_DISABLE_ALL_PORTS) != 0)
        status = -14;
    list[3] = video_scheduler;
    // Port numbers are fixed by the Broadcom components: decode out=131,
    // scheduler in=10/out=11/clock-in=12, render in=90, clock out=80.
    set_tunnel(tunnel, video_decode, 131, video_scheduler, 10);
    set_tunnel(tunnel + 1, video_scheduler, 11, video_render, 90);
    set_tunnel(tunnel + 2, clock, 80, video_scheduler, 12);
    // setup clock tunnel first
    if (status == 0 && ilclient_setup_tunnel(tunnel + 2, 0, 0) != 0)
        status = -15;
    else
        ilclient_change_component_state(clock, OMX_StateExecuting);
    if (status == 0)
        ilclient_change_component_state(video_decode, OMX_StateIdle);
    memset(&format, 0, sizeof (OMX_VIDEO_PARAM_PORTFORMATTYPE));
    format.nSize = sizeof (OMX_VIDEO_PARAM_PORTFORMATTYPE);
    format.nVersion.nVersion = OMX_VERSION;
    format.nPortIndex = 130;
    format.eCompressionFormat = OMX_VIDEO_CodingAVC;
    if (status == 0 &&
        OMX_SetParameter(ILC_GET_HANDLE(video_decode), OMX_IndexParamVideoPortFormat, &format) == OMX_ErrorNone &&
        ilclient_enable_port_buffers(video_decode, 130, NULL, NULL, NULL) == 0) {
        OMX_BUFFERHEADERTYPE *buf;
        int port_settings_changed = 0;
        int first_packet = 1;
        ilclient_change_component_state(video_decode, OMX_StateExecuting);
        // Open the video connection; the 2-byte "a" (incl. NUL) selects the
        // video channel on the server.
        boost::asio::io_service io_service;
        tcp::resolver resolver(io_service);
        tcp::resolver::query query(tcp::v4(), host, port);
        tcp::resolver::iterator iterator = resolver.resolve(query);
        tcp::socket s(io_service);
        boost::asio::connect(s, iterator);
        s.set_option(tcp::no_delay(true)); // latency matters more than throughput
        boost::asio::write(s, boost::asio::buffer("a", 2));
        while ((buf = ilclient_get_input_buffer(video_decode, 130, 1)) != NULL) {
            // feed data and wait until we get port settings changed
            unsigned char *dest = buf->pBuffer;
            // Read at most 10 KiB per buffer to keep end-to-end latency low.
            int bufferSize = std::min((int)buf->nAllocLen, 10 * 1024);
            data_len += boost::asio::read(s, boost::asio::buffer(dest, bufferSize));
            if (port_settings_changed == 0 &&
                ((data_len > 0 && ilclient_remove_event(video_decode, OMX_EventPortSettingsChanged, 131, 0, 0, 1) == 0) ||
                 (data_len == 0 && ilclient_wait_for_event(video_decode, OMX_EventPortSettingsChanged, 131, 0, 0, 1,
                                                           ILCLIENT_EVENT_ERROR | ILCLIENT_PARAMETER_CHANGED, 10000) == 0))) {
                port_settings_changed = 1;
                if (ilclient_setup_tunnel(tunnel, 0, 0) != 0) {
                    status = -7;
                    break;
                }
                ilclient_change_component_state(video_scheduler, OMX_StateExecuting);
                // now setup tunnel to video_render
                if (ilclient_setup_tunnel(tunnel + 1, 0, 1000) != 0) {
                    status = -12;
                    break;
                }
                ilclient_change_component_state(video_render, OMX_StateExecuting);
            }
            if (!data_len)
                break;
            buf->nFilledLen = data_len;
            data_len = 0;
            buf->nOffset = 0;
            if (first_packet) {
                buf->nFlags = OMX_BUFFERFLAG_STARTTIME;
                first_packet = 0;
            } else
                buf->nFlags = OMX_BUFFERFLAG_TIME_UNKNOWN;
            if (OMX_EmptyThisBuffer(ILC_GET_HANDLE(video_decode), buf) != OMX_ErrorNone) {
                status = -6;
                break;
            }
        }
        // BUGFIX: when the loop exits via its condition, buf is NULL and the
        // old code dereferenced it unconditionally. Only send the EOS marker
        // if we still hold a valid buffer (i.e. we left the loop via break).
        if (buf != NULL) {
            buf->nFilledLen = 0;
            buf->nFlags = OMX_BUFFERFLAG_TIME_UNKNOWN | OMX_BUFFERFLAG_EOS;
            if (OMX_EmptyThisBuffer(ILC_GET_HANDLE(video_decode), buf) != OMX_ErrorNone)
                status = -20;
            // wait for EOS from render
            ilclient_wait_for_event(video_render, OMX_EventBufferFlag, 90, 0, OMX_BUFFERFLAG_EOS, 0,
                                    ILCLIENT_BUFFER_FLAG_EOS, 10000);
        }
        // need to flush the renderer to allow video_decode to disable its input port
        ilclient_flush_tunnels(tunnel, 0);
        ilclient_disable_port_buffers(video_decode, 130, NULL, NULL, NULL);
    }
    // Tear the pipeline down in reverse order of construction.
    ilclient_disable_tunnel(tunnel);
    ilclient_disable_tunnel(tunnel + 1);
    ilclient_disable_tunnel(tunnel + 2);
    ilclient_teardown_tunnels(tunnel);
    ilclient_state_transition(list, OMX_StateIdle);
    ilclient_state_transition(list, OMX_StateLoaded);
    ilclient_cleanup_components(list);
    OMX_Deinit();
    ilclient_destroy(client);
    return status;
}
// Human-readable names for X11 mouse buttons 1-3, indexed by (button - 1).
// BUGFIX: string literals must bind to const char* — binding to char* is a
// deprecated (C++03) / ill-formed (C++11+) conversion.
const char *key_name[] = {
    "first",
    "second (or middle)",
    "third"
};
// Wire format for one input event, sent as raw bytes over the "b" TCP
// channel (see mouseKeyboardThread below). The server must be built with an
// identical layout and endianness — verify both ends match.
struct SendStruct {
    int type;    // 0 = mouse move, 1 = button press, 2 = button release, 3 = key press, 4 = key release
    int x;       // root-window pointer x (type 0 only)
    int y;       // root-window pointer y (type 0 only)
    int button;  // X11 button number (types 1-2)
    int keycode; // keysym from XKeycodeToKeysym (types 3-4)
};
// Fills a w x h rectangle at (x, y) of a 32-bit-per-pixel image with val.
// pitch is the row stride in BYTES (hence the >>2 to convert to pixels).
static void FillRect( void *image, int pitch, int x, int y, int w, int h, int val )
{
    const int stride = pitch >> 2;                        // pixels per row
    uint32_t *rowStart = (uint32_t *)image + y * stride + x;
    for ( int r = 0; r < h; ++r, rowStart += stride )
    {
        for ( int c = 0; c < w; ++c )
            rowStart[c] = val;
    }
}
void mouseKeyboardThread(char* host, char* port)
{
boost::asio::io_service io_service;
tcp::resolver resolver(io_service);
tcp::resolver::query query(tcp::v4(), host, port);
tcp::resolver::iterator iterator = resolver.resolve(query);
tcp::socket s(io_service);
boost::asio::connect(s, iterator);
s.set_option(tcp::no_delay(true));
boost::asio::write(s, boost::asio::buffer("b", 2));
Display *display;
XEvent xevent;
Window window;
if( (display = XOpenDisplay(NULL)) == NULL )
return;
window = DefaultRootWindow(display);
XAllowEvents(display, AsyncBoth, CurrentTime);
XGrabPointer(display,
window,
1,
PointerMotionMask | ButtonPressMask | ButtonReleaseMask ,
GrabModeAsync,
GrabModeAsync,
None,
None,
CurrentTime);
XGrabKeyboard(display, window, false, GrabModeAsync, GrabModeAsync, CurrentTime);
Mouse mouse;
while(1) {
XNextEvent(display, &xevent);
int mykey;
SendStruct send;
SendStruct* sendPtr = &send;
switch (xevent.type) {
case MotionNotify:
send.type = 0;
send.x = xevent.xmotion.x_root;
send.y = xevent.xmotion.y_root;
mouse.move(send.x, send.y);
//printf("Mouse move : [%d, %d]\n", xevent.xmotion.x_root, xevent.xmotion.y_root);
break;
case ButtonPress:
send.type = 1;
send.button = xevent.xbutton.button;
//printf("Button pressed : %s\n", key_name[xevent.xbutton.button - 1]);
break;
case ButtonRelease:
send.type = 2;
send.button = xevent.xbutton.button;
//printf("Button released : %s\n", key_name[xevent.xbutton.button - 1]);
break;
case KeyPress:
mykey = XKeycodeToKeysym(display, xevent.xkey.keycode, 0);
//printf("KeyPress : %s, %d\n", XKeysymToString(mykey), mykey);
send.type = 3;
send.keycode = mykey;
if (xevent.xkey.keycode == 27 || xevent.xkey.keycode == 9) {
return;
}
break;
case KeyRelease:
mykey = XKeycodeToKeysym(display, xevent.xkey.keycode, 0);
send.type = 4;
send.keycode = mykey;
//printf("KeyRelease : %s, %d\n", XKeysymToString(mykey), mykey);
break;
}
boost::asio::write(s, boost::asio::buffer(sendPtr, sizeof(SendStruct)));
}
mouse.close();
}
int main(int argc, char **argv) {
std::cout << "Version 0.9" << endl;
if (argc != 3)
{
std::cerr << "Usage: ./client <host> <port>\n";
return 1;
}
char* host = argv[1];
char* port = argv[2];
bcm_host_init();
boost::thread t(&mouseKeyboardThread, host, port);
start_decode_video(host, port);
t.join();
}

View File

@ -0,0 +1,71 @@
#include <stdio.h>
#include <X11/Xlib.h>
#include <X11/XKBlib.h>
// Human-readable names for X11 mouse buttons 1-3, indexed by (button - 1).
// BUGFIX: string literals must bind to const char* — binding to char* is a
// deprecated (C++03) / ill-formed (C++11+) conversion.
const char *key_name[] = {
    "first",
    "second (or middle)",
    "third"
};
// Event record mirroring the client's wire struct, minus button/keycode.
// NOTE(review): unused in this standalone test program — it only prints
// events and never opens a socket.
struct SendStruct {
    int type;
    int x;
    int y;
};
// Standalone input-grab test: grabs the pointer and keyboard on the local X
// display and prints every event until keycode 9 or 27 (Escape) is pressed.
// Exercises the same capture logic as client.cpp without any networking.
int main(int argc, char **argv)
{
    Display *display;
    XEvent xevent;
    Window window;
    // Connect to the default X display; fail if there is none.
    if( (display = XOpenDisplay(NULL)) == NULL )
        return -1;
    window = DefaultRootWindow(display);
    XAllowEvents(display, AsyncBoth, CurrentTime);
    // Exclusively grab motion and button events on the root window.
    XGrabPointer(display,
                 window,
                 1,
                 PointerMotionMask | ButtonPressMask | ButtonReleaseMask ,
                 GrabModeAsync,
                 GrabModeAsync,
                 None,
                 None,
                 CurrentTime);
    XGrabKeyboard(display, window, false, GrabModeAsync, GrabModeAsync, CurrentTime);
    while(1) {
        // Blocks until the next grabbed event arrives.
        XNextEvent(display, &xevent);
        int mykey;
        switch (xevent.type) {
            case MotionNotify:
                printf("Mouse move : [%d, %d]\n", xevent.xmotion.x_root, xevent.xmotion.y_root);
                break;
            case ButtonPress:
                printf("Button pressed : %s, %d\n", key_name[xevent.xbutton.button - 1], xevent.xbutton.button);
                break;
            case ButtonRelease:
                printf("Button released : %s, %d\n", key_name[xevent.xbutton.button - 1], xevent.xbutton.button);
                break;
            case KeyPress:
                // NOTE(review): XKeycodeToKeysym is deprecated in Xlib; kept
                // as-is to match client.cpp.
                mykey = XKeycodeToKeysym(display, xevent.xkey.keycode, 0);
                printf("KeyPress : %s, %d\n", XKeysymToString(mykey), mykey);
                // keycode 9 is Escape on common layouts — TODO confirm why 27
                // is also treated as an exit key here.
                if (xevent.xkey.keycode == 27 || xevent.xkey.keycode == 9) {
                    return 0;
                }
                break;
            case KeyRelease:
                mykey = XKeycodeToKeysym(display, xevent.xkey.keycode, 0);
                printf("KeyRelease : %s, %d\n", XKeysymToString(mykey), mykey);
                break;
        }
    }
    return 0;
}

View File

@ -0,0 +1,127 @@
#pragma once
#include "bcm_host.h"
// Bundle of DispmanX handles describing the on-screen cursor overlay.
typedef struct
{
    DISPMANX_DISPLAY_HANDLE_T display;   // opened display (screen 0)
    DISPMANX_MODEINFO_T info;            // display mode info (width/height)
    void *image;                         // NOTE(review): unused in this header
    DISPMANX_UPDATE_HANDLE_T update;     // in-flight update transaction
    DISPMANX_RESOURCE_HANDLE_T resource; // pixel resource backing the cursor
    DISPMANX_ELEMENT_HANDLE_T element;   // the composited cursor element
    uint32_t vc_image_ptr;               // VideoCore-side image handle
} RECT_VARS_T;
// Single global instance shared by all Mouse objects.
static RECT_VARS_T gRectVars;
// Draws and moves a small 6x6 solid-white software cursor on the Raspberry Pi
// display via DispmanX, mirroring the pointer position received from X11.
class Mouse {
public:
    RECT_VARS_T *vars;
    VC_IMAGE_TYPE_T type;
    // Creates the cursor resource and adds it to the display at (0,0).
    Mouse(){
        type = VC_IMAGE_ARGB8888;
        int width = 6;
        int height = 6;
        uint32_t screen = 0;
        int ret;
        VC_RECT_T src_rect;
        VC_RECT_T dst_rect;
        VC_DISPMANX_ALPHA_T alpha = { (DISPMANX_FLAGS_ALPHA_T)(DISPMANX_FLAGS_ALPHA_FROM_SOURCE | DISPMANX_FLAGS_ALPHA_FIXED_ALL_PIXELS) ,
                                      255, //alpha 0->255
                                      0 };
        vars = &gRectVars;
        bcm_host_init(); // idempotent; also called from the client's main()
        printf("Open display[%i]...\n", screen );
        vars->display = vc_dispmanx_display_open( screen );
        ret = vc_dispmanx_display_get_info( vars->display, &vars->info);
        assert(ret == 0);
        printf( "Display is %d x %d\n", vars->info.width, vars->info.height );
        vars->resource = vc_dispmanx_resource_create( type,
                                                      width,
                                                      height,
                                                      &vars->vc_image_ptr );
        assert( vars->resource );
        vars->update = vc_dispmanx_update_start( 10 );
        assert( vars->update );
        // Source rectangle is in 16.16 fixed point.
        vc_dispmanx_rect_set( &src_rect, 0, 0, width << 16, height << 16 );
        vc_dispmanx_rect_set( &dst_rect, 0, 0, width, height );
        vars->element = vc_dispmanx_element_add( vars->update,
                                                 vars->display,
                                                 2000, // layer: on top of the video
                                                 &dst_rect,
                                                 vars->resource,
                                                 &src_rect,
                                                 DISPMANX_PROTECTION_NONE,
                                                 &alpha,
                                                 NULL, // clamp
                                                 DISPMANX_NO_ROTATE );
        vc_dispmanx_rect_set( &dst_rect, 0, 0, width, height);
        // Opaque white cursor pixels: 4 bytes per ARGB8888 pixel, all 0xFF.
        // (Was cast to uint16_t* — misleading for a 32bpp image.)
        uint32_t *image = (uint32_t *)calloc( 1, width*4*height );
        memset(image, 0xFF, width*4*height);
        ret = vc_dispmanx_resource_write_data( vars->resource,
                                               type,
                                               width*4, // pitch in bytes
                                               image,
                                               &dst_rect );
        // BUGFIX: the staging buffer was leaked; VideoCore keeps its own copy
        // after write_data, so it is safe to free here.
        free(image);
        ret = vc_dispmanx_update_submit_sync( vars->update );
        assert( ret == 0 );
    }
    // Moves the cursor element so its top-left corner sits at (x, y).
    void move(int x, int y){
        int ret;
        VC_RECT_T dst_rect;
        vars->update = vc_dispmanx_update_start( 10 );
        vc_dispmanx_rect_set( &dst_rect, x, y, 6, 6);
        ret = vc_dispmanx_element_change_attributes(
            vars->update,
            vars->element,
            /*ELEMENT_CHANGE_DEST_RECT*/ (1<<2),
            0,
            0,
            &dst_rect,
            NULL,
            DISPMANX_NO_HANDLE,
            DISPMANX_NO_ROTATE);
        assert( ret == DISPMANX_SUCCESS );
        /* Submit asynchronously, otherwise the performance suffers a lot */
        ret = vc_dispmanx_update_submit( vars->update, 0, NULL );
        assert( ret == DISPMANX_SUCCESS );
    }
    // Removes the cursor element and releases the DispmanX resources.
    void close(){
        int ret;
        vars->update = vc_dispmanx_update_start( 10 );
        assert( vars->update );
        ret = vc_dispmanx_element_remove( vars->update, vars->element );
        assert( ret == 0 );
        ret = vc_dispmanx_update_submit_sync( vars->update );
        assert( ret == 0 );
        ret = vc_dispmanx_resource_delete( vars->resource );
        assert( ret == 0 );
        ret = vc_dispmanx_display_close( vars->display );
        assert( ret == 0 );
    }
};

View File

@ -0,0 +1,121 @@
# Build script for the Windows server. Detects the available capture backend
# (WDDM/DXGI vs. GDI) and encoder backend (NVENC vs. FFMPEG/x264), then bakes
# the resulting HAS_* flags into config.h via config.h.in.
cmake_minimum_required(VERSION 2.8)
set(CMAKE_MODULE_PATH "${CMAKE_MODULE_PATH};${CMAKE_CURRENT_SOURCE_DIR}")
project( server )
include_directories(${CMAKE_CURRENT_SOURCE_DIR})
# Feature toggles (cached; override on the command line, e.g. -DUSE_CUDA=off).
SET(USE_CUDA on CACHE BOOL "Use CUDA")
SET(USE_WDDM on CACHE BOOL "Use WDDM for screen capture")
SET(USE_NVENC on CACHE BOOL "Use Nvidia encoder")
# BOOST
set(Boost_USE_STATIC_LIBS ON)
set(Boost_USE_MULTITHREADED ON)
find_package( Boost REQUIRED COMPONENTS thread )
if(Boost_FOUND)
    message("Boost found!")
endif()
include_directories(${Boost_INCLUDE_DIR})
LINK_DIRECTORIES(${Boost_LIBRARY_DIRS})
# FFMPEG — optional CPU-encoder fallback (see README for the expected layout).
set(FFMPEG_ROOT "" CACHE FILEPATH "Root of the FFMPEG directory, which has README.txt")
if (FFMPEG_ROOT)
    FIND_PATH( FFMPEG_INCLUDE_DIR libavcodec/avcodec.h
        ${FFMPEG_ROOT}/include
    )
    include_directories(${FFMPEG_INCLUDE_DIR})
    FIND_LIBRARY( FFMPEG_LIBRARY_avcodec avcodec
        ${FFMPEG_ROOT}/lib
    )
    FIND_LIBRARY( FFMPEG_LIBRARY_avutil avutil
        ${FFMPEG_ROOT}/lib
    )
    if (FFMPEG_LIBRARY_avcodec AND FFMPEG_LIBRARY_avutil)
        set (FFMPEG_FOUND 1)
        set (FFMPEG_LIBRARIES "${FFMPEG_LIBRARY_avcodec};${FFMPEG_LIBRARY_avutil}")
        message("FFMPEG found!")
    endif()
endif()
# DXGI and CUDA
if (USE_CUDA)
    find_package(CUDA)
endif()
if(CUDA_FOUND)
    set (HAS_CUDA 1)
    message("CUDA found!")
    include_directories(${CUDA_TOOLKIT_INCLUDE})
    # RGB -> YV12 color-conversion kernel, compiled for sm_30.
    CUDA_ADD_LIBRARY(cudalib STATIC
        color_conversion.h
        color_conversion.cu
        OPTIONS -arch sm_30
    )
    TARGET_LINK_LIBRARIES(cudalib ${CUDA_LIBRARIES})
    set (CUDA_LINK_LIBRARIES "${CUDA_CUDA_LIBRARY};${CUDA_CUDART_LIBRARY};cudalib")
endif()
if (USE_WDDM OR USE_NVENC)
    # Uses FindDirectX.cmake from this directory (CMAKE_MODULE_PATH above).
    find_package( DirectX )
endif()
include_directories(${DXGI_INCLUDES})
include_directories(${Boost_INCLUDE_DIRS})
# Capture backend: prefer WDDM/DXGI desktop duplication, fall back to GDI.
if (USE_WDDM AND DIRECTX_FOUND)
    set (HAS_WDDM 1)
    SET(Capture_HEADER
        wddm.h
        WDDMCapture.h
    )
    SET(Capture_LIBRARIES
        "${DXGI_LIBRARIES}"
    )
else()
    SET(Capture_HEADER
        GDICapture.h
    )
endif()
# Encoder backend: prefer NVENC; otherwise FFMPEG if it was found above.
if (DIRECTX_FOUND AND USE_NVENC)
    set (HAS_NVENC 1)
    #files for NVEncoder
    SET(ENCODER_SOURCE
        NvEncoder/NvHWEncoder.cpp
    )
    SET(ENCODER_HEADERS
        NV_encoding.hpp
        NvEncoder/NvEncoder.h
        NvEncoder/NvHWEncoder.h
        NvEncoder/nvEncodeAPI.h
    )
elseif (FFMPEG_FOUND)
    set (HAS_FFMPEG 1)
    SET(ENCODER_SOURCE
    )
    SET(ENCODER_HEADERS
        FFMPEG_encoding.hpp
    )
    SET(ENCODER_LIBRARIES
        "${FFMPEG_LIBRARIES}"
    )
endif()
SET(COMMON_SOURCE
    config.h
    bounded_buffer.h
    Capture.h
    fps.h
    monitor.h
    params.h
)
add_executable( server server.cpp ${COMMON_SOURCE} ${ENCODER_SOURCE} ${ENCODER_HEADERS} ${Capture_HEADER} )
target_link_libraries( server ${Boost_LIBRARIES} ${ENCODER_LIBRARIES} ${Capture_LIBRARIES} ${CUDA_LINK_LIBRARIES})
# Bake the HAS_* flags chosen above into config.h.
CONFIGURE_FILE(${CMAKE_CURRENT_SOURCE_DIR}/config.h.in ${CMAKE_CURRENT_SOURCE_DIR}/config.h)

View File

@ -0,0 +1,8 @@
#pragma once
// Abstract interface for a single-monitor screen grabber. Implemented by
// GDICapture (fallback) and the WDDM/DXGI capture path; instances are used
// polymorphically by the server.
class Capture {
public:
    // BUGFIX: a polymorphic base needs a virtual destructor, otherwise
    // deleting through a Capture* is undefined behavior and the derived
    // class's cleanup never runs.
    virtual ~Capture() {}
    // Prepare capture of the given monitor's rectangle (desktop coordinates).
    virtual void init(UINT monitorID, RECT screen) = 0;
    // Grab one frame; on return *out points at a 32-bit pixel buffer owned by
    // the implementation. Returns 0 on success.
    virtual int getNextFrame(RGBQUAD**) = 0;
    // Release any per-frame resources held since the last getNextFrame().
    virtual void doneNextFrame() = 0;
};

View File

@ -0,0 +1,194 @@
/*
* Copyright (c) 2001 Fabrice Bellard
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <math.h>
#define __STDC_CONSTANT_MACROS
extern "C" {
#include <libavutil/opt.h>
#include <libavcodec/avcodec.h>
#include <libavutil/channel_layout.h>
#include <libavutil/common.h>
#include <libavutil/imgutils.h>
#include <libavutil/mathematics.h>
#include <libavutil/samplefmt.h>
};
using namespace boost::asio;
using ip::tcp;
typedef boost::shared_ptr<tcp::socket> socket_ptr;
uint8_t endcode[] = { 0, 0, 1, 0xb7 };
// CPU fallback H.264 encoder built on libavcodec/x264. load() opens the
// codec, write() converts one 32-bit RGBQUAD frame to YUV420P and streams
// the encoded packet to the socket, close() frees everything.
// NOTE(review): uses the pre-FFmpeg-3.x API (avcodec_register_all,
// avcodec_encode_video2, av_free_packet), which is deprecated/removed in
// newer FFmpeg releases — pin the FFMPEG build accordingly.
class FFMPEG_encoding {
public:
    // Open the H.264 encoder for width x height frames; the encoded
    // bitstream will be written to sock.
    void load(int width, int height, socket_ptr sock) {
        this->sock = sock;
        c = NULL;
        codec_id = AV_CODEC_ID_H264;
        i=0; // frame counter, used as PTS
        avcodec_register_all();
        /* find the H.264 video encoder */
        codec = avcodec_find_encoder(codec_id);
        if (!codec) {
            fprintf(stderr, "Codec not found\n");
            exit(1);
        }
        c = avcodec_alloc_context3(codec);
        if (!c) {
            fprintf(stderr, "Could not allocate video codec context\n");
            exit(1);
        }
        /* put sample parameters */
        c->bit_rate = 400000;
        /* resolution must be a multiple of two */
        c->width = width;
        c->height = height;
        /* frames per second */
        // NOTE(review): FFmpeg convention is time_base = 1/fps; this sets
        // num=25, den=1 (i.e. 25/1) — confirm this is intended.
        AVRational r;
        r.den=1;
        r.num=25;
        c->time_base = r;
        /* emit one intra frame every ten frames
         * check frame pict_type before passing frame
         * to encoder, if frame->pict_type is AV_PICTURE_TYPE_I
         * then gop_size is ignored and the output of encoder
         * will always be I frame irrespective to gop_size
         */
        c->gop_size = 10;
        c->max_b_frames = 0; // no B-frames keeps latency low for remote desktop
        c->refs = 0;
        c->pix_fmt = AV_PIX_FMT_YUV420P;//AV_PIX_FMT_YUV444P;
        // ultrafast,superfast, veryfast, faster, fast, medium, slow, slower, veryslow
        if (codec_id == AV_CODEC_ID_H264) {
            av_opt_set(c->priv_data, "preset", "veryfast", 0);
            av_opt_set(c->priv_data, "tune", "zerolatency", 0);
            av_opt_set(c->priv_data, "movflags", "faststart", 0);
        }
        /* open it */
        if (avcodec_open2(c, codec, NULL) < 0) {
            fprintf(stderr, "Could not open codec\n");
            exit(1);
        }
        frame = av_frame_alloc();
        if (!frame) {
            fprintf(stderr, "Could not allocate video frame\n");
            exit(1);
        }
        frame->format = c->pix_fmt;
        frame->width = c->width;
        frame->height = c->height;
        /* the image can be allocated by any means and av_image_alloc() is
         * just the most convenient way if av_malloc() is to be used */
        int ret = av_image_alloc(frame->data, frame->linesize, c->width, c->height,
                                 c->pix_fmt, 32);
        if (ret < 0) {
            fprintf(stderr, "Could not allocate raw picture buffer\n");
            exit(1);
        }
    }
    // Convert one frame of RGBQUAD pixels to YUV420P, encode it, and send the
    // resulting packet over the socket (nothing is sent when the encoder
    // buffers the frame, i.e. got_output == 0).
    void write(int width, int height, RGBQUAD *pPixels) {
        av_init_packet(&pkt);
        pkt.data = NULL; // packet data will be allocated by the encoder
        pkt.size = 0;
        fflush(stdout);
        // Integer RGB->YUV conversion; the 2x2-subsampled chroma planes are
        // rewritten for every source pixel, so the last pixel of each 2x2
        // block wins.
        for (int y = 0; y < c->height; y++) {
            for (int x = 0; x < c->width; x++) {
                RGBQUAD px = pPixels[y*width+x];
                int Y = ( ( 66 * px.rgbRed + 129 * px.rgbGreen + 25 * px.rgbBlue + 128) >> 8) + 16;
                int U = ( ( -38 * px.rgbRed - 74 * px.rgbGreen + 112 * px.rgbBlue + 128) >> 8) + 128;
                int V = ( ( 112 * px.rgbRed - 94 * px.rgbGreen - 18 * px.rgbBlue + 128) >> 8) + 128;
                frame->data[0][y * frame->linesize[0] + x] = Y;
                //frame->data[1][y * frame->linesize[0] + x] = U;
                //frame->data[2][y * frame->linesize[0] + x] = V;
                frame->data[1][(y >> 1) * frame->linesize[1] + (x >> 1)] = U;
                frame->data[2][(y >> 1) * frame->linesize[2] + (x >> 1)] = V;
            }
        }
        frame->pts = i;
        i++;
        /* encode the image */
        int got_output;
        int ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
        if (ret < 0) {
            fprintf(stderr, "Error encoding frame\n");
            exit(1);
        }
        if (got_output) {
            printf("Write frame (size=%5d)\n", pkt.size);
            //fwrite(pkt.data, 1, pkt.size, f);
            boost::asio::write(*sock, buffer((char*)pkt.data, pkt.size));
            av_free_packet(&pkt);
        }
    }
    // Free the codec context and staging frame. Delayed frames are not
    // drained (the file-output drain loop below is kept, commented, from the
    // original FFmpeg example) — acceptable for a live stream.
    void close () {
        /* get the delayed frames */
        /*for (got_output = 1; got_output; i++) {
            fflush(stdout);
            int ret = avcodec_encode_video2(c, &pkt, NULL, &got_output);
            if (ret < 0) {
                fprintf(stderr, "Error encoding frame\n");
                exit(1);
            }
            if (got_output) {
                printf("Write frame %3d (size=%5d)\n", i, pkt.size);
                fwrite(pkt.data, 1, pkt.size, f);
                av_free_packet(&pkt);
            }
        }*/
        /* add sequence end code to have a real mpeg file */
        //fwrite(endcode, 1, sizeof(endcode), f);
        //fclose(f);
        avcodec_close(c);
        av_free(c);
        av_freep(&frame->data[0]);
        av_frame_free(&frame);
    }
private:
    AVCodecID codec_id;  // always AV_CODEC_ID_H264 (set in load)
    AVCodec *codec;
    AVCodecContext *c;
    AVFrame *frame;      // reusable YUV420P staging frame
    AVPacket pkt;
    socket_ptr sock;     // destination for the encoded bitstream
    int i;               // frame counter / PTS
};

View File

@ -0,0 +1,31 @@
# FindDirectX — locates the dxgi1_2.h header and the d3d11/Dxgi import
# libraries from the Windows 8/10 SDKs. On success sets DIRECTX_FOUND,
# DXGI_INCLUDES and DXGI_LIBRARIES for the server build.
# NOTE(review): paths are hard-coded to specific SDK versions and mix x64
# (Windows 10 kits) with x86 (8.x kits) lib directories — extend the lists
# for other SDK installs/architectures.
FIND_PATH( DXGI_INCLUDE dxgi1_2.h
    "C:/Program Files (x86)/Windows Kits/10/Include/10.0.17763.0/shared"
    "C:/Program Files (x86)/Windows Kits/10/Include/10.0.17134.0/shared"
    "C:/Program Files (x86)/Windows Kits/8.1/Include/shared"
    "C:/Program Files (x86)/Windows Kits/8.0/Include/shared"
    "C:/Program Files/Windows Kits/8.1/Include/shared"
    "C:/Program Files/Windows Kits/8.0/Include/shared"
)
FIND_LIBRARY( DXGI_LIBRARY1 d3d11
    "C:/Program Files (x86)/Windows Kits/10/Lib/10.0.17763.0/um/x64"
    "C:/Program Files (x86)/Windows Kits/10/Lib/10.0.17134.0/um/x64"
    "C:/Program Files (x86)/Windows Kits/8.1/Lib/winv6.3/um/x86"
    "C:/Program Files (x86)/Windows Kits/8.0/Lib/winv6.3/um/x86"
    "C:/Program Files/Windows Kits/8.1/Lib/winv6.3/um/x86"
    "C:/Program Files/Windows Kits/8.0/Lib/winv6.3/um/x86"
)
FIND_LIBRARY( DXGI_LIBRARY2 Dxgi
    "C:/Program Files (x86)/Windows Kits/10/Lib/10.0.17763.0/um/x64"
    "C:/Program Files (x86)/Windows Kits/10/Lib/10.0.17134.0/um/x64"
    "C:/Program Files (x86)/Windows Kits/8.1/Lib/winv6.3/um/x86"
    "C:/Program Files (x86)/Windows Kits/8.0/Lib/winv6.3/um/x86"
    "C:/Program Files/Windows Kits/8.1/Lib/winv6.3/um/x86"
    "C:/Program Files/Windows Kits/8.0/Lib/winv6.3/um/x86"
)
# Only report success when the header and BOTH libraries were found.
if (DXGI_INCLUDE AND DXGI_LIBRARY1 AND DXGI_LIBRARY2 )
    set (DIRECTX_FOUND 1)
    set (DXGI_INCLUDES "${DXGI_INCLUDE}")
    set (DXGI_LIBRARIES "${DXGI_LIBRARY1};${DXGI_LIBRARY2}")
    message("DIRECTX found!")
endif()

View File

@ -0,0 +1,63 @@
#pragma once
#include "Capture.h"
class GDICapture : public Capture {
public:
void init(UINT monitorID, RECT screen)
{
this->screen = screen;
hdc = GetDC(NULL); // get the desktop device context
hDest = CreateCompatibleDC(hdc); // create a device context to use yourself
// get the height and width of the screen
height = screen.bottom - screen.top;
width = screen.right - screen.left;
int virtualScreenHeight = GetSystemMetrics(SM_CYVIRTUALSCREEN);
int virtualScreenWidth = GetSystemMetrics(SM_CXVIRTUALSCREEN);
// create a bitmap
hbDesktop = CreateCompatibleBitmap( hdc, virtualScreenWidth, virtualScreenHeight);
// use the previously created device context with the bitmap
SelectObject(hDest, hbDesktop);
bmi.bmiHeader.biSize = sizeof(bmi.bmiHeader);
bmi.bmiHeader.biWidth = width;
bmi.bmiHeader.biHeight = -height;
bmi.bmiHeader.biPlanes = 1;
bmi.bmiHeader.biBitCount = 32;
bmi.bmiHeader.biCompression = BI_RGB;
pPixels = new RGBQUAD[width * height];
}
int getNextFrame(RGBQUAD** data)
{
// copy from the desktop device context to the bitmap device context
BitBlt( hDest, 0,0, width, height, hdc, screen.left, screen.top, SRCCOPY);
GetDIBits(
hDest,
hbDesktop,
0,
height,
pPixels,
&bmi,
DIB_RGB_COLORS
);
*data = pPixels;
return 0;
}
void doneNextFrame()
{
}
private:
HDC hdc, hDest;
int width, height;
RECT screen;
RGBQUAD *pPixels;
HBITMAP hbDesktop;
BITMAPINFO bmi;
};

View File

@ -0,0 +1,100 @@
using namespace boost::asio;
using ip::tcp;
typedef boost::shared_ptr<tcp::socket> socket_ptr;
#include "NvEncoder/NvEncoder.h"
#include "color_conversion.h"
// Hardware H.264 encoder path built on NVENC (via CNvEncoder), with a CUDA
// RGB->YV12 color conversion and a CPU fallback when the CUDA kernel fails.
class NV_encoding {
public:
    // Set up CUDA + NVENC for width x height input and allocate the staging
    // YV12 planes and the output packet buffer.
    void load(int width, int height, socket_ptr sock, UINT monitorID) {
        NVENCSTATUS nvStatus = NV_ENC_SUCCESS;
        this->sock = sock;
        this->width = width;
        this->height = height;
        cNvEncoder = new CNvEncoder();
        cNvEncoder->InitCuda();
        nvStatus = cNvEncoder->Initialize(NV_ENC_DEVICE_TYPE_CUDA);
        nvStatus = cNvEncoder->CreateEncoder(width, height);
        nvStatus = cNvEncoder->AllocateIOBuffers(width, height, false);
        dataPacket = new DataPacket();
        // NOTE(review): assumes one encoded frame never exceeds width*height
        // bytes — verify against CNvEncoder's output bounds.
        dataPacket->data = new uint8_t[width*height];
        yuv[0] = new uint8_t[width*height];     // Y plane
        yuv[1] = new uint8_t[width*height / 4]; // U plane (2x2 subsampled)
        yuv[2] = new uint8_t[width*height / 4]; // V plane (2x2 subsampled)
        // Init avi file
        //char buffer[255];
        //sprintf(buffer, "C:\\Monitor%d.avi", monitorID);
        //ofs.open(buffer, std::ofstream::out | std::ofstream::binary);
    }
    // Convert one frame to YV12 (CUDA kernel, CPU fallback on failure),
    // encode it with NVENC and stream the packet to the socket.
    void write(int width, int height, RGBQUAD *pPixels) {
        bool rc = RGB_to_YV12(width, height, pPixels, yuv[0], yuv[1], yuv[2]);
        if (!rc){
            // The Cuda function RGB_to_YV12 failed, do CPU conversion.
            for (int y = 0; y < height; y++) {
                for (int x = 0; x < width; x++) {
                    RGBQUAD px = pPixels[y*width+x];
                    int Y = ( ( 66 * px.rgbRed + 129 * px.rgbGreen + 25 * px.rgbBlue + 128) >> 8) + 16;
                    int U = ( ( -38 * px.rgbRed - 74 * px.rgbGreen + 112 * px.rgbBlue + 128) >> 8) + 128;
                    int V = ( ( 112 * px.rgbRed - 94 * px.rgbGreen - 18 * px.rgbBlue + 128) >> 8) + 128;
                    yuv[0][y * width + x] = Y;
                    yuv[1][(y >> 1) * (width >> 1) + (x >> 1)] = U;
                    yuv[2][(y >> 1) * (width >> 1) + (x >> 1)] = V;
                }
            }
        }
        EncodeFrameConfig stEncodeFrame;
        memset(&stEncodeFrame, 0, sizeof(stEncodeFrame));
        stEncodeFrame.yuv[0] = yuv[0];
        stEncodeFrame.yuv[1] = yuv[1];
        stEncodeFrame.yuv[2] = yuv[2];
        stEncodeFrame.stride[0] = width;
        stEncodeFrame.stride[1] = width/2;
        stEncodeFrame.stride[2] = width/2;
        stEncodeFrame.width = width;
        stEncodeFrame.height = height;
        cNvEncoder->EncodeFrame(&stEncodeFrame, dataPacket, false, width, height);
        if (dataPacket->size > 0) {
            printf("Write frame (size=%5d)\n", dataPacket->size);
            //ofs.write((char*)dataPacket->data, dataPacket->size);
            boost::asio::write(*sock, buffer((char*)dataPacket->data, dataPacket->size));
        }
    }
    // Release the encoder and all staging buffers.
    void close () {
        delete cNvEncoder;
        // BUGFIX: data was allocated with new[]; scalar delete was UB.
        delete[] dataPacket->data;
        delete dataPacket;
        for (int i = 0; i < 3; i ++)
        {
            if (yuv[i])
            {
                delete [] yuv[i];
            }
        }
    }
private:
    int width;
    int height;
    socket_ptr sock;       // destination for the encoded bitstream
    uint8_t *yuv[3];       // staging YV12 planes (Y, U, V)
    CNvEncoder* cNvEncoder;
    DataPacket* dataPacket; // reusable encoded-output buffer
    //std::ofstream ofs;
};

View File

@ -0,0 +1,503 @@
////////////////////////////////////////////////////////////////////////////
//
// Copyright 1993-2014 NVIDIA Corporation. All rights reserved.
//
// Please refer to the NVIDIA end user license agreement (EULA) associated
// with this source code for terms and conditions that govern your use of
// this software. Any use, reproduction, disclosure, or distribution of
// this software and related documentation outside the terms of the EULA
// is strictly prohibited.
//
////////////////////////////////////////////////////////////////////////////
#if defined(NV_WINDOWS)
#include <d3d9.h>
#include <d3d10_1.h>
#include <d3d11.h>
#pragma warning(disable : 4996)
#endif
//#pragma comment (lib, "cuda.lib")
#pragma comment (lib, "d3d9.lib")
#pragma comment (lib, "d3d10.lib")
#pragma comment (lib, "d3d11.lib")
#include "NvHWEncoder.h"
#include "nvEncodeAPI.h"
#include "nvUtils.h"
#define MAX_ENCODE_QUEUE 32
#define BITSTREAM_BUFFER_SIZE 2 * 1024 * 1024
#define SET_VER(configStruct, type) {configStruct.version = type##_VER;}
// Fixed-capacity ring of pointers into a caller-owned item array.
// GetAvailable() hands out the next free slot; GetPending() returns the
// oldest slot still outstanding (FIFO). Items are not copied or owned.
// Not thread-safe: intended for single-threaded encode pipelining.
template<class T>
class CNvQueue {
    T** m_pBuffer;                // ring of pointers into the caller's items
    unsigned int m_uSize;         // capacity (number of items)
    unsigned int m_uPendingCount; // slots handed out but not yet consumed
    unsigned int m_uAvailableIdx; // next index returned by GetAvailable()
    unsigned int m_uPendingndex;  // next index returned by GetPending()
public:
    CNvQueue(): m_pBuffer(NULL), m_uSize(0), m_uPendingCount(0), m_uAvailableIdx(0),
        m_uPendingndex(0)
    {
    }
    ~CNvQueue()
    {
        delete[] m_pBuffer;
    }
    // Point the queue at pItems[0..uSize-1] and reset all counters.
    // Safe to call more than once; always returns true.
    bool Initialize(T *pItems, unsigned int uSize)
    {
        delete[] m_pBuffer; // fix: re-initialization used to leak the old ring
        m_uSize = uSize;
        m_uPendingCount = 0;
        m_uAvailableIdx = 0;
        m_uPendingndex = 0;
        m_pBuffer = new T *[m_uSize];
        for (unsigned int i = 0; i < m_uSize; i++)
        {
            m_pBuffer[i] = &pItems[i];
        }
        return true;
    }
    // Next free slot, or NULL when every slot is outstanding.
    T * GetAvailable()
    {
        T *pItem = NULL;
        if (m_uPendingCount == m_uSize)
        {
            return NULL;
        }
        pItem = m_pBuffer[m_uAvailableIdx];
        m_uAvailableIdx = (m_uAvailableIdx+1)%m_uSize;
        m_uPendingCount += 1;
        return pItem;
    }
    // Oldest outstanding slot, or NULL when nothing is pending.
    T* GetPending()
    {
        if (m_uPendingCount == 0)
        {
            return NULL;
        }
        T *pItem = m_pBuffer[m_uPendingndex];
        m_uPendingndex = (m_uPendingndex+1)%m_uSize;
        m_uPendingCount -= 1;
        return pItem;
    }
};
// One raw input frame in planar YUV form, as consumed by CNvEncoder::EncodeFrame.
typedef struct _EncodeFrameConfig
{
uint8_t *yuv[3]; // plane pointers: [0]=Y, [1]=Cb/U, [2]=Cr/V
uint32_t stride[3]; // per-plane row stride in bytes
uint32_t width; // luma width in pixels
uint32_t height; // luma height in pixels
}EncodeFrameConfig;
// Device backend used to drive NVENC (only NV_ENC_CUDA is used in this project).
typedef enum
{
NV_ENC_DX9 = 0,
NV_ENC_DX11 = 1,
NV_ENC_CUDA = 2,
NV_ENC_DX10 = 3,
} NvEncodeDeviceType;
class CNvEncoder
{
public:
// Construct an idle encoder: allocate the HW-encoder wrapper and zero all
// buffer bookkeeping. No device, CUDA context or NVENC session exists yet
// (see InitCuda / Initialize / CreateEncoder / AllocateIOBuffers).
CNvEncoder()
{
m_pNvHWEncoder = new CNvHWEncoder;
m_pDevice = NULL;
#if defined (NV_WINDOWS)
m_pD3D = NULL;
#endif
m_cuContext = NULL;
m_uEncodeBufferCount = 0;
memset(&m_stEncoderInput, 0, sizeof(m_stEncoderInput));
memset(&m_stEOSOutputBfr, 0, sizeof(m_stEOSOutputBfr));
memset(&m_stEncodeBuffer, 0, sizeof(m_stEncodeBuffer));
}
// Free the HW-encoder wrapper only.
// NOTE(review): IO buffers and the CUDA context are not released here —
// callers appear responsible for ReleaseIOBuffers() first; confirm.
~CNvEncoder()
{
if (m_pNvHWEncoder)
{
delete m_pNvHWEncoder;
m_pNvHWEncoder = NULL;
}
}
// Create a CUDA context on device `deviceID` for NVENC to use. The context
// is stored (as void*) in m_pDevice and popped off this thread so NVENC can
// attach it on demand. Requires compute capability >= 3.0 (first NVENC GPUs).
// Returns NV_ENC_SUCCESS, or an NV_ENC_ERR_* code on any CUDA failure.
NVENCSTATUS InitCuda(uint32_t deviceID = 0)
{
CUresult cuResult;
CUdevice device;
CUcontext cuContextCurr;
int deviceCount = 0;
int SMminor = 0, SMmajor = 0;
cuResult = cuInit(0);
if (cuResult != CUDA_SUCCESS)
{
PRINTERR("cuInit error:0x%x\n", cuResult);
assert(0);
return NV_ENC_ERR_NO_ENCODE_DEVICE;
}
cuResult = cuDeviceGetCount(&deviceCount);
if (cuResult != CUDA_SUCCESS)
{
PRINTERR("cuDeviceGetCount error:0x%x\n", cuResult);
assert(0);
return NV_ENC_ERR_NO_ENCODE_DEVICE;
}
// If dev is negative value, we clamp to 0
if ((int)deviceID < 0)
deviceID = 0;
if (deviceID >(unsigned int)deviceCount - 1)
{
PRINTERR("Invalid Device Id = %d\n", deviceID);
return NV_ENC_ERR_INVALID_ENCODERDEVICE;
}
cuResult = cuDeviceGet(&device, deviceID);
if (cuResult != CUDA_SUCCESS)
{
PRINTERR("cuDeviceGet error:0x%x\n", cuResult);
return NV_ENC_ERR_NO_ENCODE_DEVICE;
}
cuResult = cuDeviceComputeCapability(&SMmajor, &SMminor, deviceID);
if (cuResult != CUDA_SUCCESS)
{
PRINTERR("cuDeviceComputeCapability error:0x%x\n", cuResult);
return NV_ENC_ERR_NO_ENCODE_DEVICE;
}
// NVENC hardware exists on Kepler (SM 3.0) and newer only.
if (((SMmajor << 4) + SMminor) < 0x30)
{
PRINTERR("GPU %d does not have NVENC capabilities exiting\n", deviceID);
return NV_ENC_ERR_NO_ENCODE_DEVICE;
}
cuResult = cuCtxCreate((CUcontext*)(&m_pDevice), 0, device);
if (cuResult != CUDA_SUCCESS)
{
PRINTERR("cuCtxCreate error:0x%x\n", cuResult);
assert(0);
return NV_ENC_ERR_NO_ENCODE_DEVICE;
}
// Detach the new context from this thread; NVENC pushes it when needed.
cuResult = cuCtxPopCurrent(&cuContextCurr);
if (cuResult != CUDA_SUCCESS)
{
PRINTERR("cuCtxPopCurrent error:0x%x\n", cuResult);
assert(0);
return NV_ENC_ERR_NO_ENCODE_DEVICE;
}
return NV_ENC_SUCCESS;
}
// Open the NVENC session on the device created by InitCuda (m_pDevice).
// Returns the wrapper's status code.
NVENCSTATUS Initialize(NV_ENC_DEVICE_TYPE deviceType) {
NVENCSTATUS nvStatus = m_pNvHWEncoder->Initialize(m_pDevice, deviceType);
return nvStatus;
}
// Configure and create the NVENC encode session for a width x height H.264
// stream tuned for low-latency streaming: VBR at ~1 Mbit/s, 20 fps, GOP 200,
// one-frame VBV so bitrate spikes are capped. Must be called after
// Initialize(). Also fixes m_uEncodeBufferCount for AllocateIOBuffers().
NVENCSTATUS CreateEncoder(int width, int height){
EncodeConfig encodeConfig;
memset(&encodeConfig, 0, sizeof(EncodeConfig));
encodeConfig.width = width;
encodeConfig.height = height;
// B = Encoding bitrate, ~1 Mbit/s
int B = 1000 * 1024;
int fps = 20;
uint32_t maxFrameSize = B / fps; // bandwidth / frame rate
encodeConfig.vbvSize = maxFrameSize; // single-frame VBV bounds per-frame size
encodeConfig.endFrameIdx = INT_MAX;
encodeConfig.bitrate = encodeConfig.vbvSize * fps;
encodeConfig.vbvMaxBitrate = encodeConfig.vbvSize * fps;
encodeConfig.rcMode = NV_ENC_PARAMS_RC_VBR;//NV_ENC_PARAMS_RC_CONSTQP;
encodeConfig.gopLength = 200;//NVENC_INFINITE_GOPLENGTH;
encodeConfig.deviceType = NV_ENC_CUDA;
encodeConfig.codec = NV_ENC_H264;
encodeConfig.fps = fps;
encodeConfig.qp = 28;
encodeConfig.presetGUID = NV_ENC_PRESET_LOW_LATENCY_HQ_GUID;//NV_ENC_PRESET_LOW_LATENCY_HQ_GUID;//NV_ENC_PRESET_DEFAULT_GUID;
encodeConfig.pictureStruct = NV_ENC_PIC_STRUCT_FRAME;
encodeConfig.isYuv444 = 0;
// NOTE(review): encoderPreset is NULL here (struct was memset); this relies
// on GetPresetGUID falling back to a default — confirm, and note it
// overwrites the preset assigned just above.
encodeConfig.presetGUID = m_pNvHWEncoder->GetPresetGUID(encodeConfig.encoderPreset, encodeConfig.codec);
// Fix: inputFileName/outputFileName are NULL (never set in this app);
// printf("%s", NULL) is undefined behavior, so substitute a placeholder.
printf("Encoding input : \"%s\"\n", encodeConfig.inputFileName ? encodeConfig.inputFileName : "(none)");
printf(" output : \"%s\"\n", encodeConfig.outputFileName ? encodeConfig.outputFileName : "(none)");
printf(" codec : \"%s\"\n", encodeConfig.codec == NV_ENC_HEVC ? "HEVC" : "H264");
printf(" size : %dx%d\n", encodeConfig.width, encodeConfig.height);
printf(" bitrate : %d bits/sec\n", encodeConfig.bitrate);
printf(" vbvMaxBitrate : %d bits/sec\n", encodeConfig.vbvMaxBitrate);
printf(" vbvSize : %d bits\n", encodeConfig.vbvSize);
printf(" fps : %d frames/sec\n", encodeConfig.fps);
printf(" rcMode : %s\n", encodeConfig.rcMode == NV_ENC_PARAMS_RC_CONSTQP ? "CONSTQP" :
encodeConfig.rcMode == NV_ENC_PARAMS_RC_VBR ? "VBR" :
encodeConfig.rcMode == NV_ENC_PARAMS_RC_CBR ? "CBR" :
encodeConfig.rcMode == NV_ENC_PARAMS_RC_VBR_MINQP ? "VBR MINQP" :
encodeConfig.rcMode == NV_ENC_PARAMS_RC_2_PASS_QUALITY ? "TWO_PASS_QUALITY" :
encodeConfig.rcMode == NV_ENC_PARAMS_RC_2_PASS_FRAMESIZE_CAP ? "TWO_PASS_FRAMESIZE_CAP" :
encodeConfig.rcMode == NV_ENC_PARAMS_RC_2_PASS_VBR ? "TWO_PASS_VBR" : "UNKNOWN");
if (encodeConfig.gopLength == NVENC_INFINITE_GOPLENGTH)
printf(" goplength : INFINITE GOP \n");
else
printf(" goplength : %d \n", encodeConfig.gopLength);
printf(" B frames : %d \n", encodeConfig.numB);
printf(" QP : %d \n", encodeConfig.qp);
printf(" Input Format : %s\n", encodeConfig.isYuv444 ? "YUV 444" : "YUV 420");
printf(" preset : %s\n", (encodeConfig.presetGUID == NV_ENC_PRESET_LOW_LATENCY_HQ_GUID) ? "LOW_LATENCY_HQ" :
(encodeConfig.presetGUID == NV_ENC_PRESET_LOW_LATENCY_HP_GUID) ? "LOW_LATENCY_HP" :
(encodeConfig.presetGUID == NV_ENC_PRESET_HQ_GUID) ? "HQ_PRESET" :
(encodeConfig.presetGUID == NV_ENC_PRESET_HP_GUID) ? "HP_PRESET" :
(encodeConfig.presetGUID == NV_ENC_PRESET_LOSSLESS_HP_GUID) ? "LOSSLESS_HP" : "LOW_LATENCY_DEFAULT");
printf(" Picture Structure : %s\n", (encodeConfig.pictureStruct == NV_ENC_PIC_STRUCT_FRAME) ? "Frame Mode" :
(encodeConfig.pictureStruct == NV_ENC_PIC_STRUCT_FIELD_TOP_BOTTOM) ? "Top Field first" :
(encodeConfig.pictureStruct == NV_ENC_PIC_STRUCT_FIELD_BOTTOM_TOP) ? "Bottom Field first" : "INVALID");
printf(" devicetype : %s\n", encodeConfig.deviceType == NV_ENC_DX9 ? "DX9" :
encodeConfig.deviceType == NV_ENC_DX10 ? "DX10" :
encodeConfig.deviceType == NV_ENC_DX11 ? "DX11" :
encodeConfig.deviceType == NV_ENC_CUDA ? "CUDA" : "INVALID");
printf("\n");
NVENCSTATUS nvStatus = m_pNvHWEncoder->CreateEncoder(&encodeConfig);
m_uEncodeBufferCount = encodeConfig.numB + 1; // min buffers is numb + 1 + 3 pipelining
m_uPicStruct = encodeConfig.pictureStruct;
return nvStatus;
}
// Submit one frame to NVENC. With bFlush set, drains the pipeline into
// dataPacket instead (pEncodeFrame may be NULL in that case).
// The YUV planes are copied into a locked NVENC input surface (NV12 or
// YUV444 layout depending on how the buffers were allocated) and queued for
// asynchronous encoding; completed output, if any, lands in dataPacket.
NVENCSTATUS EncodeFrame(EncodeFrameConfig *pEncodeFrame, DataPacket* dataPacket, bool bFlush=false, uint32_t width=0, uint32_t height=0) {
NVENCSTATUS nvStatus = NV_ENC_SUCCESS;
uint32_t lockedPitch = 0;
EncodeBuffer *pEncodeBuffer = NULL;
if (bFlush)
{
FlushEncoder(dataPacket);
return NV_ENC_SUCCESS;
}
if (!pEncodeFrame)
{
return NV_ENC_ERR_INVALID_PARAM;
}
// No free slot means the pipeline is full: retire the oldest pending
// buffer (its bitstream goes into dataPacket), freeing a slot.
// NOTE(review): ProcessOutput's status is ignored here — confirm intended.
pEncodeBuffer = m_EncodeBufferQueue.GetAvailable();
if(!pEncodeBuffer)
{
m_pNvHWEncoder->ProcessOutput(m_EncodeBufferQueue.GetPending(), dataPacket);
pEncodeBuffer = m_EncodeBufferQueue.GetAvailable();
}
unsigned char *pInputSurface;
nvStatus = m_pNvHWEncoder->NvEncLockInputBuffer(pEncodeBuffer->stInputBfr.hInputSurface, (void**)&pInputSurface, &lockedPitch);
if (nvStatus != NV_ENC_SUCCESS)
return nvStatus;
if (pEncodeBuffer->stInputBfr.bufferFmt == NV_ENC_BUFFER_FORMAT_NV12_PL)
{
// NV12: luma plane followed by one interleaved Cb/Cr plane.
unsigned char *pInputSurfaceCh = pInputSurface + (pEncodeBuffer->stInputBfr.dwHeight*lockedPitch);
convertYUVpitchtoNV12(pEncodeFrame->yuv[0], pEncodeFrame->yuv[1], pEncodeFrame->yuv[2], pInputSurface, pInputSurfaceCh, width, height, width, lockedPitch);
}
else
{
// YUV444: three full-resolution planes stacked in the surface.
unsigned char *pInputSurfaceCb = pInputSurface + (pEncodeBuffer->stInputBfr.dwHeight * lockedPitch);
unsigned char *pInputSurfaceCr = pInputSurfaceCb + (pEncodeBuffer->stInputBfr.dwHeight * lockedPitch);
convertYUVpitchtoYUV444(pEncodeFrame->yuv[0], pEncodeFrame->yuv[1], pEncodeFrame->yuv[2], pInputSurface, pInputSurfaceCb, pInputSurfaceCr, width, height, width, lockedPitch);
}
nvStatus = m_pNvHWEncoder->NvEncUnlockInputBuffer(pEncodeBuffer->stInputBfr.hInputSurface);
if (nvStatus != NV_ENC_SUCCESS)
return nvStatus;
nvStatus = m_pNvHWEncoder->NvEncEncodeFrame(pEncodeBuffer, NULL, width, height, (NV_ENC_PIC_STRUCT)m_uPicStruct);
return nvStatus;
}
// Allocate m_uEncodeBufferCount (set by CreateEncoder) input surfaces and
// bitstream buffers, wire them into the ring queue, and create the dedicated
// end-of-stream output buffer. isYuv444 selects the input surface layout.
// Returns the first failing NVENC status, or NV_ENC_SUCCESS.
NVENCSTATUS AllocateIOBuffers(uint32_t uInputWidth, uint32_t uInputHeight, uint32_t isYuv444)
{
NVENCSTATUS nvStatus = NV_ENC_SUCCESS;
m_EncodeBufferQueue.Initialize(m_stEncodeBuffer, m_uEncodeBufferCount);
for (uint32_t i = 0; i < m_uEncodeBufferCount; i++)
{
nvStatus = m_pNvHWEncoder->NvEncCreateInputBuffer(uInputWidth, uInputHeight, &m_stEncodeBuffer[i].stInputBfr.hInputSurface, isYuv444);
if (nvStatus != NV_ENC_SUCCESS)
return nvStatus;
m_stEncodeBuffer[i].stInputBfr.bufferFmt = isYuv444 ? NV_ENC_BUFFER_FORMAT_YUV444_PL : NV_ENC_BUFFER_FORMAT_NV12_PL;
m_stEncodeBuffer[i].stInputBfr.dwWidth = uInputWidth;
m_stEncodeBuffer[i].stInputBfr.dwHeight = uInputHeight;
nvStatus = m_pNvHWEncoder->NvEncCreateBitstreamBuffer(BITSTREAM_BUFFER_SIZE, &m_stEncodeBuffer[i].stOutputBfr.hBitstreamBuffer);
if (nvStatus != NV_ENC_SUCCESS)
return nvStatus;
m_stEncodeBuffer[i].stOutputBfr.dwBitstreamBufferSize = BITSTREAM_BUFFER_SIZE;
#if defined (NV_WINDOWS)
// Windows uses async mode: one completion event per output buffer.
nvStatus = m_pNvHWEncoder->NvEncRegisterAsyncEvent(&m_stEncodeBuffer[i].stOutputBfr.hOutputEvent);
if (nvStatus != NV_ENC_SUCCESS)
return nvStatus;
m_stEncodeBuffer[i].stOutputBfr.bWaitOnEvent = true;
#else
// Other platforms run NVENC synchronously; no event needed.
m_stEncodeBuffer[i].stOutputBfr.hOutputEvent = NULL;
#endif
}
m_stEOSOutputBfr.bEOSFlag = TRUE;
#if defined (NV_WINDOWS)
nvStatus = m_pNvHWEncoder->NvEncRegisterAsyncEvent(&m_stEOSOutputBfr.hOutputEvent);
if (nvStatus != NV_ENC_SUCCESS)
return nvStatus;
#else
m_stEOSOutputBfr.hOutputEvent = NULL;
#endif
return NV_ENC_SUCCESS;
}
// Destroy every input surface, bitstream buffer and (on Windows) completion
// event created by AllocateIOBuffers. Always returns NV_ENC_SUCCESS;
// individual destroy failures are ignored.
NVENCSTATUS ReleaseIOBuffers()
{
for (uint32_t i = 0; i < m_uEncodeBufferCount; i++)
{
m_pNvHWEncoder->NvEncDestroyInputBuffer(m_stEncodeBuffer[i].stInputBfr.hInputSurface);
m_stEncodeBuffer[i].stInputBfr.hInputSurface = NULL;
m_pNvHWEncoder->NvEncDestroyBitstreamBuffer(m_stEncodeBuffer[i].stOutputBfr.hBitstreamBuffer);
m_stEncodeBuffer[i].stOutputBfr.hBitstreamBuffer = NULL;
#if defined(NV_WINDOWS)
m_pNvHWEncoder->NvEncUnregisterAsyncEvent(m_stEncodeBuffer[i].stOutputBfr.hOutputEvent);
nvCloseFile(m_stEncodeBuffer[i].stOutputBfr.hOutputEvent);
m_stEncodeBuffer[i].stOutputBfr.hOutputEvent = NULL;
#endif
}
if (m_stEOSOutputBfr.hOutputEvent)
{
#if defined(NV_WINDOWS)
m_pNvHWEncoder->NvEncUnregisterAsyncEvent(m_stEOSOutputBfr.hOutputEvent);
nvCloseFile(m_stEOSOutputBfr.hOutputEvent);
m_stEOSOutputBfr.hOutputEvent = NULL;
#endif
}
return NV_ENC_SUCCESS;
}
protected:
CNvHWEncoder *m_pNvHWEncoder; // NVENC API wrapper; owned, freed in dtor
uint32_t m_uEncodeBufferCount; // in-flight encode buffers (numB + 1, set by CreateEncoder)
uint32_t m_uPicStruct; // NV_ENC_PIC_STRUCT applied to every submitted frame
void* m_pDevice; // CUDA context created by InitCuda (stored as void*)
#if defined(NV_WINDOWS)
IDirect3D9 *m_pD3D; // D3D9 device handle (Windows-only backends)
#endif
CUcontext m_cuContext; // NOTE(review): set to NULL in ctor and apparently unused; the context lives in m_pDevice
EncodeConfig m_stEncoderInput; // zeroed in ctor; retained for API symmetry
EncodeBuffer m_stEncodeBuffer[MAX_ENCODE_QUEUE]; // backing storage for the buffer ring
CNvQueue<EncodeBuffer> m_EncodeBufferQueue; // available/pending rotation over m_stEncodeBuffer
EncodeOutputBuffer m_stEOSOutputBfr; // dedicated end-of-stream output buffer
// Repack planar YUV 4:2:0 (separate Y/Cb/Cr planes) into NV12 layout:
// the luma plane is copied row by row, then Cb and Cr are interleaved
// byte-wise into a single half-height chroma plane.
// A stride of 0 means "tightly packed" (stride == width). The source
// chroma stride is srcStride/2, as is conventional for 4:2:0 planes.
void convertYUVpitchtoNV12( unsigned char *yuv_luma, unsigned char *yuv_cb, unsigned char *yuv_cr,
    unsigned char *nv12_luma, unsigned char *nv12_chroma,
    int width, int height , int srcStride, int dstStride)
{
    if (srcStride == 0)
        srcStride = width;
    if (dstStride == 0)
        dstStride = width;

    // Luma: straight row-by-row copy.
    for (int row = 0; row < height; row++)
        memcpy(nv12_luma + dstStride * row, yuv_luma + srcStride * row, width);

    // Chroma: one output row per two luma rows, Cb/Cr alternating.
    const int chromaSrcStride = srcStride / 2;
    for (int row = 0; row < height / 2; row++)
    {
        unsigned char *dst = nv12_chroma + dstStride * row;
        const unsigned char *cbRow = yuv_cb + chromaSrcStride * row;
        const unsigned char *crRow = yuv_cr + chromaSrcStride * row;
        for (int col = 0; col < width; col += 2)
        {
            dst[col] = cbRow[col >> 1];
            dst[col + 1] = crRow[col >> 1];
        }
    }
}
// Copy three full-resolution planes (YUV 4:4:4) into the encoder surface,
// honoring the source and destination row strides. Unlike the NV12 variant,
// a stride of 0 is NOT treated specially here.
void convertYUVpitchtoYUV444(unsigned char *yuv_luma, unsigned char *yuv_cb, unsigned char *yuv_cr,
    unsigned char *surf_luma, unsigned char *surf_cb, unsigned char *surf_cr, int width, int height, int srcStride, int dstStride)
{
    for (int row = 0; row < height; row++)
    {
        const int srcOff = srcStride * row;
        const int dstOff = dstStride * row;
        memcpy(surf_luma + dstOff, yuv_luma + srcOff, width);
        memcpy(surf_cb + dstOff, yuv_cb + srcOff, width);
        memcpy(surf_cr + dstOff, yuv_cr + srcOff, width);
    }
}
protected:
NVENCSTATUS Deinitialize(uint32_t devicetype);
NVENCSTATUS InitD3D9(uint32_t deviceID = 0);
NVENCSTATUS InitD3D11(uint32_t deviceID = 0);
NVENCSTATUS InitD3D10(uint32_t deviceID = 0);
unsigned char* LockInputBuffer(void * hInputSurface, uint32_t *pLockedPitch);
// Send the end-of-stream notification and drain every pending encode buffer
// into dataPacket. On Windows, waits (500 ms) for the EOS completion event.
// NOTE(review): each ProcessOutput call reuses the same dataPacket, so only
// the last drained packet's contents survive — confirm this is intended.
NVENCSTATUS FlushEncoder(DataPacket* dataPacket) {
NVENCSTATUS nvStatus = m_pNvHWEncoder->NvEncFlushEncoderQueue(m_stEOSOutputBfr.hOutputEvent);
if (nvStatus != NV_ENC_SUCCESS)
{
assert(0);
return nvStatus;
}
EncodeBuffer *pEncodeBufer = m_EncodeBufferQueue.GetPending();
while (pEncodeBufer)
{
m_pNvHWEncoder->ProcessOutput(pEncodeBufer, dataPacket);
pEncodeBufer = m_EncodeBufferQueue.GetPending();
}
#if defined(NV_WINDOWS)
if (WaitForSingleObject(m_stEOSOutputBfr.hOutputEvent, 500) != WAIT_OBJECT_0)
{
assert(0);
nvStatus = NV_ENC_ERR_GENERIC;
}
#endif
return nvStatus;
}
};
// NVEncodeAPI entry point
typedef NVENCSTATUS (NVENCAPI *MYPROC)(NV_ENCODE_API_FUNCTION_LIST*);

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,202 @@
////////////////////////////////////////////////////////////////////////////
//
// Copyright 1993-2014 NVIDIA Corporation. All rights reserved.
//
// Please refer to the NVIDIA end user license agreement (EULA) associated
// with this source code for terms and conditions that govern your use of
// this software. Any use, reproduction, disclosure, or distribution of
// this software and related documentation outside the terms of the EULA
// is strictly prohibited.
//
////////////////////////////////////////////////////////////////////////////
#include <stdlib.h>
#include <stdio.h>
#include <assert.h>
#include <cuda.h>
#include "nvEncodeAPI.h"
#include "nvUtils.h"
#define SET_VER(configStruct, type) {configStruct.version = type##_VER;}
#if defined (NV_WINDOWS)
#include "d3d9.h"
#define NVENCAPI __stdcall
#pragma warning(disable : 4996)
#elif defined (NV_UNIX)
#include <dlfcn.h>
#include <string.h>
#define NVENCAPI
#endif
// All user-tunable parameters for one NVENC encode session.
// Zero-initialize, fill in the fields you need, then pass to
// CNvHWEncoder::CreateEncoder.
typedef struct _EncodeConfig
{
int width; // frame width in pixels
int height; // frame height in pixels
int maxWidth; // max dynamic-resolution width (0 = fixed)
int maxHeight; // max dynamic-resolution height (0 = fixed)
int fps; // target frame rate
int bitrate; // average bitrate, bits/sec
int vbvMaxBitrate; // VBV peak bitrate, bits/sec
int vbvSize; // VBV buffer size, bits
int rcMode; // NV_ENC_PARAMS_RC_* rate-control mode
int qp; // constant QP (for CONSTQP mode)
GUID presetGUID; // encoder preset
int codec; // NV_ENC_H264 or NV_ENC_HEVC
int invalidateRefFramesEnableFlag;
int intraRefreshEnableFlag;
int intraRefreshPeriod;
int intraRefreshDuration;
int deviceType; // NvEncodeDeviceType backend
int startFrameIdx; // first frame to encode (file-based tools)
int endFrameIdx; // last frame to encode
int gopLength; // GOP size, or NVENC_INFINITE_GOPLENGTH
int numB; // number of B frames
int pictureStruct; // NV_ENC_PIC_STRUCT_* (frame/field encoding)
int deviceID; // GPU index
int isYuv444; // nonzero = YUV 4:4:4 input, else 4:2:0
char *qpDeltaMapFile;
char* inputFileName; // may be NULL when frames come from capture
char* outputFileName; // may be NULL when output goes to a socket
char* encoderPreset; // preset name string; NULL selects the default
char* inputFilePath;
char *encCmdFileName;
}EncodeConfig;
// One encoded bitstream packet. The data buffer is owned by the caller
// that allocated it; size is the number of valid bytes.
typedef struct _DataPacket
{
uint8_t *data; // encoded bytes
int size; // valid byte count in data
}DataPacket;
// One NVENC input surface plus the device resources that may back it.
typedef struct _EncodeInputBuffer
{
unsigned int dwWidth; // surface width in pixels
unsigned int dwHeight; // surface height in pixels
#if defined (NV_WINDOWS)
IDirect3DSurface9 *pNV12Surface; // D3D9-backed surface (Windows only)
#endif
CUdeviceptr pNV12devPtr; // CUDA device pointer for the NV12 frame
uint32_t uNV12Stride; // row stride of pNV12devPtr
CUdeviceptr pNV12TempdevPtr; // secondary CUDA device buffer
uint32_t uNV12TempStride; // row stride of pNV12TempdevPtr
void* nvRegisteredResource; // handle from NvEncRegisterResource
NV_ENC_INPUT_PTR hInputSurface; // NVENC input buffer handle
NV_ENC_BUFFER_FORMAT bufferFmt; // NV12 or YUV444 planar layout
}EncodeInputBuffer;
// One NVENC bitstream output buffer and its async-completion bookkeeping.
typedef struct _EncodeOutputBuffer
{
unsigned int dwBitstreamBufferSize; // capacity of hBitstreamBuffer in bytes
NV_ENC_OUTPUT_PTR hBitstreamBuffer; // NVENC bitstream buffer handle
HANDLE hOutputEvent; // completion event (Windows async mode; NULL elsewhere)
bool bWaitOnEvent; // wait on hOutputEvent before reading the bitstream
bool bEOSFlag; // marks the dedicated end-of-stream buffer
}EncodeOutputBuffer;
// Pairing of one input surface with its output bitstream buffer;
// these rotate together through CNvQueue during encoding.
typedef struct _EncodeBuffer
{
EncodeOutputBuffer stOutputBfr;
EncodeInputBuffer stInputBfr;
}EncodeBuffer;
// Per-picture runtime commands for the encoder (reconfiguration,
// forced IDR/intra-refresh, and reference-frame invalidation).
typedef struct _NvEncPictureCommand
{
bool bResolutionChangePending; // apply newWidth/newHeight
bool bBitrateChangePending; // apply newBitrate/newVBVSize
bool bForceIDR; // force next picture to be an IDR frame
bool bForceIntraRefresh; // start an intra-refresh wave
bool bInvalidateRefFrames; // drop the listed reference frames
uint32_t newWidth;
uint32_t newHeight;
uint32_t newBitrate;
uint32_t newVBVSize;
uint32_t intraRefreshDuration; // length of the intra-refresh wave in frames
uint32_t numRefFramesToInvalidate; // count of valid entries in refFrameNumbers
uint32_t refFrameNumbers[16];
}NvEncPictureCommand;
// Codec selector for EncodeConfig::codec.
enum
{
NV_ENC_H264 = 0,
NV_ENC_HEVC = 1,
};
// Thin C++ wrapper around the NvEncodeAPI function table: loads the NVENC
// shared library, opens an encode session, and exposes one method per API
// entry point plus a few higher-level helpers (CreateEncoder, ProcessOutput).
class CNvHWEncoder
{
public:
uint32_t m_EncodeIdx; // running index of submitted frames
uint32_t m_uMaxWidth; // maximum reconfigurable width
uint32_t m_uMaxHeight; // maximum reconfigurable height
uint32_t m_uCurWidth; // current encode width
uint32_t m_uCurHeight; // current encode height
protected:
bool m_bEncoderInitialized; // session opened successfully
GUID codecGUID; // selected codec GUID (H.264/HEVC)
NV_ENCODE_API_FUNCTION_LIST* m_pEncodeAPI; // NVENC function table
HINSTANCE m_hinstLib; // loaded NVENC library handle
void *m_hEncoder; // NVENC session handle
NV_ENC_INITIALIZE_PARAMS m_stCreateEncodeParams;
NV_ENC_CONFIG m_stEncodeConfig;
public:
// --- Raw NvEncodeAPI pass-throughs: session, capability queries ---
NVENCSTATUS NvEncOpenEncodeSession(void* device, uint32_t deviceType);
NVENCSTATUS NvEncGetEncodeGUIDCount(uint32_t* encodeGUIDCount);
NVENCSTATUS NvEncGetEncodeProfileGUIDCount(GUID encodeGUID, uint32_t* encodeProfileGUIDCount);
NVENCSTATUS NvEncGetEncodeProfileGUIDs(GUID encodeGUID, GUID* profileGUIDs, uint32_t guidArraySize, uint32_t* GUIDCount);
NVENCSTATUS NvEncGetEncodeGUIDs(GUID* GUIDs, uint32_t guidArraySize, uint32_t* GUIDCount);
NVENCSTATUS NvEncGetInputFormatCount(GUID encodeGUID, uint32_t* inputFmtCount);
NVENCSTATUS NvEncGetInputFormats(GUID encodeGUID, NV_ENC_BUFFER_FORMAT* inputFmts, uint32_t inputFmtArraySize, uint32_t* inputFmtCount);
NVENCSTATUS NvEncGetEncodeCaps(GUID encodeGUID, NV_ENC_CAPS_PARAM* capsParam, int* capsVal);
NVENCSTATUS NvEncGetEncodePresetCount(GUID encodeGUID, uint32_t* encodePresetGUIDCount);
NVENCSTATUS NvEncGetEncodePresetGUIDs(GUID encodeGUID, GUID* presetGUIDs, uint32_t guidArraySize, uint32_t* encodePresetGUIDCount);
NVENCSTATUS NvEncGetEncodePresetConfig(GUID encodeGUID, GUID presetGUID, NV_ENC_PRESET_CONFIG* presetConfig);
// --- Buffer creation, locking and resource registration ---
NVENCSTATUS NvEncCreateInputBuffer(uint32_t width, uint32_t height, void** inputBuffer, uint32_t isYuv444);
NVENCSTATUS NvEncDestroyInputBuffer(NV_ENC_INPUT_PTR inputBuffer);
NVENCSTATUS NvEncCreateBitstreamBuffer(uint32_t size, void** bitstreamBuffer);
NVENCSTATUS NvEncDestroyBitstreamBuffer(NV_ENC_OUTPUT_PTR bitstreamBuffer);
NVENCSTATUS NvEncLockBitstream(NV_ENC_LOCK_BITSTREAM* lockBitstreamBufferParams);
NVENCSTATUS NvEncUnlockBitstream(NV_ENC_OUTPUT_PTR bitstreamBuffer);
NVENCSTATUS NvEncLockInputBuffer(void* inputBuffer, void** bufferDataPtr, uint32_t* pitch);
NVENCSTATUS NvEncUnlockInputBuffer(NV_ENC_INPUT_PTR inputBuffer);
NVENCSTATUS NvEncGetEncodeStats(NV_ENC_STAT* encodeStats);
NVENCSTATUS NvEncGetSequenceParams(NV_ENC_SEQUENCE_PARAM_PAYLOAD* sequenceParamPayload);
NVENCSTATUS NvEncRegisterAsyncEvent(void** completionEvent);
NVENCSTATUS NvEncUnregisterAsyncEvent(void* completionEvent);
NVENCSTATUS NvEncMapInputResource(void* registeredResource, void** mappedResource);
NVENCSTATUS NvEncUnmapInputResource(NV_ENC_INPUT_PTR mappedInputBuffer);
NVENCSTATUS NvEncDestroyEncoder();
NVENCSTATUS NvEncInvalidateRefFrames(const NvEncPictureCommand *pEncPicCommand);
NVENCSTATUS NvEncOpenEncodeSessionEx(void* device, NV_ENC_DEVICE_TYPE deviceType);
NVENCSTATUS NvEncRegisterResource(NV_ENC_INPUT_RESOURCE_TYPE resourceType, void* resourceToRegister, uint32_t width, uint32_t height, uint32_t pitch, void** registeredResource);
NVENCSTATUS NvEncUnregisterResource(NV_ENC_REGISTERED_PTR registeredRes);
NVENCSTATUS NvEncReconfigureEncoder(const NvEncPictureCommand *pEncPicCommand);
NVENCSTATUS NvEncFlushEncoderQueue(void *hEOSEvent);
// --- Lifecycle and higher-level helpers ---
CNvHWEncoder();
virtual ~CNvHWEncoder();
NVENCSTATUS Initialize(void* device, NV_ENC_DEVICE_TYPE deviceType);
NVENCSTATUS Deinitialize();
NVENCSTATUS NvEncEncodeFrame(EncodeBuffer *pEncodeBuffer, NvEncPictureCommand *encPicCommand,
uint32_t width, uint32_t height,
NV_ENC_PIC_STRUCT ePicStruct = NV_ENC_PIC_STRUCT_FRAME,
int8_t *qpDeltaMapArray = NULL, uint32_t qpDeltaMapArraySize = 0);
NVENCSTATUS CreateEncoder(const EncodeConfig *pEncCfg);
GUID GetPresetGUID(char* encoderPreset, int codec);
// Copies a completed buffer's bitstream into dataPacket.
NVENCSTATUS ProcessOutput(const EncodeBuffer *pEncodeBuffer, DataPacket* dataPacket);
NVENCSTATUS FlushEncoder();
NVENCSTATUS ValidateEncodeGUID(GUID inputCodecGuid);
NVENCSTATUS ValidatePresetGUID(GUID presetCodecGuid, GUID inputCodecGuid);
static NVENCSTATUS ParseArguments(EncodeConfig *encodeConfig, int argc, char *argv[]);
};
typedef NVENCSTATUS (NVENCAPI *MYPROC)(NV_ENCODE_API_FUNCTION_LIST*);

View File

@ -0,0 +1,28 @@
//
// Copyright 1993-2014 NVIDIA Corporation. All rights reserved.
//
// Please refer to the NVIDIA end user license agreement (EULA) associated
// with this source code for terms and conditions that govern your use of
// this software. Any use, reproduction, disclosure, or distribution of
// this software and related documentation outside the terms of the EULA
// is strictly prohibited.
//
////////////////////////////////////////////////////////////////////////////
#ifndef NVCPUOPSYS_H
#define NVCPUOPSYS_H
#if defined(_WIN32) || defined(_WIN16)
# define NV_WINDOWS
#endif
#if (defined(__unix__) || defined(__unix) ) && !defined(nvmacosx) && !defined(vxworks) && !defined(__DJGPP__) && !defined(NV_UNIX) && !defined(__QNX__) && !defined(__QNXNTO__)/* XXX until removed from Makefiles */
# define NV_UNIX
#endif /* defined(__unix__) */
#if defined(__linux__) && !defined(NV_LINUX) && !defined(NV_VMWARE)
# define NV_LINUX
#endif /* defined(__linux__) */
#endif

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,177 @@
///
// Copyright 1993-2014 NVIDIA Corporation. All rights reserved.
//
// Please refer to the NVIDIA end user license agreement (EULA) associated
// with this source code for terms and conditions that govern your use of
// this software. Any use, reproduction, disclosure, or distribution of
// this software and related documentation outside the terms of the EULA
// is strictly prohibited.
//
////////////////////////////////////////////////////////////////////////////
#ifndef NVFILE_IO_H
#define NVFILE_IO_H
#if defined __linux__
#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <errno.h>
#include <string.h>
#include <unistd.h>
#include <dlfcn.h>
#include <stdlib.h>
typedef void * HANDLE;
typedef void *HINSTANCE;
typedef unsigned long DWORD, *LPDWORD;
typedef DWORD FILE_SIZE;
#define FALSE 0
#define TRUE 1
#define INFINITE UINT_MAX
#define FILE_BEGIN SEEK_SET
#define INVALID_SET_FILE_POINTER (-1)
#define INVALID_HANDLE_VALUE ((void *)(-1))
#else
#include <stdio.h>
#include <windows.h>
#endif
#include "nvCPUOPSys.h"
typedef unsigned long long U64;
typedef unsigned int U32;
// Seek within a file: SetFilePointer on Windows, fseek elsewhere.
// moveFilePointer is unused; returns the platform API's result
// (note the two APIs have different return conventions).
inline U32 nvSetFilePointer(HANDLE hInputFile, U32 fileOffset, U32 *moveFilePointer, U32 flag)
{
#if defined (NV_WINDOWS)
    return SetFilePointer(hInputFile, fileOffset, NULL, flag);
#elif defined __linux || defined __APPLE__ || defined __MACOSX
    // fix: was "__APPLE_" (single trailing underscore), which never matches
    // on macOS and left the function without a body there.
    return fseek((FILE *)hInputFile, fileOffset, flag);
#endif
}
// 64-bit seek variant. moveFilePointer is unused.
// On non-Windows the offset is truncated to long via fseek.
inline U32 nvSetFilePointer64(HANDLE hInputFile, U64 fileOffset, U64 *moveFilePointer, U32 flag)
{
#if defined (NV_WINDOWS)
return SetFilePointer(hInputFile, ((U32 *)&fileOffset)[0], (PLONG)&((U32 *)&fileOffset)[1], flag);
#elif defined __linux || defined __APPLE__ || defined __MACOSX
return fseek((FILE *)hInputFile, (long int)fileOffset, flag);
#endif
}
// Read up to bytes_to_read bytes into buf. The number of bytes actually read
// is stored through bytes_read when non-NULL. operlapped is unused.
// Always returns true; callers must inspect *bytes_read for short reads/EOF.
inline bool nvReadFile(HANDLE hInputFile, void *buf, U32 bytes_to_read, U32 *bytes_read, void *operlapped)
{
#if defined (NV_WINDOWS)
    ReadFile(hInputFile, buf, bytes_to_read, (LPDWORD)bytes_read, NULL);
    return true;
#elif defined __linux || defined __APPLE__ || defined __MACOSX
    U32 num_bytes_read;
    // fix: fread(buf, bytes_to_read, 1, ...) returns an *item* count (0 or 1),
    // so *bytes_read reported 1 instead of the byte count. Swapping size and
    // count makes fread return the number of bytes read.
    num_bytes_read = fread(buf, 1, bytes_to_read, (FILE *)hInputFile);
    if (bytes_read)
    {
        *bytes_read = num_bytes_read;
    }
    return true;
#endif
}
// Print the size of an open file and, when pFilesize is non-NULL, store it.
// On Windows only the low 32 bits are stored even though the full 64-bit
// size is printed. On other platforms the size is measured by seeking to
// the end and back (the file position is restored to the start).
inline void nvGetFileSize(HANDLE hInputFile, DWORD *pFilesize)
{
#if defined (NV_WINDOWS)
LARGE_INTEGER file_size;
if (hInputFile != INVALID_HANDLE_VALUE)
{
file_size.LowPart = GetFileSize(hInputFile, (LPDWORD)&file_size.HighPart);
printf("[ Input Filesize] : %lld bytes\n", ((LONGLONG) file_size.HighPart << 32) + (LONGLONG)file_size.LowPart);
if (pFilesize != NULL) *pFilesize = file_size.LowPart;
}
#elif defined __linux || defined __APPLE__ || defined __MACOSX
FILE_SIZE file_size;
if (hInputFile != NULL)
{
nvSetFilePointer64(hInputFile, 0, NULL, SEEK_END);
file_size = ftell((FILE *)hInputFile);
nvSetFilePointer64(hInputFile, 0, NULL, SEEK_SET);
printf("Input Filesize: %ld bytes\n", file_size);
if (pFilesize != NULL) *pFilesize = file_size;
}
#endif
}
// Open input_file for binary reading. On failure, prints an error and
// terminates the process (exit), so a non-NULL handle is always returned.
inline HANDLE nvOpenFile(const char *input_file)
{
    HANDLE hInput = NULL;
#if defined (NV_WINDOWS)
    hInput = CreateFileA(input_file, GENERIC_READ, FILE_SHARE_READ, NULL, OPEN_EXISTING , FILE_ATTRIBUTE_NORMAL, NULL);
    if (hInput == INVALID_HANDLE_VALUE)
    {
        fprintf(stderr, "nvOpenFile Failed to open \"%s\"\n", input_file);
        exit(EXIT_FAILURE);
    }
#elif defined __linux || defined __APPLE__ || defined __MACOSX
    // fix: was "__APPLE_" (single trailing underscore), which never matches
    // on macOS, so this branch was silently skipped there.
    hInput = fopen(input_file, "rb");
    if (hInput == NULL)
    {
        fprintf(stderr, "nvOpenFile Failed to open \"%s\"\n", input_file);
        exit(EXIT_FAILURE);
    }
#endif
    return hInput;
}
// Open output_file for binary writing. On failure, prints an error and
// terminates the process (exit), so a non-NULL handle is always returned.
inline HANDLE nvOpenFileWrite(const char *output_file)
{
    HANDLE hOutput = NULL;
#if defined (NV_WINDOWS)
    // NOTE(review): OPEN_EXISTING fails when the file does not exist yet;
    // CREATE_ALWAYS is the usual choice for an output file — confirm intent.
    hOutput = CreateFileA(output_file, GENERIC_WRITE, FILE_SHARE_WRITE, NULL, OPEN_EXISTING , FILE_ATTRIBUTE_NORMAL, NULL);
    if (hOutput == INVALID_HANDLE_VALUE)
    {
        fprintf(stderr, "nvOpenFileWrite Failed to open \"%s\"\n", output_file);
        exit(EXIT_FAILURE);
    }
#elif defined __linux || defined __APPLE__ || defined __MACOSX
    // fix: was "__APPLE_" (single trailing underscore), which never matches
    // on macOS, so this branch was silently skipped there.
    hOutput = fopen(output_file, "wb+");
    if (hOutput == NULL)
    {
        fprintf(stderr, "nvOpenFileWrite Failed to open \"%s\"\n", output_file);
        exit(EXIT_FAILURE);
    }
#endif
    return hOutput;
}
// Close a handle returned by nvOpenFile/nvOpenFileWrite.
// A NULL handle is ignored.
inline void nvCloseFile(HANDLE hFileHandle)
{
    if (!hFileHandle)
        return;
#if defined (NV_WINDOWS)
    CloseHandle(hFileHandle);
#else
    fclose((FILE *)hFileHandle);
#endif
}
#endif

View File

@ -0,0 +1,127 @@
//
// Copyright 1993-2014 NVIDIA Corporation. All rights reserved.
//
// Please refer to the NVIDIA end user license agreement (EULA) associated
// with this source code for terms and conditions that govern your use of
// this software. Any use, reproduction, disclosure, or distribution of
// this software and related documentation outside the terms of the EULA
// is strictly prohibited.
//
////////////////////////////////////////////////////////////////////////////
#ifndef NVUTILS_H
#define NVUTILS_H
#include "nvCPUOPSys.h"
#include "nvFileIO.h"
#if defined (NV_WINDOWS)
#include <windows.h>
#elif defined NV_UNIX
#include <sys/time.h>
#include <limits.h>
#define FALSE 0
#define TRUE 1
#define INFINITE UINT_MAX
#define stricmp strcasecmp
#define FILE_BEGIN SEEK_SET
#define INVALID_SET_FILE_POINTER (-1)
#define INVALID_HANDLE_VALUE ((void *)(-1))
#define max(a, b) ((a) > (b) ? (a) : (b))
#define min(a, b) ((a) < (b) ? (a) : (b))
typedef void* HANDLE;
typedef void* HINSTANCE;
typedef unsigned long DWORD, *LPWORD;
typedef DWORD FILE_SIZE;
#endif
// Sleep the calling thread for mSec milliseconds. Always returns true.
inline bool NvSleep(unsigned int mSec)
{
#if defined (NV_WINDOWS)
Sleep(mSec);
#elif defined NV_UNIX
usleep(mSec * 1000);
#else
#error NvSleep function unknown for this platform.
#endif
return true;
}
// Store the tick frequency of NvQueryPerformanceCounter in *freq
// (ticks per second). Returns false only if the platform query fails.
inline bool NvQueryPerformanceFrequency(unsigned long long *freq)
{
*freq = 0;
#if defined (NV_WINDOWS)
LARGE_INTEGER lfreq;
if (!QueryPerformanceFrequency(&lfreq)) {
return false;
}
*freq = lfreq.QuadPart;
#elif defined NV_UNIX
// NvQueryPerformanceCounter scales gettimeofday() to nanoseconds,
// so the matching frequency is 1e9 ticks per second.
*freq = 1000000000;
#else
#error NvQueryPerformanceFrequency function not defined for this platform.
#endif
return true;
}
#define SEC_TO_NANO_ULL(sec) ((unsigned long long)sec * 1000000000)
#define MICRO_TO_NANO_ULL(sec) ((unsigned long long)sec * 1000)
// Store the current high-resolution tick count in *counter (see
// NvQueryPerformanceFrequency for the tick rate). On UNIX this is
// gettimeofday() converted to nanoseconds, so actual resolution is 1 us.
// Returns false if the platform query fails.
inline bool NvQueryPerformanceCounter(unsigned long long *counter)
{
*counter = 0;
#if defined (NV_WINDOWS)
LARGE_INTEGER lcounter;
if (!QueryPerformanceCounter(&lcounter)) {
return false;
}
*counter = lcounter.QuadPart;
#elif defined NV_UNIX
struct timeval tv;
int ret;
ret = gettimeofday(&tv, NULL);
if (ret != 0) {
return false;
}
*counter = SEC_TO_NANO_ULL(tv.tv_sec) + MICRO_TO_NANO_ULL(tv.tv_usec);
#else
#error NvQueryPerformanceCounter function not defined for this platform.
#endif
return true;
}
#if defined NV_UNIX
// Field-wise GUID equality; the Windows headers supply this operator,
// so it is only defined for the UNIX build.
__inline bool operator==(const GUID &guid1, const GUID &guid2)
{
    if (guid1.Data1 != guid2.Data1 ||
        guid1.Data2 != guid2.Data2 ||
        guid1.Data3 != guid2.Data3)
    {
        return false;
    }
    for (int i = 0; i < 8; i++)
    {
        if (guid1.Data4[i] != guid2.Data4[i])
        {
            return false;
        }
    }
    return true;
}
// Negation of operator== above.
__inline bool operator!=(const GUID &guid1, const GUID &guid2)
{
    return !(guid1 == guid2);
}
#endif
#endif
#define PRINTERR(message, ...) \
fprintf(stderr, "%s line %d: " message, __FILE__, __LINE__, ##__VA_ARGS__)

View File

@ -0,0 +1,40 @@
#pragma once
#include "wddm.h"
#include "Capture.h"
// Screen capture for one monitor via the WDDM/DXGI desktop-duplication
// wrapper. Usage: init() once, then getNextFrame()/doneNextFrame() pairs.
class WDDMCapture : public Capture {
public:
    // Bind the capture to a monitor and remember its screen rectangle.
    void init(UINT monitorID, RECT screen)
    {
        this->screen = screen;
        wddm.wf_dxgi_init(monitorID, screen);
    }
    // Acquire the next desktop frame and expose its pixels through *pPixels.
    // Returns 0 on success, a non-zero error code otherwise. The pixel
    // pointer stays valid until doneNextFrame() is called.
    int getNextFrame(RGBQUAD** pPixels)
    {
        // Wait up to 3 seconds for a new desktop frame.
        int rc = wddm.wf_dxgi_nextFrame(3000);
        if (rc != 0) {
            return rc;
        }
        int pitch;
        rc = wddm.wf_dxgi_getPixelData((byte**)pPixels, &pitch, &screen);
        if (rc != 0) {
            return rc;
        }
        return 0;
    }
    // Release the frame acquired by getNextFrame().
    void doneNextFrame()
    {
        // fix: result was stored in an unused local; nothing can be done on
        // failure here, so the return value is deliberately ignored.
        wddm.wf_dxgi_releasePixelData();
    }
private:
    RECT screen; // capture rectangle of the bound monitor
    WDDM wddm;   // DXGI desktop-duplication wrapper
};

View File

@ -0,0 +1,53 @@
#include <boost/circular_buffer.hpp>
#include <boost/thread/mutex.hpp>
#include <boost/thread/condition.hpp>
#include <boost/thread/thread.hpp>
#include <boost/call_traits.hpp>
#include <boost/bind.hpp>
#include <boost/timer/timer.hpp> // for auto_cpu_timer
// Fixed-capacity blocking FIFO built on boost::circular_buffer.
// Producers call push_front() and block while the buffer is full;
// consumers call pop_back() and block while it is empty.
template <class T>
class bounded_buffer
{
public:
    typedef boost::circular_buffer<T> container_type;
    typedef typename container_type::size_type size_type;
    typedef typename container_type::value_type value_type;
    typedef typename boost::call_traits<value_type>::param_type param_type;

    explicit bounded_buffer(size_type capacity) : m_unread(0), m_container(capacity) {}

    // Insert one item, blocking until there is room, then wake one consumer.
    // `param_type` represents the "best" way to pass a parameter of type `value_type` to a method.
    void push_front(typename boost::call_traits<value_type>::param_type item)
    {
        boost::mutex::scoped_lock lock(m_mutex);
        while (!is_not_full())
        {
            m_not_full.wait(lock);
        }
        m_container.push_front(item);
        ++m_unread;
        lock.unlock(); // release before notifying so the woken thread can run
        m_not_empty.notify_one();
    }

    // Remove the oldest item into *pItem, blocking until one is available,
    // then wake one producer.
    void pop_back(value_type* pItem)
    {
        boost::mutex::scoped_lock lock(m_mutex);
        while (!is_not_empty())
        {
            m_not_empty.wait(lock);
        }
        *pItem = m_container[--m_unread];
        lock.unlock();
        m_not_full.notify_one();
    }

private:
    bounded_buffer(const bounded_buffer&);              // Disabled copy constructor.
    bounded_buffer& operator = (const bounded_buffer&); // Disabled assign operator.

    bool is_not_empty() const { return m_unread > 0; }
    bool is_not_full() const { return m_unread < m_container.capacity(); }

    size_type m_unread;         // items produced but not yet consumed
    container_type m_container; // underlying ring buffer
    boost::mutex m_mutex;
    boost::condition m_not_empty;
    boost::condition m_not_full;
};

View File

@ -0,0 +1,100 @@
#include "cuda.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "color_conversion.h"
#include "stdio.h"
// Integer ceiling division: the smallest number of 'grain'-sized groups
// needed to cover 'total' elements (assumes total >= 0 and grain > 0).
// Used below to size the CUDA launch grid from the image dimensions.
__host__ __device__ __forceinline__ int divUp(int total, int grain)
{
    return (total + grain - 1) / grain;
}
// CUDA kernel: one thread per pixel, converting a packed 32-bit frame into
// planar YUV 4:2:0. 'input' is width*height uchar4 pixels; 'yuv_luma' gets
// the full-resolution Y plane; 'yuv_cb'/'yuv_cr' get quarter-resolution
// chroma planes.
// NOTE(review): the integer coefficients are the standard BT.601 RGB->YCbCr
// ones with R=px.x, G=px.y, B=px.z; if the capture source is BGRA (as
// RGBQUAD / DXGI_FORMAT_B8G8R8A8 elsewhere in this project suggest), red and
// blue are swapped here — confirm against the capture path.
// NOTE(review): the value computed as U (Cb) is stored into yuv_cr and V
// (Cr) into yuv_cb — either the names or the destination planes look
// swapped; the decoder side may compensate, verify before changing.
// NOTE(review): all four threads of each 2x2 block write the same chroma
// index 'pos', so which pixel's chroma wins is nondeterministic (benign data
// race; a common fix is to let only even-x/even-y threads write chroma).
__global__ void RGB_to_jp(uchar4 *input, unsigned char *yuv_luma, unsigned char *yuv_cb, unsigned char *yuv_cr, int width, int height)
{
    const int x = blockIdx.x * blockDim.x + threadIdx.x;
    const int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x >= width || y>=height) return;
    uchar4 px = input[y * width + x];
    // Fixed-point BT.601 full-swing conversion (see notes above).
    int Y = ( ( 66 * px.x + 129 * px.y + 25 * px.z + 128) >> 8) + 16;
    int U = ( ( -38 * px.x - 74 * px.y + 112 * px.z + 128) >> 8) + 128;
    int V = ( ( 112 * px.x - 94 * px.y - 18 * px.z + 128) >> 8) + 128;
    yuv_luma[y * width + x] = Y;
    // 2x2 pixel blocks share one chroma sample.
    int pos = (y >> 1) * (width >> 1) + (x >> 1);
    yuv_cr[pos] = U;
    yuv_cb[pos] = V;
}
// Host-side wrapper: uploads a width*height 32bpp frame to the GPU, runs the
// RGB_to_jp kernel, and downloads the three 4:2:0 planes into the
// caller-supplied buffers (yuv_luma: width*height bytes, yuv_cb/yuv_cr:
// width*height/4 bytes each). Returns true on success, false on any CUDA
// error. All device allocations are released on every path (previously each
// early return leaked them, and the first three cudaMalloc calls were
// unchecked).
bool RGB_to_YV12(int width, int height, void *pPixels, void* yuv_luma, void* yuv_cb, void* yuv_cr)
{
    cudaError_t cudaStatus;
    bool ok = false;
    const dim3 block(32, 8);
    const dim3 grid(divUp(width, block.x), divUp(height, block.y));
    const size_t lumaSize = (size_t)width * height; // Y plane: 1 byte/pixel
    const size_t chromaSize = lumaSize / 4;         // 4:2:0 subsampled planes
    unsigned char *yuv_luma_device = NULL;
    unsigned char *yuv_cb_device = NULL;
    unsigned char *yuv_cr_device = NULL;
    uchar4 *dev_pPixels = NULL;
    // Device buffers for the three output planes and the input pixels.
    if (cudaMalloc(&yuv_luma_device, lumaSize) != cudaSuccess ||
        cudaMalloc(&yuv_cb_device, chromaSize) != cudaSuccess ||
        cudaMalloc(&yuv_cr_device, chromaSize) != cudaSuccess ||
        cudaMalloc((void**)&dev_pPixels, lumaSize * sizeof(uchar4)) != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto cleanup;
    }
    // Copy the input pixels from host memory to the GPU.
    cudaStatus = cudaMemcpy(dev_pPixels, pPixels, lumaSize * sizeof(uchar4), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy 1 failed!");
        goto cleanup;
    }
    RGB_to_jp<<< grid, block >>>(dev_pPixels, yuv_luma_device, yuv_cb_device, yuv_cr_device, width, height);
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "RGB_to_jp failed!");
        goto cleanup;
    }
    // Wait for the kernel to finish before reading the planes back.
    cudaStatus = cudaDeviceSynchronize();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceSynchronize failed!");
        goto cleanup;
    }
    cudaStatus = cudaMemcpy(yuv_luma, yuv_luma_device, lumaSize, cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy 2 failed!");
        goto cleanup;
    }
    cudaStatus = cudaMemcpy(yuv_cb, yuv_cb_device, chromaSize, cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy 3 failed!");
        goto cleanup;
    }
    cudaStatus = cudaMemcpy(yuv_cr, yuv_cr_device, chromaSize, cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy 4 failed!");
        goto cleanup;
    }
    ok = true;
cleanup:
    // cudaFree(NULL) is a documented no-op, so this is safe on every path.
    cudaFree(yuv_luma_device);
    cudaFree(yuv_cb_device);
    cudaFree(yuv_cr_device);
    cudaFree(dev_pPixels);
    return ok;
}

View File

@ -0,0 +1 @@
// Converts a packed 32-bit-per-pixel frame (width x height) into planar
// 4:2:0 luma/chroma buffers on the GPU; implemented in color_conversion.cu.
// Returns true on success, false on any CUDA error.
bool RGB_to_YV12(int width, int height, void *pPixels, void* yuv_luma, void* yuv_cb, void* yuv_cr);

View File

@ -0,0 +1,4 @@
#cmakedefine HAS_CUDA
#cmakedefine HAS_WDDM
#cmakedefine HAS_NVENC
#cmakedefine HAS_FFMPEG

View File

@ -0,0 +1,43 @@
#pragma once
#include <time.h>
// Simple frames-per-second counter. newFrame() is called once per frame and
// prints/records the rate once per second; shouldRefresh() rate-limits an
// action to roughly 30 calls per second.
// NOTE: uses clock(), which measures CPU time on POSIX but wall time on
// Windows (the original target platform).
class FPS {
public:
    FPS() : fps(0), numFrame(0), lastSec(0), lastShouldRefresh(0) {}

    // Count one rendered frame; once a second has elapsed, latch the count
    // as the current FPS, reset, and print it.
    void newFrame() {
        ++numFrame;
        const double now = (double)clock() / CLOCKS_PER_SEC;
        if (now < lastSec + 1)
            return;
        fps = numFrame;
        numFrame = 0;
        lastSec = now;
        printf("FPS: %d\n", getFps());
    }

    // Most recently latched frames-per-second value (0 until the first full
    // second has elapsed).
    int getFps() {
        return fps;
    }

    /* Returns true only 30 times per second */
    bool shouldRefresh() {
        const double now = (double)clock() / CLOCKS_PER_SEC;
        if (now < lastShouldRefresh + 1.0 / 30)
            return false;
        lastShouldRefresh = now;
        return true;
    }

private:
    int fps;                  // last latched rate
    int numFrame;             // frames counted in the current second
    double lastSec;           // start of the current one-second window
    double lastShouldRefresh; // time of the last true shouldRefresh()
};

View File

@ -0,0 +1,19 @@
#pragma once
BOOL CALLBACK MonitorEnumProc(HMONITOR hMonitor, HDC hdcMonitor, LPRECT lprcMonitor, LPARAM dwData);
// Enumerates all attached display monitors at construction time and exposes
// their virtual-screen rectangles through 'monitors', indexed in enumeration
// order (which is how the rest of the code refers to a monitor by number).
class Monitor {
public:
    Monitor() {
        // MonitorEnumProc (below) appends each monitor's RECT to this->monitors.
        if(!EnumDisplayMonitors(NULL, NULL, MonitorEnumProc, reinterpret_cast<LPARAM>(this))) {
            throw std::runtime_error ("EnumDisplayMonitors failed");
        }
    }
    // Virtual-desktop coordinates of every monitor, in enumeration order.
    std::vector<RECT> monitors;
};
// EnumDisplayMonitors callback: dwData carries the Monitor* being filled.
// Records the monitor's rectangle and returns TRUE to continue enumeration.
BOOL CALLBACK MonitorEnumProc(HMONITOR hMonitor, HDC hdcMonitor, LPRECT lprcMonitor, LPARAM dwData)
{
    Monitor* self = reinterpret_cast<Monitor*>(dwData);
    self->monitors.push_back(*lprcMonitor);
    return TRUE;
}

View File

@ -0,0 +1,38 @@
#pragma once
#include <map>
#include <iostream>
#include <string>
using namespace std;
// Parses "key value" pairs from the command line (e.g. "monitor 1 port 8080").
// Recognized keys: "monitor" (monitor index) and "port" (TCP port); both
// default to -1 when absent. Unknown keys are echoed and otherwise ignored.
class Params {
public:
    Params(int argc, const char* argv[]) {
        // defaults: -1 means "not supplied"
        monitor = -1;
        port = -1;
        map<string, string> params;
        // Arguments come in key/value pairs. The 'i + 1 < argc' bound also
        // protects against a trailing key with no value: the old loop read
        // argv[argc] (the NULL terminator) and constructed a std::string
        // from it, which is undefined behavior.
        for (int i = 1; i + 1 < argc; i += 2) {
            params[argv[i]] = argv[i + 1];
        }
        typedef map<string, string>::iterator it_type;
        for (it_type iterator = params.begin(); iterator != params.end(); iterator++) {
            cout << iterator->first << " : " << iterator->second << endl;
            if (iterator->first.compare("monitor") == 0) {
                monitor = atoi(iterator->second.c_str());
            } else if (iterator->first.compare("port") == 0) {
                port = atoi(iterator->second.c_str());
            }
        }
    }
    int monitor; // requested monitor index, -1 if not given
    int port;    // TCP listen port, -1 if not given
};

View File

@ -0,0 +1,278 @@
//
// Copyright (c) 2003-2013 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#include <algorithm>
#include <cstdlib>
#include <cstring>
#include <fstream>
#include <boost/asio.hpp>
#include <boost/thread.hpp>
#include <bounded_buffer.h>
#include "fps.h"
#include "monitor.h"
#include "params.h"
#include "config.h"
#ifdef HAS_WDDM
#include "WDDMCapture.h"
#else
#include "GDICapture.h"
#endif
#ifdef HAS_FFMPEG
#include "FFMPEG_encoding.hpp"
#endif
#ifdef HAS_NVENC
#include "NV_encoding.hpp"
#endif
using namespace std;
using namespace boost::asio;
using ip::tcp;
// Maximum size of the initial identification read from a client.
const int max_length = 1024;
typedef boost::shared_ptr<tcp::socket> socket_ptr;
// Hand-off queue between the capture thread (producer) and the encoder loop
// (consumer); capacity 2, so capture blocks instead of running far ahead.
bounded_buffer<RGBQUAD*> screenToSendQueue(2);
// Capture loop (runs on its own thread): grabs frames from the selected
// monitor and pushes deep copies into screenToSendQueue for the encoder.
// push_front blocks when the queue is full, which throttles capture to the
// encoder's pace.
void threadScreenCapture(UINT monitorID, RECT screen){
    int height = screen.bottom - screen.top;
    int width = screen.right - screen.left;
#ifdef HAS_WDDM
    WDDMCapture capture;
#else
    GDICapture capture;
#endif
    capture.init(monitorID, screen);
    RGBQUAD* pPixels;
    FPS fps;
    while(true){
        int rc = capture.getNextFrame(&pPixels);
        if (rc == 0) {
            // Allocate with malloc because the consumer in sessionVideo
            // releases the buffer with free(); the previous new[] / free()
            // pairing was a mismatched allocator (undefined behavior).
            RGBQUAD* pixCopy = (RGBQUAD*)malloc((size_t)width * height * sizeof(RGBQUAD));
            if (pixCopy != NULL) {
                memcpy(pixCopy, pPixels, (size_t)width * height * sizeof(RGBQUAD));
                screenToSendQueue.push_front(pixCopy);
            }
            capture.doneNextFrame();
            fps.newFrame();
        }
    }
}
// Streams encoded video to one client: starts the capture thread, then pops
// raw frames from screenToSendQueue and feeds them to whichever encoder was
// compiled in (NVENC preferred, FFMPEG fallback).
void sessionVideo(socket_ptr sock, UINT monitorID, RECT screen)
{
    // get the height and width of the screen
    int height = screen.bottom - screen.top;
    int width = screen.right - screen.left;
#ifdef HAS_NVENC
    NV_encoding nv_encoding;
    nv_encoding.load(width, height, sock, monitorID);
#elif defined(HAS_FFMPEG)
    FFMPEG_encoding ffmpeg;
    ffmpeg.load(width, height, sock);
#endif
    // Producer thread fills screenToSendQueue with frame copies.
    boost::thread t(boost::bind(threadScreenCapture, monitorID, screen));
    FPS fps;
    RGBQUAD* pPixels;
    while(true){
        // Blocks until the capture thread delivers a frame.
        screenToSendQueue.pop_back(&pPixels);
#ifdef HAS_NVENC
        nv_encoding.write(width, height, pPixels);
#elif defined(HAS_FFMPEG)
        ffmpeg.write(width, height, pPixels);
#endif
        //fps.newFrame();
        // NOTE(review): frames are released with free() here, but the
        // producer in threadScreenCapture allocates them with new[] —
        // mismatched allocator/deallocator (undefined behavior). The
        // producer should allocate with malloc (or this should delete[]).
        free(pPixels);
    }
    // NOTE(review): unreachable — the loop above never exits, so the encoder
    // close() below never runs and the capture thread is never joined.
#ifdef HAS_NVENC
    nv_encoding.close();
#elif defined(HAS_FFMPEG)
    ffmpeg.close();
#endif
}
// Wire format of one input event received from the client (raw struct bytes
// over TCP; both ends must agree on layout and endianness).
// Field meanings are those handled in sessionKeystroke below.
struct SendStruct {
    int type;    // 0=mouse move, 1=button press, 2=button release, 3=key down, 4=key up
    int x;       // mouse position relative to the streamed screen's origin
    int y;
    int button;  // X11-style: 1=left, 2=middle, 3=right, 4=scroll up, 5=scroll down
    int keycode; // character code, injected via KEYEVENTF_UNICODE (wScan)
};
// Input loop for one client connection: receives SendStruct events over the
// socket and replays them locally with SetCursorPos / SendInput.
// Returns when the peer closes the connection; throws on other socket errors.
// NOTE(review): read_some can return fewer than sizeof(SendStruct) bytes
// (TCP is a byte stream), but the code assumes each read delivers exactly
// one complete struct — a short read would reinterpret a partial buffer.
// boost::asio::read with a fixed-size buffer would guarantee full structs.
void sessionKeystroke(socket_ptr sock, RECT screen)
{
    char data[sizeof(SendStruct)];
    boost::system::error_code error;
    SendStruct* s;
    INPUT input = {0};
    while(true) {
        size_t length = sock->read_some(buffer(data), error);
        if (error == error::eof)
            return; // Connection closed cleanly by peer.
        else if (error)
            throw boost::system::system_error(error); // Some other error.
        s = (SendStruct*)data;
        ::ZeroMemory(&input,sizeof(INPUT));
        switch(s->type){
        case 0: // MotionNotify
            // Client coordinates are relative to the streamed monitor;
            // offset into virtual-desktop coordinates.
            SetCursorPos(s->x + screen.left, s->y + screen.top);
            break;
        case 1: // button press
            switch (s->button) {
            case 1: // left button
                input.mi.dwFlags = MOUSEEVENTF_LEFTDOWN;
                break;
            case 2: // middle button
                input.mi.dwFlags = MOUSEEVENTF_MIDDLEDOWN;
                break;
            case 3: // third button
                input.mi.dwFlags = MOUSEEVENTF_RIGHTDOWN;
                break;
            case 4: // scroll up
                input.mi.dwFlags = MOUSEEVENTF_WHEEL;
                input.mi.mouseData = 100;
                break;
            case 5: // scroll down
                input.mi.dwFlags = MOUSEEVENTF_WHEEL;
                // negative wheel delta; wraps when stored in the DWORD field
                input.mi.mouseData = -100;
                break;
            }
            input.type = INPUT_MOUSE;
            ::SendInput(1,&input,sizeof(INPUT));
            break;
        case 2: // button release (wheel buttons have no release event)
            switch (s->button) {
            case 1: // left button
                input.mi.dwFlags = MOUSEEVENTF_LEFTUP;
                break;
            case 2: // middle button
                input.mi.dwFlags = MOUSEEVENTF_MIDDLEUP;
                break;
            case 3: // third button
                input.mi.dwFlags = MOUSEEVENTF_RIGHTUP;
                break;
            }
            if (input.mi.dwFlags) {
                input.type = INPUT_MOUSE;
                ::SendInput(1,&input,sizeof(INPUT));
            }
            break;
        case 3: // key down, injected as a unicode character
            input.type = INPUT_KEYBOARD;
            input.ki.wScan = s->keycode;
            input.ki.wVk=0;
            input.ki.dwFlags = KEYEVENTF_UNICODE;
            ::SendInput(1,&input,sizeof(INPUT));
            break;
        case 4: // key up
            input.type = INPUT_KEYBOARD;
            input.ki.wScan = s->keycode;
            input.ki.wVk=0;
            input.ki.dwFlags = KEYEVENTF_UNICODE | KEYEVENTF_KEYUP;
            ::SendInput(1,&input,sizeof(INPUT));
            break;
        }
    }
}
// Entry point for a freshly accepted connection: the client's first byte
// identifies the channel type — 'a' requests the video stream, 'b' the
// keystroke/input channel. Anything else is logged and the connection drops.
void session(socket_ptr sock, UINT monitorID, RECT screenCoordinates)
{
    try
    {
        // Latency matters more than throughput for interactive use.
        sock->set_option(tcp::no_delay(true));
        char data[max_length];
        boost::system::error_code error;
        const size_t length = sock->read_some(buffer(data), error);
        if (error == error::eof)
            return; // Connection closed cleanly by peer.
        if (error)
            throw boost::system::system_error(error); // Some other error.
        switch (data[0]) {
        case 'a':
            sessionVideo(sock, monitorID, screenCoordinates);
            break;
        case 'b':
            sessionKeystroke(sock, screenCoordinates);
            break;
        default:
            cout << "Received a connection with a wrong identification buffer " << string(data, length) << endl;
            break;
        }
    }
    catch (exception& e)
    {
        cerr << "Exception in thread: " << e.what() << "\n";
    }
}
// Accept loop: listens on 'port' and spawns a detached session thread per
// incoming connection. Never returns.
void server(io_service& io_service, short port, UINT monitorID, RECT screenCoordinates)
{
    tcp::acceptor acceptor(io_service, tcp::endpoint(tcp::v4(), port));
    while (true)
    {
        socket_ptr sock(new tcp::socket(io_service));
        acceptor.accept(*sock);
        // The shared_ptr keeps the socket alive for the thread's lifetime.
        boost::thread worker(boost::bind(session, sock, monitorID, screenCoordinates));
    }
}
// Program entry point. Expects "key value" arguments, e.g.:
//   server monitor 1 port 8080
// Validates the monitor selection (defaulting to monitor 0 when only one is
// attached) and starts the blocking accept loop.
int main(int argc, const char* argv[])
{
    cout << "Version 0.9" << endl;
    Params params(argc, argv);
    if (params.port == -1)
    {
        cerr << "Usage: ./server [options] port <#>" << endl;
        cerr << "monitor <n>\n";
        cerr << "Sample: ./server monitor 1 port 8080" << endl;
        return 1;
    }
    Monitor monitor;
    RECT screenCoordinates;
    int monitorCount = GetSystemMetrics(SM_CMONITORS);
    // With a single monitor no explicit selection is needed; previously this
    // case fell through to the "invalid monitor" error and the server could
    // never start on a one-monitor machine without an explicit argument.
    if (params.monitor == -1 && monitorCount <= 1) {
        params.monitor = 0;
    }
    if (params.monitor == -1) {
        // Argument format matches the parser ("monitor <n> port <port>"),
        // unlike the old "-monitor <n> <port>" hint which it did not accept.
        cerr << "There are more than one monitor available, select which monitor to use with\n./server monitor <n> port <port>" << endl;
        return 1;
    }
    if (params.monitor < 0 || params.monitor >= (int)monitor.monitors.size()) {
        cerr << "The chosen monitor " << params.monitor << " is invalid, select from the following:\n";
        for (int i = 0; i < (int)monitor.monitors.size(); i++) {
            RECT r = monitor.monitors[i];
            cerr << "Monitor " << i << ":" << "["<<r.left<<" "<<r.top<<","<<r.bottom<<" "<<r.right<<"]" << endl;
        }
        return 1;
    }
    screenCoordinates = monitor.monitors[params.monitor];
    try
    {
        io_service io_service;
        // Blocks forever accepting connections.
        server(io_service, params.port, params.monitor, screenCoordinates);
    }
    catch (exception& e)
    {
        cerr << "Exception: " << e.what() << "\n";
    }
    return 0;
}

View File

@ -0,0 +1,405 @@
#pragma once
//#pragma comment (lib, "d3dx11.lib")
//#pragma comment (lib, "d3dx10.lib")
#include <windows.h>
#include <windowsx.h>
//#define CINTERFACE
#include <DXGItype.h>
#include <D3D11.h>
#pragma comment (lib, "d3d11.lib")
#include <dxgi1_2.h>
#include <tchar.h>
/* Driver types supported */
/* Driver types supported */
// Tried in order by wf_dxgi_createDevice: real hardware first, then the
// software rasterizers as fallbacks.
D3D_DRIVER_TYPE DriverTypes[] =
{
    D3D_DRIVER_TYPE_HARDWARE,
    D3D_DRIVER_TYPE_WARP,
    D3D_DRIVER_TYPE_REFERENCE,
};
UINT NumDriverTypes = ARRAYSIZE(DriverTypes);
// Feature levels requested from D3D11CreateDevice, newest first.
D3D_FEATURE_LEVEL FeatureLevels[] =
{
    D3D_FEATURE_LEVEL_11_0,
    D3D_FEATURE_LEVEL_10_1,
    D3D_FEATURE_LEVEL_10_0,
    D3D_FEATURE_LEVEL_9_1
};
UINT NumFeatureLevels = ARRAYSIZE(FeatureLevels);
// Shared D3D11/DXGI state used by the WDDM class below.
// NOTE(review): these are file-scope globals rather than class members, so
// only a single WDDM instance per process is safe.
D3D_FEATURE_LEVEL FeatureLevel;
ID3D11Device* gDevice = NULL;
ID3D11DeviceContext* gContext = NULL;
IDXGIOutputDuplication* gOutputDuplication = NULL;
IDXGISurface* surf = NULL;   // currently mapped staging surface, if any
ID3D11Texture2D* sStage = NULL; // CPU-readable copy of the desktop image
DXGI_OUTDUPL_FRAME_INFO FrameInfo;
class WDDM {
public:
int wf_dxgi_init(UINT screenID, RECT screen)
{
//not sure if needed
gAcquiredDesktopImage = NULL;
this->screen = screen;
if (wf_dxgi_createDevice() != 0)
{
return 1;
}
if (wf_dxgi_getDuplication(screenID) != 0)
{
return 1;
}
return 0;
}
int wf_dxgi_createDevice()
{
HRESULT status;
UINT DriverTypeIndex;
for (DriverTypeIndex = 0; DriverTypeIndex < NumDriverTypes; ++DriverTypeIndex)
{
/*status = D3D11CreateDevice(NULL, DriverTypes[DriverTypeIndex], NULL, D3D11_CREATE_DEVICE_DEBUG, FeatureLevels, NumFeatureLevels,
D3D11_SDK_VERSION, &gDevice, &FeatureLevel, &gContext);
*/
status = D3D11CreateDevice(NULL, DriverTypes[DriverTypeIndex], NULL, NULL, FeatureLevels, NumFeatureLevels,
D3D11_SDK_VERSION, &gDevice, &FeatureLevel, &gContext);
if (SUCCEEDED(status))
break;
_tprintf(_T("D3D11CreateDevice returned [%d] for Driver Type %d\n"), status, DriverTypes[DriverTypeIndex]);
}
if (FAILED(status))
{
_tprintf(_T("Failed to create device in InitializeDx\n"));
return 1;
//debug
/*
for (DriverTypeIndex = 0; DriverTypeIndex < NumDriverTypes; ++DriverTypeIndex)
{
status = D3D11CreateDevice(NULL, DriverTypes[DriverTypeIndex], NULL, NULL, FeatureLevels, NumFeatureLevels,
D3D11_SDK_VERSION, &gDevice, &FeatureLevel, &gContext);
if (SUCCEEDED(status))
break;
_tprintf(_T("D3D11CreateDevice returned [%d] for Driver Type %d\n"), status, DriverTypes[DriverTypeIndex]);
}
if (FAILED(status))
{
_tprintf(_T("Failed to create device in InitializeDx\n"));
return 1;
}
*/
}
return 0;
}
int wf_dxgi_getDuplication(UINT screenID)
{
HRESULT status;
UINT dTop, i = 0;
DXGI_OUTPUT_DESC desc;
IDXGIOutput * pOutput;
IDXGIDevice* DxgiDevice = NULL;
IDXGIAdapter* DxgiAdapter = NULL;
IDXGIOutput* DxgiOutput = NULL;
IDXGIOutput1* DxgiOutput1 = NULL;
status = gDevice->QueryInterface(__uuidof(IDXGIDevice), (void**)&DxgiDevice);
if (FAILED(status))
{
_tprintf(_T("Failed to get QI for DXGI Device\n"));
return 1;
}
status = DxgiDevice->GetParent(__uuidof(IDXGIAdapter), (void**)&DxgiAdapter);
DxgiDevice->Release();
DxgiDevice = NULL;
if (FAILED(status))
{
_tprintf(_T("Failed to get parent DXGI Adapter\n"));
return 1;
}
ZeroMemory(&desc, sizeof(desc));
pOutput = NULL;
while (DxgiAdapter->EnumOutputs(i, &pOutput) != DXGI_ERROR_NOT_FOUND)
{
DXGI_OUTPUT_DESC* pDesc = &desc;
status = pOutput->GetDesc(pDesc);
if (FAILED(status))
{
_tprintf(_T("Failed to get description\n"));
return 1;
}
wprintf(L"Output %d: [%s] [%s] (%d, %d, %d, %d)\n", i, pDesc->DeviceName, pDesc->AttachedToDesktop ? L"attached" : L"not attached",
pDesc->DesktopCoordinates.left, pDesc->DesktopCoordinates.top, pDesc->DesktopCoordinates.right, pDesc->DesktopCoordinates.bottom);
if (pDesc->AttachedToDesktop)
dTop = i;
pOutput->Release();
++i;
}
dTop = screenID;
status = DxgiAdapter->EnumOutputs(dTop, &DxgiOutput);
DxgiAdapter->Release();
DxgiAdapter = NULL;
if (FAILED(status))
{
_tprintf(_T("Failed to get output\n"));
return 1;
}
status = DxgiOutput->QueryInterface(__uuidof(DxgiOutput1), (void**)&DxgiOutput1);
DxgiOutput->Release();
DxgiOutput = NULL;
if (FAILED(status))
{
_tprintf(_T("Failed to get IDXGIOutput1\n"));
return 1;
}
status = DxgiOutput1->DuplicateOutput(gDevice, &gOutputDuplication);
DxgiOutput1->Release();
DxgiOutput1 = NULL;
if (FAILED(status))
{
if (status == DXGI_ERROR_NOT_CURRENTLY_AVAILABLE)
{
_tprintf(_T("There is already the maximum number of applications using the Desktop Duplication API running, please close one of those applications and then try again.\n"));
return 1;
}
_tprintf(_T("Failed to get duplicate output\n"));
return 1;
}
return 0;
}
int wf_dxgi_cleanup()
{
if (framesWaiting > 0)
{
wf_dxgi_releasePixelData();
}
if (gAcquiredDesktopImage)
{
gAcquiredDesktopImage->Release();
gAcquiredDesktopImage = NULL;
}
if (gOutputDuplication)
{
gOutputDuplication->Release();
gOutputDuplication = NULL;
}
if (gContext)
{
gContext->Release();
gContext = NULL;
}
if (gDevice)
{
gDevice->Release();
gDevice = NULL;
}
return 0;
}
int wf_dxgi_nextFrame(UINT timeout)
{
HRESULT status = 0;
UINT i = 0;
UINT DataBufferSize = 0;
BYTE* DataBuffer = NULL;
IDXGIResource* DesktopResource = NULL;
if (framesWaiting > 0)
{
wf_dxgi_releasePixelData();
}
if (gAcquiredDesktopImage)
{
gAcquiredDesktopImage->Release();
gAcquiredDesktopImage = NULL;
}
status = gOutputDuplication->AcquireNextFrame(timeout, &FrameInfo, &DesktopResource);
if (status == DXGI_ERROR_WAIT_TIMEOUT)
{
return 1;
}
if (FAILED(status))
{
if (status == DXGI_ERROR_ACCESS_LOST)
{
_tprintf(_T("Failed to acquire next frame with status=%#X\n"), status);
_tprintf(_T("Trying to reinitialize due to ACCESS LOST..."));
wf_dxgi_getDuplication(0);
}
else
{
_tprintf(_T("Failed to acquire next frame with status=%#X\n"), status);
_tprintf(_T("\tAccumulated Frames: %d\n\tRects: %d\n\tBuffSize: %d\n"),
FrameInfo.AccumulatedFrames,
FrameInfo.RectsCoalesced,
FrameInfo.TotalMetadataBufferSize);
status = gOutputDuplication->ReleaseFrame();
if (FAILED(status))
{
_tprintf(_T("Failed to release frame with status=%d\n"), status);
}
return 1;
}
}
status = DesktopResource->QueryInterface(__uuidof(ID3D11Texture2D), (void**)&gAcquiredDesktopImage);
DesktopResource->Release();
DesktopResource = NULL;
if (FAILED(status))
{
return 1;
}
framesWaiting = FrameInfo.AccumulatedFrames;
return 0;
}
int wf_dxgi_getPixelData(BYTE** data, int* pitch, RECT* invalid)
{
HRESULT status;
D3D11_BOX Box;
DXGI_MAPPED_RECT mappedRect;
D3D11_TEXTURE2D_DESC tDesc;
tDesc.Width = (invalid->right - invalid->left);
tDesc.Height = (invalid->bottom - invalid->top);
tDesc.MipLevels = 1;
tDesc.ArraySize = 1;
tDesc.Format = DXGI_FORMAT_B8G8R8A8_UNORM;
tDesc.SampleDesc.Count = 1;
tDesc.SampleDesc.Quality = 0;
tDesc.Usage = D3D11_USAGE_STAGING;
tDesc.BindFlags = 0;
tDesc.CPUAccessFlags = D3D11_CPU_ACCESS_READ;
tDesc.MiscFlags = 0;
INT OffsetX = screen.left;
INT OffsetY = screen.top;
Box.top = invalid->top - OffsetY;
Box.left = invalid->left - OffsetX;
Box.right = invalid->right - OffsetX;
Box.bottom = invalid->bottom - OffsetY;
Box.front = 0;
Box.back = 1;
status = gDevice->CreateTexture2D(&tDesc, NULL, &sStage);
if (FAILED(status))
{
_tprintf(_T("Failed to create staging surface\n"));
exit(1);
return 1;
}
gContext->CopySubresourceRegion(sStage, 0, 0, 0, 0, gAcquiredDesktopImage, 0, &Box);
status = sStage->QueryInterface(__uuidof(IDXGISurface), (void**)&surf);
if (FAILED(status))
{
_tprintf(_T("Failed to QI staging surface\n"));
exit(1);
return 1;
}
surf->Map(&mappedRect, DXGI_MAP_READ);
if (FAILED(status))
{
_tprintf(_T("Failed to map staging surface\n"));
exit(1);
return 1;
}
*data = mappedRect.pBits;
*pitch = mappedRect.Pitch;
return 0;
}
int wf_dxgi_releasePixelData()
{
HRESULT status;
if (surf) {
surf->Unmap();
surf->Release();
surf = NULL;
}
if (sStage) {
sStage->Release();
sStage = NULL;
}
status = gOutputDuplication->ReleaseFrame();
if (FAILED(status))
{
_tprintf(_T("Failed to release frame\n"));
return 1;
}
framesWaiting = 0;
return 0;
}
private:
ID3D11Texture2D* gAcquiredDesktopImage;
int framesWaiting;
RECT screen;
};

View File

@ -0,0 +1,49 @@
### To compile the server in Windows WITH an NVIDIA card ###
- Install BOOST
- http://www.boost.org/users/download/
- I downloaded boost_1_69_0-msvc-14.1-64.exe from https://sourceforge.net/projects/boost/files/boost-binaries/1.69.0/ for Visual Studio 2017
- Install CMAKE, I took cmake-3.13.3-win64-x64.msi
- http://www.cmake.org/install/
- Install Nvidia CUDA 10.0 from https://developer.nvidia.com/cuda-downloads
- Open CMAKE
- In the field: where is the source code, have the path to the subfolder Server from RPI-GPU-rdpClient git.
- In the field: Where to build the binaries, make a subfolder build under Server
- Press configure, I selected "Visual Studio 15 2017 Win64"
- Click on Add Entry and enter BOOST_ROOT to the root of the Boost folder "C:\local\boost_1_69_0"
- Do the same for BOOST_LIBRARYDIR and set it to "C:\local\boost_1_69_0\lib64-msvc-14.1"
- I had to set CUDA_TOOLKIT_ROOT_DIR to "C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v10.0"
- Configure and Generate
- It should look like the following:
- ![ScreenShot](https://i.imgur.com/Htlr9NP.png)
- Open Server\build\server.sln in Visual Studio
- Select Release and Build the Solution
- Open a command prompt and cd to Server\build\Release
- Run "server monitor 0 port 8080"
### To compile the server in Windows WITHOUT an NVIDIA card ###
Note, the FPS will be significantly lower without a NVIDIA card, around 10FPS depending on the CPU.
- Install BOOST
- http://www.boost.org/users/download/
- I downloaded boost_1_60_0-msvc-10.0-32.exe from https://sourceforge.net/projects/boost/files/boost-binaries/1.60.0/ for Visual Studio 2010
- Install CMAKE, I took cmake-3.5.0-rc3-win32-x86.msi
- http://www.cmake.org/install/
- Open CMAKE
- In the field: where is the source code, have the path to the subfolder Server from RPI-GPU-rdpClient git.
- In the field: Where to build the binaries, make a subfolder build under Server
- Press configure, I selected "Visual Studio 10 2010"
- Click on Add Entry and enter BOOST_ROOT to the root of the Boost folder "C:/local/boost_1_60_0"
- Do the same for BOOST_LIBRARYDIR and set it to "C:/local/boost_1_60_0/lib32-msvc-10.0"
- Download FFMPEG from http://ffmpeg.zeranoe.com/builds/, need the dev and shared
- Set FFMPEG_ROOT to the root of FFMPEG dev folder with the README.txt
- In my case "RPI-GPU-rdpClient\ffmpeg\ffmpeg-20160307-git-6f5048f-win32-dev"
- Add the bin folder of the shared zip to your path, or copy the DLLs
- Uncheck USE_CUDA and USE_NVENC
- Only keep USE_WDDM if you have Windows 8.0 or up
- Configure and Generate
- It should look like the following:
- ![ScreenShot](http://i.imgur.com/485jCoE.png)
- Open Server\build\server.sln in Visual Studio
- Select Release and Build the Solution
- Open a command prompt and cd to Server\build\Release
- Run "server monitor 0 port 8080"
- If missing [inttypes.h], check http://stackoverflow.com/questions/13266868/ffmpeg-inttypes-h-not-found-error

View File

@ -0,0 +1,20 @@

Microsoft Visual Studio Solution File, Format Version 11.00
# Visual Studio 2010
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "win8-wddm", "win8-wddm\win8-wddm.vcxproj", "{293FE1A0-EFBC-49A3-840A-FD94FD31C89C}"
EndProject
Global
GlobalSection(SolutionConfigurationPlatforms) = preSolution
Debug|Win32 = Debug|Win32
Release|Win32 = Release|Win32
EndGlobalSection
GlobalSection(ProjectConfigurationPlatforms) = postSolution
{293FE1A0-EFBC-49A3-840A-FD94FD31C89C}.Debug|Win32.ActiveCfg = Debug|Win32
{293FE1A0-EFBC-49A3-840A-FD94FD31C89C}.Debug|Win32.Build.0 = Debug|Win32
{293FE1A0-EFBC-49A3-840A-FD94FD31C89C}.Release|Win32.ActiveCfg = Release|Win32
{293FE1A0-EFBC-49A3-840A-FD94FD31C89C}.Release|Win32.Build.0 = Release|Win32
EndGlobalSection
GlobalSection(SolutionProperties) = preSolution
HideSolutionNode = FALSE
EndGlobalSection
EndGlobal

View File

@ -0,0 +1,30 @@
#pragma comment (lib, "d3d11.lib")
#pragma comment (lib, "Dxgi.lib")
#include <fstream>
#include "wddm.h"
using namespace std;
// Stand-alone smoke test for the WDDM desktop-duplication wrapper.
int main(int argc, const char* argv[]) {
    WDDM wddm;
    // NOTE(review): the return value is ignored — if init fails, the globals
    // stay NULL and the calls below will dereference them.
    wddm.wf_dxgi_init();
    byte* data;
    int pitch;
    // Grab a fixed 600x600 region from the top-left corner.
    RECT rect;
    rect.left=0;
    rect.top=0;
    rect.bottom=600;
    rect.right=600;
    while(true) {
        // NOTE(review): wf_dxgi_nextFrame is never called, so no desktop
        // image has been acquired when getPixelData runs; and
        // wf_dxgi_releasePixelData is never called, so each iteration leaks
        // a staging texture and leaves the surface mapped — verify intent.
        wddm.wf_dxgi_getPixelData(&data, &pitch, &rect);
    }
    // NOTE(review): unreachable — the loop above never exits, so the pause
    // and cleanup below never run.
    system("pause");
    wddm.wf_dxgi_cleanup();
}

View File

@ -0,0 +1,383 @@
#pragma once
//#pragma comment (lib, "d3dx11.lib")
//#pragma comment (lib, "d3dx10.lib")
#include <windows.h>
#include <windowsx.h>
//#define CINTERFACE
#include <DXGItype.h>
#include <D3D11.h>
#include <dxgi1_2.h>
#include <tchar.h>
/* Driver types supported */
// Tried in order by wf_dxgi_createDevice: real hardware first, then the
// software rasterizers as fallbacks.
D3D_DRIVER_TYPE DriverTypes[] =
{
    D3D_DRIVER_TYPE_HARDWARE,
    D3D_DRIVER_TYPE_WARP,
    D3D_DRIVER_TYPE_REFERENCE,
};
UINT NumDriverTypes = ARRAYSIZE(DriverTypes);
// Feature levels requested from D3D11CreateDevice, newest first.
D3D_FEATURE_LEVEL FeatureLevels[] =
{
    D3D_FEATURE_LEVEL_11_0,
    D3D_FEATURE_LEVEL_10_1,
    D3D_FEATURE_LEVEL_10_0,
    D3D_FEATURE_LEVEL_9_1
};
UINT NumFeatureLevels = ARRAYSIZE(FeatureLevels);
// Shared D3D11/DXGI state used by the WDDM class below; file-scope globals,
// so only one instance per process is safe. 'surf'/'sStage' rely on
// zero-initialization of statics to start out NULL.
D3D_FEATURE_LEVEL FeatureLevel;
ID3D11Device* gDevice = NULL;
ID3D11DeviceContext* gContext = NULL;
IDXGIOutputDuplication* gOutputDuplication = NULL;
IDXGISurface* surf;
ID3D11Texture2D* sStage;
DXGI_OUTDUPL_FRAME_INFO FrameInfo;
class WDDM {
public:
int wf_dxgi_init()
{
//not sure if needed
gAcquiredDesktopImage = NULL;
if (wf_dxgi_createDevice() != 0)
{
return 1;
}
if (wf_dxgi_getDuplication(0) != 0)
{
return 1;
}
return 0;
}
int wf_dxgi_createDevice()
{
HRESULT status;
UINT DriverTypeIndex;
for (DriverTypeIndex = 0; DriverTypeIndex < NumDriverTypes; ++DriverTypeIndex)
{
status = D3D11CreateDevice(NULL, DriverTypes[DriverTypeIndex], NULL, 0, FeatureLevels, NumFeatureLevels,
D3D11_SDK_VERSION, &gDevice, &FeatureLevel, &gContext);
if (SUCCEEDED(status))
break;
_tprintf(_T("D3D11CreateDevice returned [%d] for Driver Type %d\n"), status, DriverTypes[DriverTypeIndex]);
}
if (FAILED(status))
{
_tprintf(_T("Failed to create device in InitializeDx\n"));
return 1;
}
return 0;
}
int wf_dxgi_getDuplication(UINT screenID)
{
HRESULT status;
UINT i = 0;
DXGI_OUTPUT_DESC desc;
IDXGIOutput * pOutput;
IDXGIDevice* DxgiDevice = NULL;
IDXGIAdapter* DxgiAdapter = NULL;
IDXGIOutput* DxgiOutput = NULL;
IDXGIOutput1* DxgiOutput1 = NULL;
status = gDevice->QueryInterface(__uuidof(IDXGIDevice), (void**) &DxgiDevice);
if (FAILED(status))
{
_tprintf(_T("Failed to get QI for DXGI Device\n"));
return 1;
}
status = DxgiDevice->GetParent(__uuidof(IDXGIAdapter), (void**) &DxgiAdapter);
DxgiDevice->Release();
DxgiDevice = NULL;
if (FAILED(status))
{
_tprintf(_T("Failed to get parent DXGI Adapter\n"));
return 1;
}
ZeroMemory(&desc, sizeof(desc));
pOutput = NULL;
while (DxgiAdapter->EnumOutputs(i, &pOutput) != DXGI_ERROR_NOT_FOUND)
{
DXGI_OUTPUT_DESC* pDesc = &desc;
status = pOutput->GetDesc(pDesc);
if (FAILED(status))
{
_tprintf(_T("Failed to get description\n"));
return 1;
}
_tprintf(_T("Output %d: [%s] [%d]\n"), i, pDesc->DeviceName, pDesc->AttachedToDesktop);
/*if (pDesc->AttachedToDesktop)
dTop = i;*/
pOutput->Release();
++i;
}
status = DxgiAdapter->EnumOutputs(screenID, &DxgiOutput);
DxgiAdapter->Release();
DxgiAdapter = NULL;
if (FAILED(status))
{
_tprintf(_T("Failed to get output\n"));
return 1;
}
status = DxgiOutput->QueryInterface(__uuidof(IDXGIOutput1), (void**) &DxgiOutput1);
DxgiOutput->Release();
DxgiOutput = NULL;
if (FAILED(status))
{
_tprintf(_T("Failed to get IDXGIOutput1\n"));
return 1;
}
status = DxgiOutput1->DuplicateOutput((IUnknown*)gDevice, &gOutputDuplication);
DxgiOutput1->Release();
DxgiOutput1 = NULL;
if (FAILED(status))
{
if (status == DXGI_ERROR_NOT_CURRENTLY_AVAILABLE)
{
_tprintf(_T("There is already the maximum number of applications using the Desktop Duplication API running, please close one of those applications and then try again.\n"));
return 1;
}
_tprintf(_T("Failed to get duplicate output. Status = %#X\n"), status);
return 1;
}
return 0;
}
int wf_dxgi_cleanup()
{
/*if (framesWaiting > 0)
{
wf_dxgi_releasePixelData(wfi);
}*/
if (gAcquiredDesktopImage)
{
gAcquiredDesktopImage->Release();
gAcquiredDesktopImage = NULL;
}
if (gOutputDuplication)
{
gOutputDuplication->Release();
gOutputDuplication = NULL;
}
if(gContext)
{
gContext->Release();
gContext = NULL;
}
if(gDevice)
{
gDevice->Release();
gDevice = NULL;
}
return 0;
}
int wf_dxgi_nextFrame(UINT timeout)
{
HRESULT status = 0;
UINT i = 0;
UINT DataBufferSize = 0;
BYTE* DataBuffer = NULL;
IDXGIResource* DesktopResource = NULL;
if (gAcquiredDesktopImage)
{
gAcquiredDesktopImage->Release();
gAcquiredDesktopImage = NULL;
}
status = gOutputDuplication->AcquireNextFrame(timeout, &FrameInfo, &DesktopResource);
if (status == DXGI_ERROR_WAIT_TIMEOUT)
{
return 1;
}
if (FAILED(status))
{
if (status == DXGI_ERROR_ACCESS_LOST)
{
_tprintf(_T("Failed to acquire next frame with status=%#X\n"), status);
_tprintf(_T("Trying to reinitialize due to ACCESS LOST..."));
if (gAcquiredDesktopImage)
{
gAcquiredDesktopImage->Release();
gAcquiredDesktopImage = NULL;
}
if (gOutputDuplication)
{
gOutputDuplication->Release();
gOutputDuplication = NULL;
}
wf_dxgi_getDuplication(0); // TODO
return 1;
}
else
{
_tprintf(_T("Failed to acquire next frame with status=%#X\n"), status);
status = gOutputDuplication->ReleaseFrame();
if (FAILED(status))
{
_tprintf(_T("Failed to release frame with status=%d\n"), status);
}
return 1;
}
}
status = DesktopResource->QueryInterface(__uuidof(ID3D11Texture2D), (void**) &gAcquiredDesktopImage);
DesktopResource->Release();
DesktopResource = NULL;
if (FAILED(status))
{
return 1;
}
//wfi->framesWaiting = FrameInfo.AccumulatedFrames;
if (FrameInfo.AccumulatedFrames == 0)
{
status = gOutputDuplication->ReleaseFrame();
if (FAILED(status))
{
_tprintf(_T("Failed to release frame with status=%d\n"), status);
}
}
return 0;
}
int wf_dxgi_getPixelData(BYTE** data, int* pitch, RECT* invalid)
{
HRESULT status;
D3D11_BOX Box;
DXGI_MAPPED_RECT mappedRect;
D3D11_TEXTURE2D_DESC tDesc;
tDesc.Width = (invalid->right - invalid->left);
tDesc.Height = (invalid->bottom - invalid->top);
tDesc.MipLevels = 1;
tDesc.ArraySize = 1;
tDesc.Format = DXGI_FORMAT_B8G8R8A8_UNORM;
tDesc.SampleDesc.Count = 1;
tDesc.SampleDesc.Quality = 0;
tDesc.Usage = D3D11_USAGE_STAGING;
tDesc.BindFlags = 0;
tDesc.CPUAccessFlags = D3D11_CPU_ACCESS_READ;
tDesc.MiscFlags = 0;
Box.top = invalid->top;
Box.left = invalid->left;
Box.right = invalid->right;
Box.bottom = invalid->bottom;
Box.front = 0;
Box.back = 1;
status = gDevice->CreateTexture2D(&tDesc, NULL, &sStage);
if (FAILED(status))
{
_tprintf(_T("Failed to create staging surface\n"));
exit(1);
return 1;
}
gContext->CopySubresourceRegion((ID3D11Resource*) sStage, 0,0,0,0, (ID3D11Resource*) gAcquiredDesktopImage, 0, &Box);
status = sStage->QueryInterface(_uuidof(IDXGISurface), (void**) &surf);
if (FAILED(status))
{
_tprintf(_T("Failed to QI staging surface\n"));
exit(1);
return 1;
}
surf->Map(&mappedRect, DXGI_MAP_READ);
if (FAILED(status))
{
_tprintf(_T("Failed to map staging surface\n"));
exit(1);
return 1;
}
*data = mappedRect.pBits;
*pitch = mappedRect.Pitch;
return 0;
}
int wf_dxgi_releasePixelData()
{
HRESULT status;
surf->Unmap();
surf->Release();
surf = NULL;
sStage->Release();
sStage = NULL;
status = gOutputDuplication->ReleaseFrame();
if (FAILED(status))
{
_tprintf(_T("Failed to release frame\n"));
return 1;
}
//wfi->framesWaiting = 0;
return 0;
}
private:
ID3D11Texture2D* gAcquiredDesktopImage;
};

View File

@ -0,0 +1,27 @@
<?xml version="1.0" encoding="utf-8"?>
<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup>
<Filter Include="Source Files">
<UniqueIdentifier>{4FC737F1-C7A5-4376-A066-2A32D752A2FF}</UniqueIdentifier>
<Extensions>cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx</Extensions>
</Filter>
<Filter Include="Header Files">
<UniqueIdentifier>{93995380-89BD-4b04-88EB-625FBE52EBFB}</UniqueIdentifier>
<Extensions>h;hpp;hxx;hm;inl;inc;xsd</Extensions>
</Filter>
<Filter Include="Resource Files">
<UniqueIdentifier>{67DA6AB6-F800-4c08-8B7A-83BB121AAD01}</UniqueIdentifier>
<Extensions>rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms</Extensions>
</Filter>
</ItemGroup>
<ItemGroup>
<ClCompile Include="main.cpp">
<Filter>Source Files</Filter>
</ClCompile>
</ItemGroup>
<ItemGroup>
<ClInclude Include="wddm.h">
<Filter>Header Files</Filter>
</ClInclude>
</ItemGroup>
</Project>