/* SPDX-License-Identifier: MPL-1.1 OR GPL-2.0-or-later */

/*
 * The contents of this file are subject to the Mozilla Public
 * License Version 1.1 (the "License"); you may not use this file
 * except in compliance with the License. You may obtain a copy of
 * the License at http://www.mozilla.org/MPL/
 *
 * Software distributed under the License is distributed on an "AS
 * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
 * implied. See the License for the specific language governing
 * rights and limitations under the License.
 *
 * The Original Code is the Netscape Portable Runtime library.
 *
 * The Initial Developer of the Original Code is Netscape
 * Communications Corporation. Portions created by Netscape are
 * Copyright (C) 1994-2000 Netscape Communications Corporation. All
 * Rights Reserved.
 *
 * Contributor(s): Silicon Graphics, Inc.
 *
 * Portions created by SGI are Copyright (C) 2000-2001 Silicon
 * Graphics, Inc. All Rights Reserved.
 *
 * Alternatively, the contents of this file may be used under the
 * terms of the GNU General Public License Version 2 or later (the
 * "GPL"), in which case the provisions of the GPL are applicable
 * instead of those above. If you wish to allow use of your
 * version of this file only under the terms of the GPL and not to
 * allow others to use your version of this file under the MPL,
 * indicate your decision by deleting the provisions above and
 * replace them with the notice and other provisions required by
 * the GPL. If you do not delete the provisions above, a recipient
 * may use your version of this file under either the MPL or the
 * GPL.
 */

/*
 * This file is derived directly from Netscape Communications Corporation,
 * and consists of extensive modifications made during the year(s) 1999-2000.
 */

#ifndef __ST_MD_H__
#define __ST_MD_H__

#if defined(ETIMEDOUT) && !defined(ETIME)
    #define ETIME ETIMEDOUT
#endif

#if defined(MAP_ANONYMOUS) && !defined(MAP_ANON)
    #define MAP_ANON MAP_ANONYMOUS
#endif

#ifndef MAP_FAILED
    #define MAP_FAILED -1
#endif

/* We define our own jmp_buf, because the system's layout differs across OSes and CPUs. */
typedef struct _st_jmp_buf {
    /*
     *   OS         CPU                     SIZE
     * Darwin     __amd64__/__x86_64__    long[8]
     * Darwin     __aarch64__             long[22]
     * Linux      __i386__                long[6]
     * Linux      __amd64__/__x86_64__    long[8]
     * Linux      __aarch64__             long[22]
     * Linux      __arm__                 long[16]
     * Linux      __mips__/__mips64       long[13]
     * Linux      __riscv                 long[14]
     * Linux      __loongarch64           long[12]
     * Cygwin64   __amd64__/__x86_64__    long[8]
     */
    long __jmpbuf[22];
} _st_jmp_buf_t[1];
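
/*
 * Note: __jmpbuf is sized for the largest layout listed above (long[22] on
 * __aarch64__), so a single definition covers every supported OS/CPU pair.
 */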

/* Defined in the *.S file and implemented in assembly. */
extern int _st_md_cxt_save(_st_jmp_buf_t env);
extern void _st_md_cxt_restore(_st_jmp_buf_t env, int val);
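
/*
 * Usage sketch (assumes the pair behaves like setjmp/longjmp, which is what
 * the assembly provides): _st_md_cxt_save() captures the callee-saved
 * registers and the stack pointer into env and returns 0; a later
 * _st_md_cxt_restore(env, val) resumes execution at that capture point,
 * making the earlier save appear to return val (non-zero). For example:
 *
 *     _st_jmp_buf_t env;
 *     if (_st_md_cxt_save(env) == 0) {
 *         // direct return: context captured, switch to another context here
 *     } else {
 *         // resumed here via _st_md_cxt_restore(env, 1)
 *     }
 */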

/* Always use the builtin setjmp/longjmp implemented in asm code. */
#if defined(USE_LIBC_SETJMP)
    #error The libc setjmp is not supported now
#endif


/*****************************************
 * Platform specifics
 */

#if defined (DARWIN)

    #define MD_USE_BSD_ANON_MMAP
    #define MD_ACCEPT_NB_INHERITED
    #define MD_HAVE_SOCKLEN_T

    #if defined(__amd64__) || defined(__x86_64__)
        #define MD_GET_SP(_t) *((long *)&((_t)->context[0].__jmpbuf[6]))
    #elif defined(__aarch64__)
        #define MD_GET_SP(_t) *((long *)&((_t)->context[0].__jmpbuf[13]))
    #else
        #error Unknown CPU architecture
    #endif

    #if defined(MD_OSX_HAS_NO_CLOCK_GETTIME)
        #define MD_GET_UTIME()                  \
            struct timeval tv;                  \
            (void) gettimeofday(&tv, NULL);     \
            return (tv.tv_sec * 1000000LL + tv.tv_usec)
    #else
        /*
         * https://github.com/ossrs/srs/issues/3978
         * Use clock_gettime to get the timestamp in microseconds; CLOCK_MONOTONIC
         * cannot jump backwards when the system wall clock is adjusted.
         */
        #define MD_GET_UTIME()                  \
            struct timespec ts;                 \
            clock_gettime(CLOCK_MONOTONIC, &ts); \
            return (ts.tv_sec * 1000000LL + ts.tv_nsec / 1000)
    #endif
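
    /*
     * Illustrative sketch (not part of this header): MD_GET_UTIME() expands
     * to a statement list that ends with a return, so it is intended to be
     * the body of a time-query function, for example the library's st_utime():
     *
     *     st_utime_t st_utime(void)
     *     {
     *         MD_GET_UTIME();
     *     }
     *
     * Either branch yields a microsecond timestamp: tv_sec * 1000000 + tv_usec
     * for gettimeofday(), or tv_sec * 1000000 + tv_nsec / 1000 for clock_gettime().
     */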

#elif defined (LINUX)

    /*
     * These are properties of the linux kernel and are the same on every
     * flavor and architecture.
     */
    #define MD_USE_BSD_ANON_MMAP
    #define MD_ACCEPT_NB_NOT_INHERITED
    /*
     * Modern GNU/Linux is Posix.1g compliant.
     */
    #define MD_HAVE_SOCKLEN_T

    /*
     * https://github.com/ossrs/srs/issues/3978
     * Use clock_gettime to get the timestamp in microseconds; CLOCK_MONOTONIC
     * cannot jump backwards when the system wall clock is adjusted.
     */
    #define MD_GET_UTIME()                  \
        struct timespec ts;                 \
        clock_gettime(CLOCK_MONOTONIC, &ts); \
        return (ts.tv_sec * 1000000LL + ts.tv_nsec / 1000)

    #if defined(__i386__)
        #define MD_GET_SP(_t) *((long *)&((_t)->context[0].__jmpbuf[4]))
    #elif defined(__amd64__) || defined(__x86_64__)
        #define MD_GET_SP(_t) *((long *)&((_t)->context[0].__jmpbuf[6]))
    #elif defined(__aarch64__)
        /* https://github.com/ossrs/state-threads/issues/9 */
        #define MD_GET_SP(_t) *((long *)&((_t)->context[0].__jmpbuf[13]))
    #elif defined(__arm__)
        /* https://github.com/ossrs/state-threads/issues/1#issuecomment-244648573 */
        #define MD_GET_SP(_t) *((long *)&((_t)->context[0].__jmpbuf[8]))
    #elif defined(__mips64)
        /* https://github.com/ossrs/state-threads/issues/21 */
        #define MD_GET_SP(_t) *((long *)&((_t)->context[0].__jmpbuf[0]))
    #elif defined(__mips__)
        /* https://github.com/ossrs/state-threads/issues/21 */
        #define MD_GET_SP(_t) *((long *)&((_t)->context[0].__jmpbuf[0]))
    #elif defined(__riscv)
        /* https://github.com/ossrs/state-threads/pull/28 */
        #define MD_GET_SP(_t) *((long *)&((_t)->context[0].__jmpbuf[0]))
    #elif defined(__loongarch64)
        /* https://github.com/ossrs/state-threads/issues/24 */
        #define MD_GET_SP(_t) *((long *)&((_t)->context[0].__jmpbuf[0]))
    #else
        #error "Unknown CPU architecture"
    #endif
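
    /*
     * Illustrative sketch (hypothetical names, not the library's exact code):
     * MD_GET_SP(_t) is an lvalue for the saved stack-pointer slot in a
     * thread's context, at an architecture-specific jmpbuf index. After a
     * context is captured for a new thread, the scheduler overwrites that
     * slot so the restored context runs on the thread's own stack:
     *
     *     if (_st_md_cxt_save(trd->context))
     *         thread_main();                   // entered when the context is restored
     *     MD_GET_SP(trd) = (long) stack_top;   // trd/stack_top/thread_main are placeholders
     */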

#elif defined (CYGWIN64)

    // For CYGWIN64, build SRS on Windows.
    #define MD_USE_BSD_ANON_MMAP
    #define MD_ACCEPT_NB_INHERITED
    #define MD_HAVE_SOCKLEN_T

    #if defined(__amd64__) || defined(__x86_64__)
        #define MD_GET_SP(_t) *((long *)&((_t)->context[0].__jmpbuf[6]))
    #else
        #error Unknown CPU architecture
    #endif

    #define MD_GET_UTIME()                  \
        struct timeval tv;                  \
        (void) gettimeofday(&tv, NULL);     \
        return (tv.tv_sec * 1000000LL + tv.tv_usec)
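
    /*
     * Cygwin keeps the gettimeofday() based MD_GET_UTIME(); monotonic
     * clock_gettime() support there is unverified.
     * See https://github.com/ossrs/srs/issues/3978
     */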

#else
#error Unknown OS
#endif /* OS */


#if !defined(MD_HAVE_SOCKLEN_T) && !defined(socklen_t)
    #define socklen_t int
#endif


#ifndef MD_CAP_STACK
    #define MD_CAP_STACK(var_addr)
#endif

#endif /* !__ST_MD_H__ */