优化Token溢出时的处理
This commit is contained in:
		
							parent
							
								
									17a18e99fa
								
							
						
					
					
						commit
						cb9404c4de
					
				@ -23,6 +23,8 @@ else:
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
# [step 3]>> 以下配置可以优化体验,但大部分场合下并不需要修改
 | 
			
		||||
# 对话窗的高度
 | 
			
		||||
CHATBOT_HEIGHT = 1117
 | 
			
		||||
 | 
			
		||||
# 发送请求到OpenAI后,等待多久判定为超时
 | 
			
		||||
TIMEOUT_SECONDS = 25
 | 
			
		||||
 | 
			
		||||
							
								
								
									
										3276
									
								
								crazy_functions/test_project/cpp/longcode/jpgd.cpp
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										3276
									
								
								crazy_functions/test_project/cpp/longcode/jpgd.cpp
									
									
									
									
									
										Normal file
									
								
							
										
											
												File diff suppressed because it is too large
												Load Diff
											
										
									
								
							
							
								
								
									
										1049
									
								
								crazy_functions/test_project/cpp/longcode/jpge.cpp
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										1049
									
								
								crazy_functions/test_project/cpp/longcode/jpge.cpp
									
									
									
									
									
										Normal file
									
								
							
										
											
												File diff suppressed because it is too large
												Load Diff
											
										
									
								
							
							
								
								
									
										433
									
								
								crazy_functions/test_project/cpp/longcode/prod_cons.h
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										433
									
								
								crazy_functions/test_project/cpp/longcode/prod_cons.h
									
									
									
									
									
										Normal file
									
								
							@ -0,0 +1,433 @@
 | 
			
		||||
#pragma once
 | 
			
		||||
 | 
			
		||||
#include <atomic>
 | 
			
		||||
#include <utility>
 | 
			
		||||
#include <cstring>
 | 
			
		||||
#include <type_traits>
 | 
			
		||||
#include <cstdint>
 | 
			
		||||
 | 
			
		||||
#include "libipc/def.h"
 | 
			
		||||
 | 
			
		||||
#include "libipc/platform/detail.h"
 | 
			
		||||
#include "libipc/circ/elem_def.h"
 | 
			
		||||
#include "libipc/utility/log.h"
 | 
			
		||||
#include "libipc/utility/utility.h"
 | 
			
		||||
 | 
			
		||||
namespace ipc {
 | 
			
		||||
 | 
			
		||||
////////////////////////////////////////////////////////////////
 | 
			
		||||
/// producer-consumer implementation
 | 
			
		||||
////////////////////////////////////////////////////////////////
 | 
			
		||||
 | 
			
		||||
template <typename Flag>
 | 
			
		||||
struct prod_cons_impl;
 | 
			
		||||
 | 
			
		||||
template <>
// Single-producer / single-consumer, unicast policy.
// A plain SPSC ring buffer: one writer owns wt_, one reader owns rd_,
// so each side only needs an acquire-load of the other side's index.
struct prod_cons_impl<wr<relat::single, relat::single, trans::unicast>> {

    // One ring slot: raw storage for a message payload.
    template <std::size_t DataSize, std::size_t AlignSize>
    struct elem_t {
        std::aligned_storage_t<DataSize, AlignSize> data_ {};
    };

    // Indices live on separate cache lines to avoid false sharing
    // between the producer and the consumer.
    alignas(cache_line_size) std::atomic<circ::u2_t> rd_; // read index
    alignas(cache_line_size) std::atomic<circ::u2_t> wt_; // write index

    // Unicast consumers track their own position via rd_; the shared
    // cursor is unused, hence the constant 0.
    constexpr circ::u2_t cursor() const noexcept {
        return 0;
    }

    // Writes one element via callback f. Returns false when the ring is full.
    // circ::index_of presumably maps a monotonically increasing counter onto
    // a ring slot index — TODO confirm against libipc/circ.
    template <typename W, typename F, typename E>
    bool push(W* /*wrapper*/, F&& f, E* elems) {
        auto cur_wt = circ::index_of(wt_.load(std::memory_order_relaxed));
        // Full when the write slot would collide with the slot just
        // before the read index (one slot is sacrificed as a sentinel).
        if (cur_wt == circ::index_of(rd_.load(std::memory_order_acquire) - 1)) {
            return false; // full
        }
        // Fill the slot first, then publish it with a release increment.
        std::forward<F>(f)(&(elems[cur_wt].data_));
        wt_.fetch_add(1, std::memory_order_release);
        return true;
    }

    /**
     * In single-single-unicast, 'force_push' means 'no reader' or 'the only one reader is dead'.
     * So we could just disconnect all connections of receiver, and return false.
    */
    template <typename W, typename F, typename E>
    bool force_push(W* wrapper, F&&, E*) {
        // ~0 is an all-ones connection mask: drop every receiver.
        wrapper->elems()->disconnect_receiver(~static_cast<circ::cc_t>(0u));
        return false;
    }

    // Reads one element: f consumes the payload in place, out(true) signals
    // the slot may be recycled. Returns false when the ring is empty.
    template <typename W, typename F, typename R, typename E>
    bool pop(W* /*wrapper*/, circ::u2_t& /*cur*/, F&& f, R&& out, E* elems) {
        auto cur_rd = circ::index_of(rd_.load(std::memory_order_relaxed));
        // Empty when read catches up with the published write index.
        if (cur_rd == circ::index_of(wt_.load(std::memory_order_acquire))) {
            return false; // empty
        }
        // Consume in place, then release the slot back to the producer.
        std::forward<F>(f)(&(elems[cur_rd].data_));
        std::forward<R>(out)(true);
        rd_.fetch_add(1, std::memory_order_release);
        return true;
    }
};
 | 
			
		||||
 | 
			
		||||
template <>
// Single-producer / multi-consumer, unicast policy.
// Inherits the SPSC layout; push is unchanged (still one writer), but pop
// must now arbitrate between competing readers with a CAS on rd_.
struct prod_cons_impl<wr<relat::single, relat::multi , trans::unicast>>
     : prod_cons_impl<wr<relat::single, relat::single, trans::unicast>> {

    template <typename W, typename F, typename E>
    bool force_push(W* wrapper, F&&, E*) {
        // Unicast with multiple readers: evict a single (presumably dead)
        // receiver rather than all of them — TODO confirm the mask semantics
        // of disconnect_receiver(1) against elem_def.h.
        wrapper->elems()->disconnect_receiver(1);
        return false;
    }

    // Competing-consumer pop. The payload is copied OUT of the slot before
    // the CAS on rd_: if the CAS loses, another reader claimed this element
    // and we retry with a fresh snapshot; the local copy is simply discarded.
    // Copying first is essential — after a successful CAS the producer may
    // immediately overwrite the slot.
    template <typename W, typename F, typename R, 
              template <std::size_t, std::size_t> class E, std::size_t DS, std::size_t AS>
    bool pop(W* /*wrapper*/, circ::u2_t& /*cur*/, F&& f, R&& out, E<DS, AS>* elems) {
        byte_t buff[DS]; // stack staging buffer, one slot wide
        for (unsigned k = 0;;) {
            auto cur_rd = rd_.load(std::memory_order_relaxed);
            if (circ::index_of(cur_rd) ==
                circ::index_of(wt_.load(std::memory_order_acquire))) {
                return false; // empty
            }
            std::memcpy(buff, &(elems[circ::index_of(cur_rd)].data_), sizeof(buff));
            // Winning this CAS makes the copy ours; losers loop.
            if (rd_.compare_exchange_weak(cur_rd, cur_rd + 1, std::memory_order_release)) {
                std::forward<F>(f)(buff);
                std::forward<R>(out)(true);
                return true;
            }
            // Back off progressively under contention (k counts retries).
            ipc::yield(k);
        }
    }
};
 | 
			
		||||
 | 
			
		||||
template <>
// Multi-producer / multi-consumer, unicast policy.
// Producers first reserve a slot by CAS-advancing the commit index ct_,
// write the payload, then cooperate to advance the published write index
// wt_ in order: each slot carries a commit flag f_ct_ holding ~cur_ct so
// that whichever producer observes the next in-order commit can publish it
// (a "helping" scheme — later producers may publish earlier slots).
struct prod_cons_impl<wr<relat::multi , relat::multi, trans::unicast>>
     : prod_cons_impl<wr<relat::single, relat::multi, trans::unicast>> {

    using flag_t = std::uint64_t;

    // Ring slot: payload storage plus the per-slot commit flag.
    template <std::size_t DataSize, std::size_t AlignSize>
    struct elem_t {
        std::aligned_storage_t<DataSize, AlignSize> data_ {};
        std::atomic<flag_t> f_ct_ { 0 }; // commit flag
    };

    alignas(cache_line_size) std::atomic<circ::u2_t> ct_; // commit index

    template <typename W, typename F, typename E>
    bool push(W* /*wrapper*/, F&& f, E* elems) {
        circ::u2_t cur_ct, nxt_ct;
        // Phase 1: reserve a slot by advancing ct_ (CAS race among producers).
        for (unsigned k = 0;;) {
            cur_ct = ct_.load(std::memory_order_relaxed);
            if (circ::index_of(nxt_ct = cur_ct + 1) ==
                circ::index_of(rd_.load(std::memory_order_acquire))) {
                return false; // full
            }
            if (ct_.compare_exchange_weak(cur_ct, nxt_ct, std::memory_order_acq_rel)) {
                break;
            }
            ipc::yield(k);
        }
        // Phase 2: we own slot cur_ct exclusively — write the payload.
        auto* el = elems + circ::index_of(cur_ct);
        std::forward<F>(f)(&(el->data_));
        // set flag & try update wt
        // ~cur_ct (bit-inverted counter) marks "slot cur_ct committed";
        // plain cur_ct could be confused with the flag's initial 0.
        el->f_ct_.store(~static_cast<flag_t>(cur_ct), std::memory_order_release);
        // Phase 3: publication. Only the producer whose slot is next in
        // line (cur_ct == wt_) publishes; it then keeps walking forward,
        // publishing any already-committed successors on their behalf.
        while (1) {
            auto cac_ct = el->f_ct_.load(std::memory_order_acquire);
            if (cur_ct != wt_.load(std::memory_order_relaxed)) {
                return true; // not our turn — the in-order producer will publish us
            }
            if ((~cac_ct) != cur_ct) {
                return true; // successor slot not committed yet — stop helping
            }
            // Claim the flag (reset to 0) so exactly one helper publishes it.
            if (!el->f_ct_.compare_exchange_strong(cac_ct, 0, std::memory_order_relaxed)) {
                return true; // someone else claimed it
            }
            wt_.store(nxt_ct, std::memory_order_release);
            // Advance to the next slot and try to publish it too.
            cur_ct = nxt_ct;
            nxt_ct = cur_ct + 1;
            el = elems + circ::index_of(cur_ct);
        }
        return true; // NOTE(review): unreachable — the while(1) above always returns
    }

    template <typename W, typename F, typename E>
    bool force_push(W* wrapper, F&&, E*) {
        wrapper->elems()->disconnect_receiver(1);
        return false;
    }

    // Competing-consumer pop, same copy-then-CAS scheme as the base class,
    // with one addition: when rd_ meets wt_ the queue may merely *look*
    // empty because a committed slot has not been published yet — in that
    // case the consumer helps advance wt_ instead of returning.
    template <typename W, typename F, typename R, 
              template <std::size_t, std::size_t> class E, std::size_t DS, std::size_t AS>
    bool pop(W* /*wrapper*/, circ::u2_t& /*cur*/, F&& f, R&& out, E<DS, AS>* elems) {
        byte_t buff[DS]; // stack staging buffer
        for (unsigned k = 0;;) {
            auto cur_rd = rd_.load(std::memory_order_relaxed);
            auto cur_wt = wt_.load(std::memory_order_acquire);
            auto id_rd  = circ::index_of(cur_rd);
            auto id_wt  = circ::index_of(cur_wt);
            if (id_rd == id_wt) {
                // Apparently empty: check whether slot cur_wt is committed
                // but unpublished, and publish it ourselves if so.
                auto* el = elems + id_wt;
                auto cac_ct = el->f_ct_.load(std::memory_order_acquire);
                if ((~cac_ct) != cur_wt) {
                    return false; // empty
                }
                if (el->f_ct_.compare_exchange_weak(cac_ct, 0, std::memory_order_relaxed)) {
                    wt_.store(cur_wt + 1, std::memory_order_release);
                }
                k = 0; // made progress — reset the backoff counter
            }
            else {
                // Copy out first; the CAS on rd_ decides ownership.
                std::memcpy(buff, &(elems[circ::index_of(cur_rd)].data_), sizeof(buff));
                if (rd_.compare_exchange_weak(cur_rd, cur_rd + 1, std::memory_order_release)) {
                    std::forward<F>(f)(buff);
                    std::forward<R>(out)(true);
                    return true;
                }
                ipc::yield(k);
            }
        }
    }
};
 | 
			
		||||
 | 
			
		||||
template <>
// Single-producer / multi-consumer, broadcast policy.
// Every connected reader sees every message. Each slot carries a 64-bit
// rc_ word: low 32 bits are a bitmask of readers that still have to consume
// the slot (one bit per connection), high 32 bits are the writer's epoch,
// bumped by force_push so stale rc_ values from previous rounds can be
// distinguished from current ones.
struct prod_cons_impl<wr<relat::single, relat::multi, trans::broadcast>> {

    using rc_t = std::uint64_t;

    enum : rc_t {
        ep_mask = 0x00000000ffffffffull, // low half: pending-reader bitmask
        ep_incr = 0x0000000100000000ull  // epoch increment (high half +1)
    };

    // Ring slot: payload plus the pending-reader counter word.
    template <std::size_t DataSize, std::size_t AlignSize>
    struct elem_t {
        std::aligned_storage_t<DataSize, AlignSize> data_ {};
        std::atomic<rc_t> rc_ { 0 }; // read-counter
    };

    alignas(cache_line_size) std::atomic<circ::u2_t> wt_;   // write index
    alignas(cache_line_size) rc_t epoch_ { 0 };             // only one writer

    // Broadcast readers chase the shared write cursor.
    circ::u2_t cursor() const noexcept {
        return wt_.load(std::memory_order_acquire);
    }

    template <typename W, typename F, typename E>
    bool push(W* wrapper, F&& f, E* elems) {
        E* el;
        for (unsigned k = 0;;) {
            // Snapshot of currently-connected readers (bitmask).
            circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed);
            if (cc == 0) return false; // no reader
            el = elems + circ::index_of(wt_.load(std::memory_order_relaxed));
            // check all consumers have finished reading this element
            auto cur_rc = el->rc_.load(std::memory_order_acquire);
            circ::cc_t rem_cc = cur_rc & ep_mask;
            // Block only if a still-connected reader is pending AND the
            // pending bits belong to the current epoch (stale epochs are
            // leftovers from evicted readers and may be overwritten).
            if ((cc & rem_cc) && ((cur_rc & ~ep_mask) == epoch_)) {
                return false; // has not finished yet
            }
            // consider rem_cc to be 0 here
            // Arm the slot: every currently connected reader must consume it.
            if (el->rc_.compare_exchange_weak(
                        cur_rc, epoch_ | static_cast<rc_t>(cc), std::memory_order_release)) {
                break;
            }
            ipc::yield(k);
        }
        std::forward<F>(f)(&(el->data_));
        wt_.fetch_add(1, std::memory_order_release);
        return true;
    }

    // Like push, but never reports "full": bumps the epoch and forcibly
    // disconnects readers that are still holding up the slot.
    template <typename W, typename F, typename E>
    bool force_push(W* wrapper, F&& f, E* elems) {
        E* el;
        epoch_ += ep_incr; // single writer — plain increment is safe
        for (unsigned k = 0;;) {
            circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed);
            if (cc == 0) return false; // no reader
            el = elems + circ::index_of(wt_.load(std::memory_order_relaxed));
            // check all consumers have finished reading this element
            auto cur_rc = el->rc_.load(std::memory_order_acquire);
            circ::cc_t rem_cc = cur_rc & ep_mask;
            if (cc & rem_cc) {
                ipc::log("force_push: k = %u, cc = %u, rem_cc = %u\n", k, cc, rem_cc);
                cc = wrapper->elems()->disconnect_receiver(rem_cc); // disconnect all invalid readers
                if (cc == 0) return false; // no reader
            }
            // just compare & exchange
            if (el->rc_.compare_exchange_weak(
                        cur_rc, epoch_ | static_cast<rc_t>(cc), std::memory_order_release)) {
                break;
            }
            ipc::yield(k);
        }
        std::forward<F>(f)(&(el->data_));
        wt_.fetch_add(1, std::memory_order_release);
        return true;
    }

    // Broadcast pop: 'cur' is this reader's private cursor. The payload is
    // read in place, then this reader clears its own bit from rc_; out()
    // receives true when this reader was the last one (slot fully drained).
    template <typename W, typename F, typename R, typename E>
    bool pop(W* wrapper, circ::u2_t& cur, F&& f, R&& out, E* elems) {
        if (cur == cursor()) return false; // acquire
        auto* el = elems + circ::index_of(cur++);
        std::forward<F>(f)(&(el->data_));
        for (unsigned k = 0;;) {
            auto cur_rc = el->rc_.load(std::memory_order_acquire);
            if ((cur_rc & ep_mask) == 0) {
                // Already fully consumed (our bit was cleared elsewhere,
                // e.g. by a force_push eviction).
                std::forward<R>(out)(true);
                return true;
            }
            // Clear this reader's connection bit.
            auto nxt_rc = cur_rc & ~static_cast<rc_t>(wrapper->connected_id());
            if (el->rc_.compare_exchange_weak(cur_rc, nxt_rc, std::memory_order_release)) {
                std::forward<R>(out)((nxt_rc & ep_mask) == 0);
                return true;
            }
            ipc::yield(k);
        }
    }
};
 | 
			
		||||
 | 
			
		||||
template <>
// Multi-producer / multi-consumer, broadcast policy — the most involved
// variant. Combines the broadcast read-counter scheme with multi-producer
// slot reservation. Per-slot rc_ packs three fields (see the enum): a
// reader bitmask, a middle "inc" counter advanced on each reuse of the
// slot, and a top epoch byte; f_ct_ carries the commit handshake between
// producers and readers. epoch_ is itself atomic because many producers
// may bump it concurrently in force_push.
struct prod_cons_impl<wr<relat::multi, relat::multi, trans::broadcast>> {

    using rc_t   = std::uint64_t;
    using flag_t = std::uint64_t;

    enum : rc_t {
        rc_mask = 0x00000000ffffffffull, // low 32: pending-reader bitmask
        ep_mask = 0x00ffffffffffffffull, // everything below the epoch byte
        ep_incr = 0x0100000000000000ull, // epoch increment (top byte +1)
        ic_mask = 0xff000000ffffffffull, // complement of the middle counter
        ic_incr = 0x0000000100000000ull  // middle-counter increment
    };

    // Ring slot: payload, pending-reader word, and commit flag.
    template <std::size_t DataSize, std::size_t AlignSize>
    struct elem_t {
        std::aligned_storage_t<DataSize, AlignSize> data_ {};
        std::atomic<rc_t  > rc_   { 0 }; // read-counter
        std::atomic<flag_t> f_ct_ { 0 }; // commit flag
    };

    alignas(cache_line_size) std::atomic<circ::u2_t> ct_;   // commit index
    alignas(cache_line_size) std::atomic<rc_t> epoch_ { 0 };

    circ::u2_t cursor() const noexcept {
        return ct_.load(std::memory_order_acquire);
    }

    // Advance the middle counter of rc, leaving the ic_mask bits untouched
    // (the counter wraps within its own field instead of carrying over).
    constexpr static rc_t inc_rc(rc_t rc) noexcept {
        return (rc & ic_mask) | ((rc + ic_incr) & ~ic_mask);
    }

    // Same, but with the reader bitmask cleared — the "slot reused" stamp.
    constexpr static rc_t inc_mask(rc_t rc) noexcept {
        return inc_rc(rc) & ~rc_mask;
    }

    template <typename W, typename F, typename E>
    bool push(W* wrapper, F&& f, E* elems) {
        E* el;
        circ::u2_t cur_ct;
        rc_t epoch = epoch_.load(std::memory_order_acquire);
        for (unsigned k = 0;;) {
            circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed);
            if (cc == 0) return false; // no reader
            el = elems + circ::index_of(cur_ct = ct_.load(std::memory_order_relaxed));
            // check all consumers have finished reading this element
            auto cur_rc = el->rc_.load(std::memory_order_relaxed);
            circ::cc_t rem_cc = cur_rc & rc_mask;
            if ((cc & rem_cc) && ((cur_rc & ~ep_mask) == epoch)) {
                return false; // has not finished yet
            }
            else if (!rem_cc) {
                // No pending readers: the commit flag tells whether the slot
                // is free or still awaiting pickup from a previous lap.
                auto cur_fl = el->f_ct_.load(std::memory_order_acquire);
                if ((cur_fl != cur_ct) && cur_fl) {
                    return false; // full
                }
            }
            // consider rem_cc to be 0 here
            // Claim the slot (stamping a fresh inc counter + reader mask),
            // then re-verify the epoch did not move under us; both CASes
            // must succeed for the claim to count.
            if (el->rc_.compare_exchange_weak(
                        cur_rc, inc_mask(epoch | (cur_rc & ep_mask)) | static_cast<rc_t>(cc), std::memory_order_relaxed) &&
                epoch_.compare_exchange_weak(epoch, epoch, std::memory_order_acq_rel)) {
                break;
            }
            ipc::yield(k);
        }
        // only one thread/process would touch here at one time
        ct_.store(cur_ct + 1, std::memory_order_release);
        std::forward<F>(f)(&(el->data_));
        // set flag & try update wt
        el->f_ct_.store(~static_cast<flag_t>(cur_ct), std::memory_order_release);
        return true;
    }

    // force_push: bump the epoch, evict blocking readers, and claim the
    // slot; if the epoch races forward mid-claim, retry via ordinary push.
    template <typename W, typename F, typename E>
    bool force_push(W* wrapper, F&& f, E* elems) {
        E* el;
        circ::u2_t cur_ct;
        rc_t epoch = epoch_.fetch_add(ep_incr, std::memory_order_release) + ep_incr;
        for (unsigned k = 0;;) {
            circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed);
            if (cc == 0) return false; // no reader
            el = elems + circ::index_of(cur_ct = ct_.load(std::memory_order_relaxed));
            // check all consumers have finished reading this element
            auto cur_rc = el->rc_.load(std::memory_order_acquire);
            circ::cc_t rem_cc = cur_rc & rc_mask;
            if (cc & rem_cc) {
                ipc::log("force_push: k = %u, cc = %u, rem_cc = %u\n", k, cc, rem_cc);
                cc = wrapper->elems()->disconnect_receiver(rem_cc); // disconnect all invalid readers
                if (cc == 0) return false; // no reader
            }
            // just compare & exchange
            if (el->rc_.compare_exchange_weak(
                        cur_rc, inc_mask(epoch | (cur_rc & ep_mask)) | static_cast<rc_t>(cc), std::memory_order_relaxed)) {
                if (epoch == epoch_.load(std::memory_order_acquire)) {
                    break; // our claim is valid under the current epoch
                }
                // Epoch moved (another force_push raced us): try a normal
                // push first, else re-arm with a fresh epoch and loop.
                else if (push(wrapper, std::forward<F>(f), elems)) {
                    return true;
                }
                epoch = epoch_.fetch_add(ep_incr, std::memory_order_release) + ep_incr;
            }
            ipc::yield(k);
        }
        // only one thread/process would touch here at one time
        ct_.store(cur_ct + 1, std::memory_order_release);
        std::forward<F>(f)(&(el->data_));
        // set flag & try update wt
        el->f_ct_.store(~static_cast<flag_t>(cur_ct), std::memory_order_release);
        return true;
    }

    // Broadcast pop with per-reader cursor 'cur'. The array-reference
    // parameter exposes N (ring capacity) so the last reader can stamp
    // f_ct_ with cur + N - 1 — the value that, one full lap later, equals
    // the producer's cur_ct and marks the slot reusable (see push's
    // "full" check).
    template <typename W, typename F, typename R, typename E, std::size_t N>
    bool pop(W* wrapper, circ::u2_t& cur, F&& f, R&& out, E(& elems)[N]) {
        auto* el = elems + circ::index_of(cur);
        auto cur_fl = el->f_ct_.load(std::memory_order_acquire);
        if (cur_fl != ~static_cast<flag_t>(cur)) {
            return false; // empty
        }
        ++cur;
        std::forward<F>(f)(&(el->data_));
        for (unsigned k = 0;;) {
            auto cur_rc = el->rc_.load(std::memory_order_acquire);
            if ((cur_rc & rc_mask) == 0) {
                // Already fully drained (e.g. readers evicted by force_push).
                std::forward<R>(out)(true);
                el->f_ct_.store(cur + N - 1, std::memory_order_release);
                return true;
            }
            // Clear our reader bit and bump the reuse counter.
            auto nxt_rc = inc_rc(cur_rc) & ~static_cast<rc_t>(wrapper->connected_id());
            bool last_one = false;
            if ((last_one = (nxt_rc & rc_mask) == 0)) {
                // We would be the last reader: pre-stamp the recycle mark.
                el->f_ct_.store(cur + N - 1, std::memory_order_release);
            }
            if (el->rc_.compare_exchange_weak(cur_rc, nxt_rc, std::memory_order_release)) {
                std::forward<R>(out)(last_one);
                return true;
            }
            ipc::yield(k);
        }
    }
};
 | 
			
		||||
 | 
			
		||||
} // namespace ipc
 | 
			
		||||
@ -11,7 +11,7 @@ def 高阶功能模板函数(txt, top_p, temperature, chatbot, history, systemPr
 | 
			
		||||
    for i in range(5):
 | 
			
		||||
        currentMonth = (datetime.date.today() + datetime.timedelta(days=i)).month
 | 
			
		||||
        currentDay = (datetime.date.today() + datetime.timedelta(days=i)).day
 | 
			
		||||
        i_say = f'历史中哪些事件发生在{currentMonth}月{currentDay}日?列举两条并发送相关图片。发送图片时,请使用Markdown,将Unsplash API中的PUT_YOUR_QUERY_HERE替换成描述改事件的三个最重要的单词。'
 | 
			
		||||
        i_say = f'历史中哪些事件发生在{currentMonth}月{currentDay}日?列举两条并发送相关图片。发送图片时,请使用Markdown,将Unsplash API中的PUT_YOUR_QUERY_HERE替换成描述该事件的一个最重要的单词。'
 | 
			
		||||
        chatbot.append((i_say, "[Local Message] waiting gpt response."))
 | 
			
		||||
        yield chatbot, history, '正常'  # 由于请求gpt需要一段时间,我们先及时地做一次状态显示
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
@ -19,10 +19,10 @@ def get_crazy_functionals():
 | 
			
		||||
 | 
			
		||||
    function_plugins = {
 | 
			
		||||
        "请解析并解构此项目本身": {
 | 
			
		||||
            # HotReload 的意思是热更新,修改函数插件后,不需要重启程序,代码直接生效
 | 
			
		||||
            "AsButton": False,  # 加入下拉菜单中
 | 
			
		||||
            "Function": 解析项目本身
 | 
			
		||||
        },
 | 
			
		||||
        "解析整个py项目": {
 | 
			
		||||
        "解析整个Py项目": {
 | 
			
		||||
            "Color": "stop",    # 按钮颜色
 | 
			
		||||
            "Function": 解析一个Python项目
 | 
			
		||||
        },
 | 
			
		||||
@ -32,9 +32,10 @@ def get_crazy_functionals():
 | 
			
		||||
        },
 | 
			
		||||
        "解析整个C++项目": {
 | 
			
		||||
            "Color": "stop",    # 按钮颜色
 | 
			
		||||
            "AsButton": False,  # 加入下拉菜单中
 | 
			
		||||
            "Function": 解析一个C项目
 | 
			
		||||
        },
 | 
			
		||||
        "读tex论文写摘要": {
 | 
			
		||||
        "读Tex论文写摘要": {
 | 
			
		||||
            "Color": "stop",    # 按钮颜色
 | 
			
		||||
            "Function": 读文章写摘要
 | 
			
		||||
        },
 | 
			
		||||
@ -52,7 +53,7 @@ def get_crazy_functionals():
 | 
			
		||||
        },
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    # VisibleLevel=1 经过测试,但功能未达到理想状态
 | 
			
		||||
    # VisibleLevel=1 经过测试,但功能上距离达到完美状态还差一点点
 | 
			
		||||
    if UserVisibleLevel >= 1:
 | 
			
		||||
        from crazy_functions.批量总结PDF文档 import 批量总结PDF文档
 | 
			
		||||
        from crazy_functions.批量总结PDF文档pdfminer import 批量总结PDF文档pdfminer
 | 
			
		||||
@ -60,11 +61,11 @@ def get_crazy_functionals():
 | 
			
		||||
        function_plugins.update({
 | 
			
		||||
            "[仅供开发调试] 批量总结PDF文档": {
 | 
			
		||||
                "Color": "stop",
 | 
			
		||||
                # HotReload 的意思是热更新,修改函数插件代码后,不需要重启程序,代码直接生效
 | 
			
		||||
                "Function": HotReload(批量总结PDF文档)
 | 
			
		||||
                "Function": HotReload(批量总结PDF文档) # HotReload 的意思是热更新,修改函数插件代码后,不需要重启程序,代码直接生效
 | 
			
		||||
            },
 | 
			
		||||
            "[仅供开发调试] 批量总结PDF文档pdfminer": {
 | 
			
		||||
                "Color": "stop",
 | 
			
		||||
                "AsButton": False,  # 加入下拉菜单中
 | 
			
		||||
                "Function": HotReload(批量总结PDF文档pdfminer)
 | 
			
		||||
            },
 | 
			
		||||
            "[仅供开发调试] 批量总结Word文档": {
 | 
			
		||||
 | 
			
		||||
							
								
								
									
										105
									
								
								main.py
									
									
									
									
									
								
							
							
						
						
									
										105
									
								
								main.py
									
									
									
									
									
								
							@ -4,9 +4,8 @@ from predict import predict
 | 
			
		||||
from toolbox import format_io, find_free_port, on_file_uploaded, on_report_generated, get_conf
 | 
			
		||||
 | 
			
		||||
# 建议您复制一个config_private.py放自己的秘密, 如API和代理网址, 避免不小心传github被别人看到
 | 
			
		||||
proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION = \
 | 
			
		||||
    get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION')
 | 
			
		||||
 | 
			
		||||
proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION, CHATBOT_HEIGHT = \
 | 
			
		||||
    get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION', 'CHATBOT_HEIGHT')
 | 
			
		||||
 | 
			
		||||
# 如果WEB_PORT是-1, 则随机选取WEB端口
 | 
			
		||||
PORT = find_free_port() if WEB_PORT <= 0 else WEB_PORT
 | 
			
		||||
@ -17,18 +16,18 @@ title_html = """<h1 align="center">ChatGPT 学术优化</h1>"""
 | 
			
		||||
 | 
			
		||||
# 问询记录, python 版本建议3.9+(越新越好)
 | 
			
		||||
import logging
 | 
			
		||||
os.makedirs('gpt_log', exist_ok=True)
 | 
			
		||||
try:logging.basicConfig(filename='gpt_log/chat_secrets.log', level=logging.INFO, encoding='utf-8')
 | 
			
		||||
except:logging.basicConfig(filename='gpt_log/chat_secrets.log', level=logging.INFO)
 | 
			
		||||
print('所有问询记录将自动保存在本地目录./gpt_log/chat_secrets.log, 请注意自我隐私保护哦!')
 | 
			
		||||
os.makedirs("gpt_log", exist_ok=True)
 | 
			
		||||
try:logging.basicConfig(filename="gpt_log/chat_secrets.log", level=logging.INFO, encoding="utf-8")
 | 
			
		||||
except:logging.basicConfig(filename="gpt_log/chat_secrets.log", level=logging.INFO)
 | 
			
		||||
print("所有问询记录将自动保存在本地目录./gpt_log/chat_secrets.log, 请注意自我隐私保护哦!")
 | 
			
		||||
 | 
			
		||||
# 一些普通功能模块
 | 
			
		||||
from functional import get_functionals
 | 
			
		||||
functional = get_functionals()
 | 
			
		||||
 | 
			
		||||
# 对一些丧心病狂的实验性功能模块进行测试
 | 
			
		||||
# 高级函数插件
 | 
			
		||||
from functional_crazy import get_crazy_functionals
 | 
			
		||||
crazy_functional = get_crazy_functionals()
 | 
			
		||||
crazy_fns = get_crazy_functionals()
 | 
			
		||||
 | 
			
		||||
# 处理markdown文本格式的转变
 | 
			
		||||
gr.Chatbot.postprocess = format_io
 | 
			
		||||
@ -40,11 +39,10 @@ set_theme = adjust_theme()
 | 
			
		||||
cancel_handles = []
 | 
			
		||||
with gr.Blocks(theme=set_theme, analytics_enabled=False) as demo:
 | 
			
		||||
    gr.HTML(title_html)
 | 
			
		||||
    with gr.Row():
 | 
			
		||||
    with gr.Row().style(equal_height=True):
 | 
			
		||||
        with gr.Column(scale=2):
 | 
			
		||||
            chatbot = gr.Chatbot()
 | 
			
		||||
            chatbot.style(height=1150)
 | 
			
		||||
            chatbot.style()
 | 
			
		||||
            chatbot.style(height=CHATBOT_HEIGHT)
 | 
			
		||||
            history = gr.State([])
 | 
			
		||||
        with gr.Column(scale=1):
 | 
			
		||||
            with gr.Row():
 | 
			
		||||
@ -66,49 +64,70 @@ with gr.Blocks(theme=set_theme, analytics_enabled=False) as demo:
 | 
			
		||||
                with gr.Row():
 | 
			
		||||
                    gr.Markdown("注意:以下“红颜色”标识的函数插件需从input区读取路径作为参数.")
 | 
			
		||||
                with gr.Row():
 | 
			
		||||
                    for k in crazy_functional:
 | 
			
		||||
                        variant = crazy_functional[k]["Color"] if "Color" in crazy_functional[k] else "secondary"
 | 
			
		||||
                        crazy_functional[k]["Button"] = gr.Button(k, variant=variant)
 | 
			
		||||
                    for k in crazy_fns:
 | 
			
		||||
                        if not crazy_fns[k].get("AsButton", True): continue
 | 
			
		||||
                        variant = crazy_fns[k]["Color"] if "Color" in crazy_fns[k] else "secondary"
 | 
			
		||||
                        crazy_fns[k]["Button"] = gr.Button(k, variant=variant)
 | 
			
		||||
                with gr.Row():
 | 
			
		||||
                    with gr.Accordion("展开“文件上传区”。上传本地文件供“红颜色”的函数插件调用。", open=False):
 | 
			
		||||
                        file_upload = gr.Files(label='任何文件, 但推荐上传压缩文件(zip, tar)', file_count="multiple")
 | 
			
		||||
                    with gr.Accordion("更多函数插件", open=True):
 | 
			
		||||
                        dropdown_fn_list = [k for k in crazy_fns.keys() if not crazy_fns[k].get("AsButton", True)]
 | 
			
		||||
                        with gr.Column(scale=1):
 | 
			
		||||
                            dropdown = gr.Dropdown(dropdown_fn_list, value=r"打开插件列表", label="").style(container=False)
 | 
			
		||||
                        with gr.Column(scale=1):
 | 
			
		||||
                            switchy_bt = gr.Button(r"请先从插件列表中选择", variant="secondary")
 | 
			
		||||
                with gr.Row():
 | 
			
		||||
                    with gr.Accordion("点击展开“文件上传区”。上传本地文件可供红色函数插件调用。", open=False) as area_file_up:
 | 
			
		||||
                        file_upload = gr.Files(label="任何文件, 但推荐上传压缩文件(zip, tar)", file_count="multiple")
 | 
			
		||||
            with gr.Accordion("展开SysPrompt & GPT参数 & 交互界面布局", open=False):
 | 
			
		||||
                system_prompt = gr.Textbox(show_label=True, placeholder=f"System Prompt", label="System prompt", value=initial_prompt)
 | 
			
		||||
                top_p = gr.Slider(minimum=-0, maximum=1.0, value=1.0, step=0.01,interactive=True, label="Top-p (nucleus sampling)",)
 | 
			
		||||
                temperature = gr.Slider(minimum=-0, maximum=2.0, value=1.0, step=0.01, interactive=True, label="Temperature",)
 | 
			
		||||
                checkboxes = gr.CheckboxGroup(["基础功能区", "函数插件区"], 
 | 
			
		||||
                                        value=["基础功能区", "函数插件区"], label="显示哪些功能区")
 | 
			
		||||
                checkboxes = gr.CheckboxGroup(["基础功能区", "函数插件区"], value=["基础功能区", "函数插件区"], label="显示/隐藏功能区")
 | 
			
		||||
 | 
			
		||||
    def what_is_this(a):
 | 
			
		||||
    # 功能区显示开关与功能区的互动
 | 
			
		||||
    def fn_area_visibility(a):
 | 
			
		||||
        ret = {}
 | 
			
		||||
        # if area_basic_fn.visible != ("基础功能区" in a): 
 | 
			
		||||
        ret.update({area_basic_fn: gr.update(visible=("基础功能区" in a))}) 
 | 
			
		||||
        # if area_crazy_fn.visible != ("函数插件区" in a): 
 | 
			
		||||
        ret.update({area_crazy_fn: gr.update(visible=("函数插件区" in a))}) 
 | 
			
		||||
        ret.update({area_basic_fn: gr.update(visible=("基础功能区" in a))})
 | 
			
		||||
        ret.update({area_crazy_fn: gr.update(visible=("函数插件区" in a))})
 | 
			
		||||
        return ret
 | 
			
		||||
 | 
			
		||||
    checkboxes.select(what_is_this, [checkboxes], [area_basic_fn, area_crazy_fn] )
 | 
			
		||||
 | 
			
		||||
    predict_args = dict(fn=predict, inputs=[txt, top_p, temperature, chatbot, history, system_prompt], outputs=[chatbot, history, statusDisplay], show_progress=True)
 | 
			
		||||
    checkboxes.select(fn_area_visibility, [checkboxes], [area_basic_fn, area_crazy_fn] )
 | 
			
		||||
    # 整理反复出现的控件句柄组合
 | 
			
		||||
    input_combo = [txt, top_p, temperature, chatbot, history, system_prompt]
 | 
			
		||||
    output_combo = [chatbot, history, statusDisplay]
 | 
			
		||||
    predict_args = dict(fn=predict, inputs=input_combo, outputs=output_combo, show_progress=True)
 | 
			
		||||
    empty_txt_args = dict(fn=lambda: "", inputs=[], outputs=[txt]) # 用于在提交后清空输入栏
 | 
			
		||||
 | 
			
		||||
    cancel_handles.append(txt.submit(**predict_args))
 | 
			
		||||
    # txt.submit(**empty_txt_args) 在提交后清空输入栏
 | 
			
		||||
    cancel_handles.append(submitBtn.click(**predict_args))
 | 
			
		||||
    # submitBtn.click(**empty_txt_args) 在提交后清空输入栏
 | 
			
		||||
    resetBtn.click(lambda: ([], [], "已重置"), None, [chatbot, history, statusDisplay])
 | 
			
		||||
    # 提交按钮、重置按钮
 | 
			
		||||
    cancel_handles.append(txt.submit(**predict_args)) #; txt.submit(**empty_txt_args) 在提交后清空输入栏
 | 
			
		||||
    cancel_handles.append(submitBtn.click(**predict_args)) #; submitBtn.click(**empty_txt_args) 在提交后清空输入栏
 | 
			
		||||
    resetBtn.click(lambda: ([], [], "已重置"), None, output_combo)
 | 
			
		||||
    # 基础功能区的回调函数注册
 | 
			
		||||
    for k in functional:
 | 
			
		||||
        click_handle = functional[k]["Button"].click(predict,
 | 
			
		||||
            [txt, top_p, temperature, chatbot, history, system_prompt, gr.State(True), gr.State(k)], [chatbot, history, statusDisplay], show_progress=True)
 | 
			
		||||
        click_handle = functional[k]["Button"].click(predict, [*input_combo, gr.State(True), gr.State(k)], output_combo, show_progress=True)
 | 
			
		||||
        cancel_handles.append(click_handle)
 | 
			
		||||
    # 文件上传区,接收文件后与chatbot的互动
 | 
			
		||||
    file_upload.upload(on_file_uploaded, [file_upload, chatbot, txt], [chatbot, txt])
 | 
			
		||||
    for k in crazy_functional:
 | 
			
		||||
        click_handle = crazy_functional[k]["Button"].click(crazy_functional[k]["Function"],
 | 
			
		||||
            [txt, top_p, temperature, chatbot, history, system_prompt, gr.State(PORT)], [chatbot, history, statusDisplay]
 | 
			
		||||
        )
 | 
			
		||||
        try: click_handle.then(on_report_generated, [file_upload, chatbot], [file_upload, chatbot])
 | 
			
		||||
        except: pass
 | 
			
		||||
    # 函数插件-固定按钮区
 | 
			
		||||
    for k in crazy_fns:
 | 
			
		||||
        if not crazy_fns[k].get("AsButton", True): continue
 | 
			
		||||
        click_handle = crazy_fns[k]["Button"].click(crazy_fns[k]["Function"], [*input_combo, gr.State(PORT)], output_combo)
 | 
			
		||||
        click_handle.then(on_report_generated, [file_upload, chatbot], [file_upload, chatbot])
 | 
			
		||||
        cancel_handles.append(click_handle)
 | 
			
		||||
    # 函数插件-下拉菜单与随变按钮的互动
 | 
			
		||||
    def on_dropdown_changed(k):
 | 
			
		||||
        variant = crazy_fns[k]["Color"] if "Color" in crazy_fns[k] else "secondary"
 | 
			
		||||
        return {switchy_bt: gr.update(value=k, variant=variant)}
 | 
			
		||||
    dropdown.select(on_dropdown_changed, [dropdown], [switchy_bt] )
 | 
			
		||||
    # 随变按钮的回调函数注册
 | 
			
		||||
    def route(k, *args, **kwargs):
 | 
			
		||||
        if k in [r"打开插件列表", r"先从插件列表中选择"]: return 
 | 
			
		||||
        yield from crazy_fns[k]["Function"](*args, **kwargs)
 | 
			
		||||
    click_handle = switchy_bt.click(route,[switchy_bt, *input_combo, gr.State(PORT)], output_combo)
 | 
			
		||||
    click_handle.then(on_report_generated, [file_upload, chatbot], [file_upload, chatbot])
 | 
			
		||||
    def expand_file_area(file_upload, area_file_up):
 | 
			
		||||
        if len(file_upload)>0: return {area_file_up: gr.update(open=True)}
 | 
			
		||||
    click_handle.then(expand_file_area, [file_upload, area_file_up], [area_file_up])
 | 
			
		||||
    cancel_handles.append(click_handle)
 | 
			
		||||
    # 终止按钮的回调函数注册
 | 
			
		||||
    stopBtn.click(fn=None, inputs=None, outputs=None, cancels=cancel_handles)
 | 
			
		||||
 | 
			
		||||
# gradio的inbrowser触发不太稳定,回滚代码到原始的浏览器打开函数
 | 
			
		||||
@ -117,7 +136,7 @@ def auto_opentab_delay():
 | 
			
		||||
    print(f"如果浏览器没有自动打开,请复制并转到以下URL: http://localhost:{PORT}")
 | 
			
		||||
    def open(): 
 | 
			
		||||
        time.sleep(2)
 | 
			
		||||
        webbrowser.open_new_tab(f'http://localhost:{PORT}')
 | 
			
		||||
        webbrowser.open_new_tab(f"http://localhost:{PORT}")
 | 
			
		||||
    threading.Thread(target=open, name="open-browser", daemon=True).start()
 | 
			
		||||
 | 
			
		||||
auto_opentab_delay()
 | 
			
		||||
 | 
			
		||||
							
								
								
									
										12
									
								
								predict.py
									
									
									
									
									
								
							
							
						
						
									
										12
									
								
								predict.py
									
									
									
									
									
								
							@ -96,13 +96,19 @@ def predict_no_ui_long_connection(inputs, top_p, temperature, history=[], sys_pr
 | 
			
		||||
        except StopIteration: break
 | 
			
		||||
        if len(chunk)==0: continue
 | 
			
		||||
        if not chunk.startswith('data:'): 
 | 
			
		||||
            chunk = get_full_error(chunk.encode('utf8'), stream_response)
 | 
			
		||||
            raise ConnectionAbortedError("OpenAI拒绝了请求:" + chunk.decode())
 | 
			
		||||
        delta = json.loads(chunk.lstrip('data:'))['choices'][0]["delta"]
 | 
			
		||||
            error_msg = get_full_error(chunk.encode('utf8'), stream_response).decode()
 | 
			
		||||
            if "reduce the length" in error_msg:
 | 
			
		||||
                raise ConnectionAbortedError("OpenAI拒绝了请求:" + error_msg)
 | 
			
		||||
            else:
 | 
			
		||||
                raise RuntimeError("OpenAI拒绝了请求:" + error_msg)
 | 
			
		||||
        json_data = json.loads(chunk.lstrip('data:'))['choices'][0]
 | 
			
		||||
        delta = json_data["delta"]
 | 
			
		||||
        if len(delta) == 0: break
 | 
			
		||||
        if "role" in delta: continue
 | 
			
		||||
        if "content" in delta: result += delta["content"]; print(delta["content"], end='')
 | 
			
		||||
        else: raise RuntimeError("意外Json结构:"+delta)
 | 
			
		||||
    if json_data['finish_reason'] == 'length':
 | 
			
		||||
        raise ConnectionAbortedError("正常结束,但显示Token不足。")
 | 
			
		||||
    return result
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
							
								
								
									
										25
									
								
								toolbox.py
									
									
									
									
									
								
							
							
						
						
									
										25
									
								
								toolbox.py
									
									
									
									
									
								
							@ -2,21 +2,21 @@ import markdown, mdtex2html, threading, importlib, traceback, importlib, inspect
 | 
			
		||||
from show_math import convert as convert_math
 | 
			
		||||
from functools import wraps
 | 
			
		||||
 | 
			
		||||
def get_reduce_token_percent(e):
 | 
			
		||||
def get_reduce_token_percent(text):
 | 
			
		||||
    try:
 | 
			
		||||
        # text = "maximum context length is 4097 tokens. However, your messages resulted in 4870 tokens"
 | 
			
		||||
        pattern = r"(\d+)\s+tokens\b"
 | 
			
		||||
        match = re.findall(pattern, text)
 | 
			
		||||
        eps = 50 # 稍微留一点余地, 确保下次别再超过token
 | 
			
		||||
        max_limit = float(match[0]) - eps
 | 
			
		||||
        EXCEED_ALLO = 500 # 稍微留一点余地,否则在回复时会因余量太少出问题
 | 
			
		||||
        max_limit = float(match[0]) - EXCEED_ALLO
 | 
			
		||||
        current_tokens = float(match[1])
 | 
			
		||||
        ratio = max_limit/current_tokens
 | 
			
		||||
        assert ratio > 0 and ratio < 1
 | 
			
		||||
        return ratio
 | 
			
		||||
        return ratio, str(int(current_tokens-max_limit))
 | 
			
		||||
    except:
 | 
			
		||||
        return 0.5
 | 
			
		||||
        return 0.5, '不详'
 | 
			
		||||
 | 
			
		||||
def predict_no_ui_but_counting_down(i_say, i_say_show_user, chatbot, top_p, temperature, history=[], sys_prompt='', long_connection=False):
 | 
			
		||||
def predict_no_ui_but_counting_down(i_say, i_say_show_user, chatbot, top_p, temperature, history=[], sys_prompt='', long_connection=True):
 | 
			
		||||
    """
 | 
			
		||||
        调用简单的predict_no_ui接口,但是依然保留了些许界面心跳功能,当对话太长时,会自动采用二分法截断
 | 
			
		||||
        i_say: 当前输入
 | 
			
		||||
@ -45,19 +45,18 @@ def predict_no_ui_but_counting_down(i_say, i_say_show_user, chatbot, top_p, temp
 | 
			
		||||
                break
 | 
			
		||||
            except ConnectionAbortedError as token_exceeded_error:
 | 
			
		||||
                # 尝试计算比例,尽可能多地保留文本
 | 
			
		||||
                p_ratio = get_reduce_token_percent(str(token_exceeded_error))
 | 
			
		||||
                p_ratio, n_exceed = get_reduce_token_percent(str(token_exceeded_error))
 | 
			
		||||
                if len(history) > 0:
 | 
			
		||||
                    history = [his[     int(len(his)    *p_ratio):      ] for his in history if his is not None]
 | 
			
		||||
                    mutable[1] = 'Warning! History conversation is too long, cut into half. '
 | 
			
		||||
                else:
 | 
			
		||||
                    i_say = i_say[:     int(len(i_say)  *p_ratio)     ]
 | 
			
		||||
                    mutable[1] = 'Warning! Input file is too long, cut into half. '
 | 
			
		||||
                mutable[1] = f'警告,文本过长将进行截断,Token溢出数:{n_exceed},截断比例:{(1-p_ratio):.0%}。'
 | 
			
		||||
            except TimeoutError as e:
 | 
			
		||||
                mutable[0] = '[Local Message] Failed with timeout.'
 | 
			
		||||
                mutable[0] = '[Local Message] 请求超时。'
 | 
			
		||||
                raise TimeoutError
 | 
			
		||||
            except Exception as e:
 | 
			
		||||
                mutable[0] = f'[Local Message] Failed with {str(e)}.'
 | 
			
		||||
                raise RuntimeError(f'[Local Message] Failed with {str(e)}.')
 | 
			
		||||
                mutable[0] = f'[Local Message] 异常:{str(e)}.'
 | 
			
		||||
                raise RuntimeError(f'[Local Message] 异常:{str(e)}.')
 | 
			
		||||
    # 创建新线程发出http请求
 | 
			
		||||
    thread_name = threading.Thread(target=mt, args=(i_say, history)); thread_name.start()
 | 
			
		||||
    # 原来的线程则负责持续更新UI,实现一个超时倒计时,并等待新线程的任务完成
 | 
			
		||||
@ -286,7 +285,7 @@ def on_report_generated(files, chatbot):
 | 
			
		||||
    report_files = find_recent_files('gpt_log')
 | 
			
		||||
    if len(report_files) == 0: return report_files, chatbot
 | 
			
		||||
    # files.extend(report_files)
 | 
			
		||||
    chatbot.append(['汇总报告如何远程获取?', '汇总报告已经添加到右侧文件上传区,请查收。'])
 | 
			
		||||
    chatbot.append(['汇总报告如何远程获取?', '汇总报告已经添加到右侧“文件上传区”(可能处于折叠状态),请查收。'])
 | 
			
		||||
    return report_files, chatbot
 | 
			
		||||
 | 
			
		||||
def get_conf(*args):
 | 
			
		||||
 | 
			
		||||
		Loading…
	
	
			
			x
			
			
		
	
		Reference in New Issue
	
	Block a user